From c7ef2a9a69e75bde26660f9fda1f4ad326641621 Mon Sep 17 00:00:00 2001 From: mmetc <92726601+mmetc@users.noreply.github.com> Date: Wed, 5 Feb 2025 14:34:55 +0100 Subject: [PATCH] merge from master (dc28ae58dc59e72981ee4724b1c72a79ba586ad8) for 1.6.5 (#3444) --- .github/workflows/bats-hub.yml | 2 +- .github/workflows/bats-mysql.yml | 2 +- .github/workflows/bats-postgres.yml | 2 +- .github/workflows/bats-sqlite-coverage.yml | 2 +- .github/workflows/ci-windows-build-msi.yml | 2 +- .github/workflows/codeql-analysis.yml | 2 +- .github/workflows/docker-tests.yml | 32 +- .github/workflows/go-tests-windows.yml | 4 +- .github/workflows/go-tests.yml | 20 +- .github/workflows/publish-tarball-release.yml | 2 +- .gitignore | 3 + .golangci.yml | 123 ++-- Dockerfile | 5 +- Dockerfile.debian | 5 +- Makefile | 1 + README.md | 146 +---- cmd/crowdsec-cli/clialert/alerts.go | 2 +- cmd/crowdsec-cli/clialert/table.go | 2 +- cmd/crowdsec-cli/clibouncer/inspect.go | 2 +- cmd/crowdsec-cli/clibouncer/list.go | 3 +- cmd/crowdsec-cli/clicapi/capi.go | 13 +- cmd/crowdsec-cli/cliconfig/backup.go | 20 + cmd/crowdsec-cli/{ => cliconfig}/config.go | 14 +- .../feature_flags.go} | 4 +- cmd/crowdsec-cli/cliconfig/restore.go | 20 + .../{config_show.go => cliconfig/show.go} | 2 +- .../showyaml.go} | 8 +- cmd/crowdsec-cli/cliconsole/console.go | 10 +- cmd/crowdsec-cli/clidecision/decisions.go | 4 +- cmd/crowdsec-cli/clidecision/table.go | 13 +- cmd/crowdsec-cli/clihub/hub.go | 83 ++- cmd/crowdsec-cli/clihub/items.go | 51 +- cmd/crowdsec-cli/clihub/utils_table.go | 64 +- cmd/crowdsec-cli/clihubtest/explain.go | 7 +- cmd/crowdsec-cli/clihubtest/run.go | 4 +- cmd/crowdsec-cli/cliitem/appsec.go | 123 ---- cmd/crowdsec-cli/cliitem/cmdinspect.go | 236 +++++++ cmd/crowdsec-cli/cliitem/cmdinstall.go | 150 +++++ cmd/crowdsec-cli/cliitem/cmdremove.go | 151 +++++ cmd/crowdsec-cli/cliitem/cmdupgrade.go | 106 +++ cmd/crowdsec-cli/cliitem/collection.go | 41 -- cmd/crowdsec-cli/cliitem/context.go | 41 -- 
cmd/crowdsec-cli/cliitem/hubappsec.go | 255 ++++++++ cmd/crowdsec-cli/cliitem/hubcollection.go | 105 +++ cmd/crowdsec-cli/cliitem/hubcontext.go | 102 +++ cmd/crowdsec-cli/cliitem/hubparser.go | 105 +++ cmd/crowdsec-cli/cliitem/hubpostoverflow.go | 102 +++ cmd/crowdsec-cli/cliitem/hubscenario.go | 78 ++- cmd/crowdsec-cli/cliitem/item.go | 454 +------------ .../item_metrics.go => cliitem/metrics.go} | 75 ++- cmd/crowdsec-cli/cliitem/metrics_table.go | 70 ++ cmd/crowdsec-cli/cliitem/parser.go | 41 -- cmd/crowdsec-cli/cliitem/postoverflow.go | 41 -- cmd/crowdsec-cli/cliitem/suggest.go | 77 --- cmd/crowdsec-cli/clilapi/context.go | 6 +- cmd/crowdsec-cli/clilapi/register.go | 11 +- cmd/crowdsec-cli/clilapi/status.go | 2 +- cmd/crowdsec-cli/climachine/add.go | 13 +- cmd/crowdsec-cli/climachine/inspect.go | 4 +- cmd/crowdsec-cli/climachine/list.go | 3 +- cmd/crowdsec-cli/climetrics/list.go | 3 +- cmd/crowdsec-cli/climetrics/show.go | 15 + cmd/crowdsec-cli/climetrics/statacquis.go | 6 +- cmd/crowdsec-cli/climetrics/statalert.go | 6 +- .../climetrics/statappsecengine.go | 6 +- cmd/crowdsec-cli/climetrics/statappsecrule.go | 5 +- cmd/crowdsec-cli/climetrics/statbouncer.go | 13 +- cmd/crowdsec-cli/climetrics/statbucket.go | 6 +- cmd/crowdsec-cli/climetrics/statdecision.go | 6 +- cmd/crowdsec-cli/climetrics/statlapi.go | 6 +- .../climetrics/statlapibouncer.go | 6 +- .../climetrics/statlapidecision.go | 6 +- .../climetrics/statlapimachine.go | 6 +- cmd/crowdsec-cli/climetrics/statparser.go | 6 +- cmd/crowdsec-cli/climetrics/statstash.go | 6 +- cmd/crowdsec-cli/climetrics/statwhitelist.go | 6 +- cmd/crowdsec-cli/climetrics/store.go | 3 +- .../clinotifications/notifications.go | 2 +- cmd/crowdsec-cli/clisetup/setup.go | 19 +- cmd/crowdsec-cli/clisimulation/simulation.go | 6 +- cmd/crowdsec-cli/clisupport/support.go | 8 +- cmd/crowdsec-cli/completion.go | 8 +- cmd/crowdsec-cli/config_backup.go | 236 ------- cmd/crowdsec-cli/config_restore.go | 274 -------- 
cmd/crowdsec-cli/copyfile.go | 82 --- cmd/crowdsec-cli/dashboard.go | 24 +- cmd/crowdsec-cli/idgen/machineid.go | 6 +- cmd/crowdsec-cli/idgen/password.go | 9 +- cmd/crowdsec-cli/main.go | 13 +- cmd/crowdsec-cli/reload/message.go | 6 + .../{reload_freebsd.go => message_freebsd.go} | 2 +- .../{reload_linux.go => message_linux.go} | 2 +- cmd/crowdsec-cli/reload/message_windows.go | 3 + cmd/crowdsec-cli/reload/reload.go | 22 +- cmd/crowdsec-cli/reload/reload_windows.go | 3 - cmd/crowdsec-cli/require/branch.go | 2 +- cmd/crowdsec-cli/require/require.go | 14 +- cmd/crowdsec-cli/setup.go | 1 + cmd/crowdsec/appsec.go | 2 +- cmd/crowdsec/fatalhook.go | 24 +- cmd/crowdsec/main.go | 26 +- cmd/crowdsec/pour.go | 2 +- cmd/crowdsec/serve.go | 6 +- cmd/crowdsec/win_service.go | 2 +- cmd/notification-email/main.go | 2 +- debian/install | 1 - debian/postinst | 34 +- debian/preinst | 43 -- debian/prerm | 3 +- debian/rules | 2 +- docker/test/.python-version | 1 + docker/test/Pipfile | 11 - docker/test/Pipfile.lock | 604 ------------------ docker/test/README.md | 0 docker/test/pyproject.toml | 41 ++ docker/test/tests/conftest.py | 9 +- docker/test/tests/test_agent.py | 56 +- docker/test/tests/test_agent_only.py | 22 +- docker/test/tests/test_bouncer.py | 25 +- docker/test/tests/test_capi.py | 23 +- docker/test/tests/test_capi_whitelists.py | 22 +- docker/test/tests/test_cold_logs.py | 29 +- docker/test/tests/test_flavors.py | 47 +- docker/test/tests/test_hello.py | 13 +- docker/test/tests/test_hub.py | 16 +- docker/test/tests/test_hub_collections.py | 103 ++- docker/test/tests/test_hub_parsers.py | 60 +- docker/test/tests/test_hub_postoverflows.py | 41 +- docker/test/tests/test_hub_scenarios.py | 55 +- docker/test/tests/test_local_api_url.py | 40 +- docker/test/tests/test_local_item.py | 28 +- docker/test/tests/test_metrics.py | 46 +- docker/test/tests/test_nolapi.py | 5 +- docker/test/tests/test_simple.py | 2 +- docker/test/tests/test_tls.py | 234 ++++--- 
docker/test/tests/test_version.py | 8 +- docker/test/tests/test_wal.py | 16 +- docker/test/uv.lock | 587 +++++++++++++++++ go.mod | 103 +-- go.sum | 191 +++--- pkg/acquisition/acquisition.go | 69 +- pkg/acquisition/acquisition_test.go | 30 +- .../configuration/configuration.go | 10 +- pkg/acquisition/modules/appsec/appsec.go | 31 +- .../modules/appsec/appsec_hooks_test.go | 4 +- .../modules/appsec/appsec_runner.go | 27 +- .../modules/appsec/appsec_runner_test.go | 99 ++- pkg/acquisition/modules/appsec/appsec_test.go | 22 +- .../modules/appsec/bodyprocessors/raw.go | 7 +- pkg/acquisition/modules/appsec/utils.go | 1 + .../modules/cloudwatch/cloudwatch.go | 39 +- pkg/acquisition/modules/docker/docker.go | 132 ++-- pkg/acquisition/modules/docker/docker_test.go | 81 ++- pkg/acquisition/modules/file/file.go | 4 +- pkg/acquisition/modules/file/file_test.go | 18 +- pkg/acquisition/modules/http/http.go | 37 +- pkg/acquisition/modules/http/http_test.go | 98 +-- .../modules/journalctl/journalctl.go | 37 +- .../modules/journalctl/journalctl_test.go | 9 +- pkg/acquisition/modules/kafka/kafka.go | 7 +- pkg/acquisition/modules/kafka/kafka_test.go | 6 +- pkg/acquisition/modules/kinesis/kinesis.go | 132 +++- .../modules/kinesis/kinesis_test.go | 101 +-- .../modules/kubernetesaudit/k8s_audit.go | 31 +- .../modules/kubernetesaudit/k8s_audit_test.go | 6 +- pkg/acquisition/modules/loki/loki.go | 4 +- .../syslog/internal/parser/rfc3164/parse.go | 1 - .../syslog/internal/parser/rfc5424/parse.go | 3 - .../internal/parser/rfc5424/parse_test.go | 58 +- .../syslog/internal/server/syslogserver.go | 1 - pkg/acquisition/modules/syslog/syslog.go | 4 +- pkg/acquisition/modules/syslog/syslog_test.go | 4 +- .../victorialogs/internal/vlclient/types.go | 12 + .../internal/vlclient/vl_client.go | 405 ++++++++++++ .../modules/victorialogs/victorialogs.go | 369 +++++++++++ .../modules/victorialogs/victorialogs_test.go | 479 ++++++++++++++ .../wineventlog/wineventlog_windows.go | 4 +- 
.../wineventlog/wineventlog_windows_test.go | 56 +- pkg/acquisition/test_files/env.yaml | 6 + pkg/acquisition/victorialogs.go | 12 + pkg/alertcontext/alertcontext.go | 29 +- pkg/alertcontext/alertcontext_test.go | 57 +- pkg/apiclient/alerts_service_test.go | 18 +- pkg/apiclient/auth_jwt.go | 3 - pkg/apiclient/auth_key_test.go | 6 +- pkg/apiclient/client.go | 4 +- pkg/apiclient/client_http.go | 9 +- pkg/apiclient/client_http_test.go | 4 +- pkg/apiclient/client_test.go | 36 +- pkg/apiclient/decisions_service_test.go | 31 +- pkg/apiserver/alerts_test.go | 109 ++-- pkg/apiserver/apic.go | 1 - pkg/apiserver/apiserver.go | 44 +- pkg/apiserver/apiserver_test.go | 4 +- pkg/apiserver/controllers/v1/decisions.go | 5 +- pkg/apiserver/controllers/v1/errors.go | 12 - pkg/apiserver/decisions_test.go | 16 +- pkg/apiserver/jwt_test.go | 10 +- pkg/apiserver/machines_test.go | 6 +- pkg/apiserver/middlewares/v1/api_key.go | 1 - pkg/appsec/appsec.go | 1 - pkg/appsec/appsec_rule/appsec_rule.go | 1 - pkg/appsec/appsec_rule/modsec_rule_test.go | 2 - pkg/appsec/appsec_rules_collection.go | 5 +- pkg/appsec/coraza_logger.go | 2 +- pkg/appsec/request_test.go | 3 - pkg/cache/cache_test.go | 13 +- pkg/csconfig/api.go | 4 +- pkg/csconfig/common.go | 8 +- pkg/csconfig/config.go | 9 +- pkg/csconfig/cscli.go | 1 + pkg/csconfig/fflag.go | 2 +- pkg/csplugin/broker.go | 4 +- pkg/csplugin/listfiles_test.go | 19 +- pkg/csplugin/watcher_test.go | 13 +- pkg/csprofiles/csprofiles.go | 44 +- pkg/csprofiles/csprofiles_test.go | 10 +- pkg/cticlient/example/fire.go | 10 +- pkg/cticlient/types.go | 34 +- pkg/cticlient/types_test.go | 12 +- pkg/cwhub/cwhub.go | 8 +- pkg/cwhub/cwhub_test.go | 36 +- pkg/cwhub/dataset.go | 72 --- pkg/cwhub/doc.go | 29 +- pkg/cwhub/download.go | 126 ++++ pkg/cwhub/download_test.go | 182 ++++++ pkg/cwhub/errors.go | 19 - pkg/cwhub/fetch.go | 70 ++ pkg/cwhub/hub.go | 60 +- pkg/cwhub/hub_test.go | 263 ++++++-- pkg/cwhub/item.go | 307 ++++----- pkg/cwhub/item_test.go | 25 +- 
pkg/cwhub/iteminstall.go | 73 --- pkg/cwhub/iteminstall_test.go | 10 +- pkg/cwhub/itemlink.go | 78 --- pkg/cwhub/itemremove.go | 138 ---- pkg/cwhub/itemupgrade.go | 254 -------- pkg/cwhub/itemupgrade_test.go | 15 +- pkg/cwhub/remote.go | 84 --- pkg/cwhub/state.go | 62 ++ pkg/cwhub/state_test.go | 77 +++ pkg/cwhub/sync.go | 401 +++++++----- pkg/cwversion/component/component.go | 29 +- pkg/cwversion/version.go | 18 +- pkg/cwversion/version_test.go | 68 ++ pkg/database/alertfilter.go | 258 ++++++++ pkg/database/alerts.go | 288 +-------- pkg/database/database.go | 2 +- pkg/database/errors.go | 1 - pkg/database/flush.go | 16 +- pkg/database/machines.go | 9 - pkg/dumps/parser_dump.go | 20 +- pkg/emoji/emoji.go | 4 + pkg/exprhelpers/crowdsec_cti.go | 44 +- pkg/exprhelpers/debugger.go | 112 +++- pkg/exprhelpers/debugger_test.go | 1 + pkg/exprhelpers/debuggerstub_test.go | 10 + pkg/exprhelpers/exprlib_test.go | 6 +- pkg/exprhelpers/geoip.go | 3 - pkg/exprhelpers/helpers.go | 50 +- pkg/fflag/crowdsec.go | 14 +- pkg/fflag/features_test.go | 10 +- pkg/hubops/colorize.go | 38 ++ pkg/hubops/datarefresh.go | 75 +++ pkg/hubops/disable.go | 121 ++++ pkg/hubops/doc.go | 45 ++ pkg/hubops/download.go | 212 ++++++ pkg/hubops/enable.go | 113 ++++ pkg/hubops/plan.go | 250 ++++++++ pkg/hubops/purge.go | 88 +++ pkg/hubtest/hubtest.go | 12 +- pkg/hubtest/hubtest_item.go | 36 +- pkg/hubtest/parser_assert.go | 6 +- pkg/leakybucket/bayesian.go | 4 +- pkg/leakybucket/blackhole.go | 3 - pkg/leakybucket/bucket.go | 3 +- pkg/leakybucket/buckets.go | 1 - pkg/leakybucket/buckets_test.go | 41 +- pkg/leakybucket/conditional.go | 6 +- pkg/leakybucket/manager_load.go | 132 ++-- pkg/leakybucket/manager_load_test.go | 73 ++- pkg/leakybucket/manager_run.go | 13 +- pkg/leakybucket/overflow_filter.go | 4 +- pkg/leakybucket/overflows.go | 32 +- pkg/leakybucket/processor.go | 3 +- pkg/leakybucket/reset_filter.go | 10 +- pkg/leakybucket/uniq.go | 16 +- pkg/longpollclient/client.go | 2 +- 
pkg/metabase/container.go | 56 +- pkg/metabase/metabase.go | 43 +- pkg/parser/enrich.go | 6 +- pkg/parser/enrich_date.go | 1 + pkg/parser/enrich_date_test.go | 32 + pkg/parser/enrich_geoip.go | 3 - pkg/parser/node.go | 18 +- pkg/parser/parsing_test.go | 83 +-- pkg/parser/runtime.go | 49 +- pkg/parser/stage.go | 6 +- pkg/parser/whitelist_test.go | 4 +- pkg/setup/detect_test.go | 18 +- pkg/setup/install.go | 71 +- pkg/types/appsec_event.go | 1 - pkg/types/constants.go | 34 +- pkg/types/event.go | 6 +- pkg/types/event_test.go | 2 - pkg/types/getfstype.go | 1 - pkg/types/ip.go | 10 +- pkg/types/ip_test.go | 18 +- pkg/types/utils.go | 38 +- rpm/SPECS/crowdsec.spec | 41 +- test/bats/01_crowdsec.bats | 39 +- test/bats/01_cscli.bats | 44 +- test/bats/01_cscli_lapi.bats | 16 +- test/bats/02_nolapi.bats | 12 - test/bats/03_noagent.bats | 12 - test/bats/04_nocapi.bats | 13 +- test/bats/07_setup.bats | 31 +- test/bats/08_metrics.bats | 4 +- test/bats/08_metrics_bouncer.bats | 28 +- test/bats/10_bouncers.bats | 7 +- test/bats/20_hub.bats | 44 +- test/bats/20_hub_collections.bats | 381 ----------- test/bats/20_hub_collections_dep.bats | 26 +- test/bats/20_hub_items.bats | 82 ++- test/bats/20_hub_parsers.bats | 383 ----------- test/bats/20_hub_postoverflows.bats | 383 ----------- test/bats/20_hub_scenarios.bats | 382 ----------- test/bats/30_machines.bats | 5 +- test/bats/80_alerts.bats | 2 +- test/bats/90_decisions.bats | 17 +- test/bats/crowdsec-acquisition.bats | 78 +++ test/bats/cscli-hubtype-inspect.bats | 93 +++ test/bats/cscli-hubtype-install.bats | 301 +++++++++ test/bats/cscli-hubtype-list.bats | 130 ++++ test/bats/cscli-hubtype-remove.bats | 245 +++++++ test/bats/cscli-hubtype-upgrade.bats | 253 ++++++++ test/bats/cscli-parsers.bats | 44 ++ test/bats/cscli-postoverflows.bats | 44 ++ test/bats/hub-index.bats | 357 +++++++++++ test/bin/remove-all-hub-items | 2 +- test/lib/config/config-local | 2 +- test/lib/setup_file.sh | 24 +- test/localstack/docker-compose.yml | 1 - 
wizard.sh | 72 +-- 344 files changed, 10732 insertions(+), 7624 deletions(-) create mode 100644 cmd/crowdsec-cli/cliconfig/backup.go rename cmd/crowdsec-cli/{ => cliconfig}/config.go (58%) rename cmd/crowdsec-cli/{config_feature_flags.go => cliconfig/feature_flags.go} (96%) create mode 100644 cmd/crowdsec-cli/cliconfig/restore.go rename cmd/crowdsec-cli/{config_show.go => cliconfig/show.go} (99%) rename cmd/crowdsec-cli/{config_showyaml.go => cliconfig/showyaml.go} (62%) delete mode 100644 cmd/crowdsec-cli/cliitem/appsec.go create mode 100644 cmd/crowdsec-cli/cliitem/cmdinspect.go create mode 100644 cmd/crowdsec-cli/cliitem/cmdinstall.go create mode 100644 cmd/crowdsec-cli/cliitem/cmdremove.go create mode 100644 cmd/crowdsec-cli/cliitem/cmdupgrade.go delete mode 100644 cmd/crowdsec-cli/cliitem/collection.go delete mode 100644 cmd/crowdsec-cli/cliitem/context.go create mode 100644 cmd/crowdsec-cli/cliitem/hubappsec.go create mode 100644 cmd/crowdsec-cli/cliitem/hubcollection.go create mode 100644 cmd/crowdsec-cli/cliitem/hubcontext.go create mode 100644 cmd/crowdsec-cli/cliitem/hubparser.go create mode 100644 cmd/crowdsec-cli/cliitem/hubpostoverflow.go rename cmd/crowdsec-cli/{clihub/item_metrics.go => cliitem/metrics.go} (78%) create mode 100644 cmd/crowdsec-cli/cliitem/metrics_table.go delete mode 100644 cmd/crowdsec-cli/cliitem/parser.go delete mode 100644 cmd/crowdsec-cli/cliitem/postoverflow.go delete mode 100644 cmd/crowdsec-cli/cliitem/suggest.go delete mode 100644 cmd/crowdsec-cli/config_backup.go delete mode 100644 cmd/crowdsec-cli/config_restore.go delete mode 100644 cmd/crowdsec-cli/copyfile.go create mode 100644 cmd/crowdsec-cli/reload/message.go rename cmd/crowdsec-cli/reload/{reload_freebsd.go => message_freebsd.go} (64%) rename cmd/crowdsec-cli/reload/{reload_linux.go => message_linux.go} (62%) create mode 100644 cmd/crowdsec-cli/reload/message_windows.go delete mode 100644 cmd/crowdsec-cli/reload/reload_windows.go delete mode 100644 debian/preinst 
create mode 100644 docker/test/.python-version delete mode 100644 docker/test/Pipfile delete mode 100644 docker/test/Pipfile.lock create mode 100644 docker/test/README.md create mode 100644 docker/test/pyproject.toml create mode 100644 docker/test/uv.lock create mode 100644 pkg/acquisition/modules/victorialogs/internal/vlclient/types.go create mode 100644 pkg/acquisition/modules/victorialogs/internal/vlclient/vl_client.go create mode 100644 pkg/acquisition/modules/victorialogs/victorialogs.go create mode 100644 pkg/acquisition/modules/victorialogs/victorialogs_test.go create mode 100644 pkg/acquisition/test_files/env.yaml create mode 100644 pkg/acquisition/victorialogs.go delete mode 100644 pkg/cwhub/dataset.go create mode 100644 pkg/cwhub/download.go create mode 100644 pkg/cwhub/download_test.go delete mode 100644 pkg/cwhub/errors.go create mode 100644 pkg/cwhub/fetch.go delete mode 100644 pkg/cwhub/iteminstall.go delete mode 100644 pkg/cwhub/itemlink.go delete mode 100644 pkg/cwhub/itemremove.go delete mode 100644 pkg/cwhub/itemupgrade.go delete mode 100644 pkg/cwhub/remote.go create mode 100644 pkg/cwhub/state.go create mode 100644 pkg/cwhub/state_test.go create mode 100644 pkg/cwversion/version_test.go create mode 100644 pkg/database/alertfilter.go create mode 100644 pkg/exprhelpers/debuggerstub_test.go create mode 100644 pkg/hubops/colorize.go create mode 100644 pkg/hubops/datarefresh.go create mode 100644 pkg/hubops/disable.go create mode 100644 pkg/hubops/doc.go create mode 100644 pkg/hubops/download.go create mode 100644 pkg/hubops/enable.go create mode 100644 pkg/hubops/plan.go create mode 100644 pkg/hubops/purge.go delete mode 100644 test/bats/20_hub_collections.bats delete mode 100644 test/bats/20_hub_parsers.bats delete mode 100644 test/bats/20_hub_postoverflows.bats delete mode 100644 test/bats/20_hub_scenarios.bats create mode 100644 test/bats/crowdsec-acquisition.bats create mode 100644 test/bats/cscli-hubtype-inspect.bats create mode 100644 
test/bats/cscli-hubtype-install.bats create mode 100644 test/bats/cscli-hubtype-list.bats create mode 100644 test/bats/cscli-hubtype-remove.bats create mode 100644 test/bats/cscli-hubtype-upgrade.bats create mode 100644 test/bats/cscli-parsers.bats create mode 100644 test/bats/cscli-postoverflows.bats create mode 100644 test/bats/hub-index.bats diff --git a/.github/workflows/bats-hub.yml b/.github/workflows/bats-hub.yml index e631c3ebc71..42f1252c8b9 100644 --- a/.github/workflows/bats-hub.yml +++ b/.github/workflows/bats-hub.yml @@ -33,7 +33,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-mysql.yml b/.github/workflows/bats-mysql.yml index a94e28b1f97..394b85427fe 100644 --- a/.github/workflows/bats-mysql.yml +++ b/.github/workflows/bats-mysql.yml @@ -36,7 +36,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-postgres.yml b/.github/workflows/bats-postgres.yml index a1054463341..25c302da787 100644 --- a/.github/workflows/bats-postgres.yml +++ b/.github/workflows/bats-postgres.yml @@ -45,7 +45,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: "Install bats dependencies" env: diff --git a/.github/workflows/bats-sqlite-coverage.yml b/.github/workflows/bats-sqlite-coverage.yml index ac685bf4e87..a5b2758b6b0 100644 --- a/.github/workflows/bats-sqlite-coverage.yml +++ b/.github/workflows/bats-sqlite-coverage.yml @@ -31,7 +31,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: "Install bats dependencies" env: diff --git a/.github/workflows/ci-windows-build-msi.yml b/.github/workflows/ci-windows-build-msi.yml index 07e29071e05..5f26b0fccbf 100644 --- 
a/.github/workflows/ci-windows-build-msi.yml +++ b/.github/workflows/ci-windows-build-msi.yml @@ -35,7 +35,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: Build run: make windows_installer BUILD_RE2_WASM=1 diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 4128cb435f9..cd37c7afaa9 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -52,7 +52,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod cache-dependency-path: "**/go.sum" # Initializes the CodeQL tools for scanning. diff --git a/.github/workflows/docker-tests.yml b/.github/workflows/docker-tests.yml index 918f3bcaf1d..647f3e55cdb 100644 --- a/.github/workflows/docker-tests.yml +++ b/.github/workflows/docker-tests.yml @@ -49,28 +49,30 @@ jobs: cache-from: type=gha cache-to: type=gha,mode=min - - name: "Setup Python" + - name: "Create Docker network" + run: docker network create net-test + + - name: Install uv + uses: astral-sh/setup-uv@v5 + with: + version: 0.5.24 + enable-cache: true + cache-dependency-glob: "uv.lock" + + - name: "Set up Python" uses: actions/setup-python@v5 with: - python-version: "3.x" - cache: 'pipenv' + python-version-file: "./docker/test/.python-version" - - name: "Install dependencies" + # running serially to reduce test flakiness + - name: Lint and run the tests run: | cd docker/test - python -m pip install --upgrade pipenv wheel - pipenv install --deploy - - - name: "Create Docker network" - run: docker network create net-test - - - name: "Run tests" + uv sync --all-extras --dev --locked + uv run ruff check + uv run pytest tests -n 1 --durations=0 --color=yes env: CROWDSEC_TEST_VERSION: test CROWDSEC_TEST_FLAVORS: ${{ matrix.flavor }} CROWDSEC_TEST_NETWORK: net-test CROWDSEC_TEST_TIMEOUT: 90 - # running serially to reduce test flakiness - run: | - cd docker/test - 
pipenv run pytest -n 1 --durations=0 --color=yes diff --git a/.github/workflows/go-tests-windows.yml b/.github/workflows/go-tests-windows.yml index 2966b999a4a..68cb9715b18 100644 --- a/.github/workflows/go-tests-windows.yml +++ b/.github/workflows/go-tests-windows.yml @@ -34,7 +34,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: Build run: | @@ -61,6 +61,6 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.61 + version: v1.63 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 3f4aa67e139..5a8148c473e 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -42,7 +42,6 @@ jobs: DEBUG: "" LAMBDA_EXECUTOR: "" KINESIS_ERROR_PROBABILITY: "" - DOCKER_HOST: unix:///var/run/docker.sock KINESIS_INITIALIZE_STREAMS: ${{ env.KINESIS_INITIALIZE_STREAMS }} LOCALSTACK_HOST: ${{ env.AWS_HOST }} # Required so that resource urls are provided properly # e.g sqs url will get localhost if we don't set this env to map our service @@ -115,6 +114,17 @@ jobs: --health-retries 5 --health-start-period 30s + victorialogs: + image: victoriametrics/victoria-logs:v1.5.0-victorialogs + ports: + - "9428:9428" + options: >- + --name=victorialogs1 + --health-cmd "wget -q -O - http://0.0.0.0:9428" + --health-interval 30s + --health-timeout 10s + --health-retries 5 + --health-start-period 30s steps: - name: Check out CrowdSec repository @@ -126,7 +136,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: Run "go generate" and check for changes run: | @@ -144,11 +154,11 @@ jobs: go generate ./... protoc --version if [[ $(git status --porcelain) ]]; then - echo "Error: Uncommitted changes found after running 'make generate'. Please commit all generated code." 
+ echo "Error: Uncommitted changes found after running 'go generate'. Please commit all generated code." git diff exit 1 else - echo "No changes detected after running 'make generate'." + echo "No changes detected after running 'go generate'." fi - name: Create localstack streams @@ -190,6 +200,6 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: - version: v1.61 + version: v1.63 args: --issues-exit-code=1 --timeout 10m only-new-issues: false diff --git a/.github/workflows/publish-tarball-release.yml b/.github/workflows/publish-tarball-release.yml index 6a41c3fba53..18541f86e41 100644 --- a/.github/workflows/publish-tarball-release.yml +++ b/.github/workflows/publish-tarball-release.yml @@ -25,7 +25,7 @@ jobs: - name: "Set up Go" uses: actions/setup-go@v5 with: - go-version: "1.23" + go-version-file: go.mod - name: Build the binaries run: | diff --git a/.gitignore b/.gitignore index 6e6624fd282..cba570fdb84 100644 --- a/.gitignore +++ b/.gitignore @@ -21,6 +21,9 @@ # Test dependencies test/tools/* +# Saved test status +test/bats/.bats/run-logs + # VMs used for dev/test .vagrant diff --git a/.golangci.yml b/.golangci.yml index acde901dbe6..b3be5adb687 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,6 +1,41 @@ # https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml +run: + build-tags: + - expr_debug + linters-settings: + errcheck: + # Report about not checking of errors in type assertions: `a := b.(MyStruct)`. + # Such cases aren't reported by default. + # Default: false + check-type-assertions: false + # List of functions to exclude from checking, where each entry is a single function to exclude. + # See https://github.com/kisielk/errcheck#excluding-functions for details. 
+ exclude-functions: + - (*bytes.Buffer).ReadFrom # TODO: + - io.Copy # TODO: + - (net/http.ResponseWriter).Write # TODO: + - (*os/exec.Cmd).Start + - (*os/exec.Cmd).Wait + - (*os.Process).Kill + - (*text/template.Template).ExecuteTemplate + - syscall.FreeLibrary + - golang.org/x/sys/windows.CloseHandle + - golang.org/x/sys/windows.ResetEvent + - (*golang.org/x/sys/windows/svc/eventlog.Log).Info + - (*golang.org/x/sys/windows/svc/mgr.Mgr).Disconnect + + - (github.com/bluele/gcache.Cache).Set + - (github.com/gin-gonic/gin.ResponseWriter).WriteString + - (*github.com/segmentio/kafka-go.Reader).SetOffsetAt + - (*gopkg.in/tomb.v2.Tomb).Wait + + - (*github.com/crowdsecurity/crowdsec/pkg/appsec.ReqDumpFilter).FilterArgs + - (*github.com/crowdsecurity/crowdsec/pkg/appsec.ReqDumpFilter).FilterBody + - (*github.com/crowdsecurity/crowdsec/pkg/appsec.ReqDumpFilter).FilterHeaders + - (*github.com/crowdsecurity/crowdsec/pkg/longpollclient.LongPollClient).Stop + gci: sections: - standard @@ -62,6 +97,7 @@ linters-settings: - "!**/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go" - "!**/pkg/acquisition/modules/loki/loki.go" - "!**/pkg/acquisition/modules/loki/timestamp_test.go" + - "!**/pkg/acquisition/modules/victorialogs/victorialogs.go" - "!**/pkg/acquisition/modules/s3/s3.go" - "!**/pkg/acquisition/modules/syslog/syslog.go" - "!**/pkg/acquisition/modules/wineventlog/wineventlog_windows.go" @@ -118,7 +154,7 @@ linters-settings: arguments: [6] - name: function-length # lower this after refactoring - arguments: [110, 237] + arguments: [111, 238] - name: get-return disabled: true - name: increment-decrement @@ -183,23 +219,17 @@ linters-settings: - ifElseChain - importShadow - hugeParam - - rangeValCopy - commentedOutCode - commentedOutImport - unnamedResult - sloppyReassign - appendCombine - - captLocal - typeUnparen - commentFormatting - deferInLoop # - - sprintfQuotedString # - whyNoLint - equalFold # - unnecessaryBlock # - - ptrToRefParam # - - stringXbytes # - - 
appendAssign # - tooManyResultsChecker - unnecessaryDefer - docStub @@ -211,9 +241,7 @@ linters: # # DEPRECATED by golangi-lint # - - execinquery - exportloopref - - gomnd # # Redundant @@ -322,10 +350,6 @@ issues: - govet text: "shadow: declaration of \"(err|ctx)\" shadows declaration" - - linters: - - errcheck - text: "Error return value of `.*` is not checked" - # Will fix, trivial - just beware of merge conflicts - linters: @@ -340,14 +364,6 @@ issues: - errorlint text: "non-wrapping format verb for fmt.Errorf. Use `%w` to format errors" - - linters: - - errorlint - text: "type assertion on error will fail on wrapped errors. Use errors.As to check for specific errors" - - - linters: - - errorlint - text: "type switch on error will fail on wrapped errors. Use errors.As to check for specific errors" - - linters: - nosprintfhostport text: "host:port in url should be constructed with net.JoinHostPort and not directly with fmt.Sprintf" @@ -383,6 +399,11 @@ issues: path: pkg/acquisition/modules/loki/internal/lokiclient/loki_client.go text: "confusing-naming: Method 'QueryRange' differs only by capitalization to method 'queryRange' in the same source file" + - linters: + - revive + path: pkg/acquisition/modules/victorialogs/internal/vlclient/vl_client.go + text: "confusing-naming: Method 'QueryRange' differs only by capitalization to method 'queryRange' in the same source file" + - linters: - revive path: cmd/crowdsec-cli/copyfile.go @@ -409,12 +430,6 @@ issues: path: "pkg/(.+)_test.go" text: "line-length-limit: .*" - # tolerate deep exit in tests, for now - - linters: - - revive - path: "pkg/(.+)_test.go" - text: "deep-exit: .*" - # we use t,ctx instead of ctx,t in tests - linters: - revive @@ -429,30 +444,62 @@ issues: - linters: - revive - path: "cmd/crowdsec-cli/clihub/item_metrics.go" + path: "cmd/crowdsec/crowdsec.go" text: "deep-exit: .*" - linters: - revive - path: "cmd/crowdsec-cli/idgen/password.go" + path: "cmd/crowdsec/api.go" text: "deep-exit: .*" - 
linters: - revive - path: "pkg/leakybucket/overflows.go" + path: "cmd/crowdsec/win_service.go" text: "deep-exit: .*" - linters: - - revive - path: "cmd/crowdsec/crowdsec.go" - text: "deep-exit: .*" + - recvcheck + path: "pkg/csplugin/hclog_adapter.go" + text: 'the methods of "HCLogAdapter" use pointer receiver and non-pointer receiver.' + # encoding to json/yaml requires value receivers - linters: - - revive - path: "cmd/crowdsec/api.go" - text: "deep-exit: .*" + - recvcheck + path: "pkg/cwhub/item.go" + text: 'the methods of "Item" use pointer receiver and non-pointer receiver.' + + - linters: + - gocritic + path: "cmd/crowdsec-cli" + text: "rangeValCopy: .*" + + - linters: + - gocritic + path: "pkg/(cticlient|hubtest)" + text: "rangeValCopy: .*" + + - linters: + - gocritic + path: "(.+)_test.go" + text: "rangeValCopy: .*" + + - linters: + - gocritic + path: "pkg/(appsec|acquisition|dumps|alertcontext|leakybucket|exprhelpers)" + text: "rangeValCopy: .*" - linters: - revive - path: "cmd/crowdsec/win_service.go" - text: "deep-exit: .*" + path: "pkg/types/utils.go" + text: "argument-limit: .*" + + # need some cleanup first: to create db in memory and share the client, not the config + - linters: + - usetesting + path: "pkg/apiserver/(.+)_test.go" + text: "os.MkdirTemp.* could be replaced by t.TempDir.*" + + - linters: + - usetesting + path: "pkg/apiserver/(.+)_test.go" + text: "os.CreateTemp.* could be replaced by os.CreateTemp.*" diff --git a/Dockerfile b/Dockerfile index 880df88dc02..d368f0f6ede 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,4 @@ -# vim: set ft=dockerfile: -FROM golang:1.23-alpine3.20 AS build +FROM docker.io/golang:1.23-alpine3.20 AS build ARG BUILD_VERSION @@ -31,7 +30,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 CGO_CFLAGS="-D_LARGEFILE64_ # In case we need to remove agents here.. 
# cscli machines list -o json | yq '.[].machineId' | xargs -r cscli machines delete -FROM alpine:latest AS slim +FROM docker.io/alpine:latest AS slim RUN apk add --no-cache --repository=http://dl-cdn.alpinelinux.org/alpine/edge/community tzdata bash rsync && \ mkdir -p /staging/etc/crowdsec && \ diff --git a/Dockerfile.debian b/Dockerfile.debian index 5d47f167e99..a9b58c633ed 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,5 +1,4 @@ -# vim: set ft=dockerfile: -FROM golang:1.23-bookworm AS build +FROM docker.io/golang:1.23-bookworm AS build ARG BUILD_VERSION @@ -36,7 +35,7 @@ RUN make clean release DOCKER_BUILD=1 BUILD_STATIC=1 && \ # In case we need to remove agents here.. # cscli machines list -o json | yq '.[].machineId' | xargs -r cscli machines delete -FROM debian:bookworm-slim AS slim +FROM docker.io/debian:bookworm-slim AS slim ENV DEBIAN_FRONTEND=noninteractive ENV DEBCONF_NOWARNINGS="yes" diff --git a/Makefile b/Makefile index f8ae66e1cb6..93387488001 100644 --- a/Makefile +++ b/Makefile @@ -138,6 +138,7 @@ COMPONENTS := \ datasource_journalctl \ datasource_kinesis \ datasource_loki \ + datasource_victorialogs \ datasource_s3 \ datasource_syslog \ datasource_wineventlog \ diff --git a/README.md b/README.md index 1e57d4e91c4..dc6d3ee6806 100644 --- a/README.md +++ b/README.md @@ -8,83 +8,47 @@

- - - - -Go Reference - - - - - +

+_CrowdSec is an open-source and participative security solution offering crowdsourced server detection and protection against malicious IPs. Detect and block with our Security Engine, contribute to the network, and enjoy our real-time community blocklist._ +

-:computer: Console (WebApp) -:books: Documentation -:diamond_shape_with_a_dot_inside: Configuration Hub -:speech_balloon: Discourse (Forum) -:speech_balloon: Discord (Live Chat) +CrowdSec schema

+## Features & Advantages -:dancer: This is a community-driven project, we need your feedback. - -## +### Versatile Security Engine -CrowdSec is a free, modern & collaborative behavior detection engine, coupled with a global IP reputation network. It stacks on fail2ban's philosophy but is IPV6 compatible and 60x faster (Go vs Python), it uses Grok patterns to parse logs and YAML scenarios to identify behaviors. CrowdSec is engineered for modern Cloud / Containers / VM-based infrastructures (by decoupling detection and remediation). Once detected you can remedy threats with various bouncers (firewall block, nginx http 403, Captchas, etc.) while the aggressive IP can be sent to CrowdSec for curation before being shared among all users to further improve everyone's security. See [FAQ](https://doc.crowdsec.net/docs/faq) or read below for more. +[CrowdSec Security Engine](https://doc.crowdsec.net/docs/next/intro/) is an all-in-one [IDS/IPS](https://doc.crowdsec.net/docs/next/log_processor/intro) and [WAF](https://doc.crowdsec.net/docs/next/appsec/intro). -## 2 mins install +It detects bad behaviors by analyzing log sources and HTTP requests, and allows active remediation thanks to the [Remediation Components](https://doc.crowdsec.net/u/bouncers/intro). -Installing it through the [Package system](https://doc.crowdsec.net/docs/getting_started/install_crowdsec) of your OS is the easiest way to proceed. -Otherwise, you can install it from source. +[Detection rules are available on our hub](https://hub.crowdsec.net) under MIT license. -### From package (Debian) +### CrowdSec Community Blocklist -```sh -curl -s https://packagecloud.io/install/repositories/crowdsec/crowdsec/script.deb.sh | sudo bash -sudo apt-get update -sudo apt-get install crowdsec -``` + -### From package (rhel/centos/amazon linux) +The "Community Blocklist" is a curated list of IP addresses identified as malicious by CrowdSec. 
The Security Engine proactively blocks the IP addresses of this blocklist, preventing malevolent IPs from reaching your systems. -```sh -curl -s https://packagecloud.io/install/repositories/crowdsec/crowdsec/script.rpm.sh | sudo bash -sudo yum install crowdsec -``` +[![CrowdSec Community Blocklist](https://doc.crowdsec.net/assets/images/data_insights-1e7678f47cb672122cc847d068b6eadf.png)](https://doc.crowdsec.net/docs/next/central_api/community_blocklist) + -### From package (FreeBSD) - -``` -sudo pkg update -sudo pkg install crowdsec -``` + -### From source +### Console - Monitoring & Automation of your security stack -```sh -wget https://github.com/crowdsecurity/crowdsec/releases/latest/download/crowdsec-release.tgz -tar xzvf crowdsec-release.tgz -cd crowdsec-v* && sudo ./wizard.sh -i -``` +[![CrowdSec Console](https://doc.crowdsec.net/assets/images/visualizer-summary-c8087e2eaef65d110bad6a7f274cf953.png)](https://doc.crowdsec.net/u/console/intro) -## :information_source: About the CrowdSec project +### Multiple Platforms support -Crowdsec is an open-source, lightweight software, detecting peers with aggressive behaviors to prevent them from accessing your systems. Its user-friendly design and assistance offer a low technical barrier of entry and nevertheless a high security gain. +[![Multiple Platforms support](https://github.com/crowdsecurity/crowdsec-docs/blob/main/crowdsec-docs/static/img/supported_platforms.png)](https://doc.crowdsec.net/) -The architecture is as follows : - -

- CrowdSec -

- -Once an unwanted behavior is detected, deal with it through a [bouncer](https://app.crowdsec.net/hub/remediation-components). The aggressive IP, scenario triggered and timestamp are sent for curation, to avoid poisoning & false positives. (This can be disabled). If verified, this IP is then redistributed to all CrowdSec users running the same scenario. ## Outnumbering hackers all together @@ -92,72 +56,18 @@ By sharing the threat they faced, all users are protecting each-others (hence th CrowdSec ships by default with scenarios (brute force, port scan, web scan, etc.) adapted for most contexts, but you can easily extend it by picking more of them from the **[HUB](https://hub.crowdsec.net)**. It is also easy to adapt an existing one or create one yourself. -## :point_right: What it is not - -CrowdSec is not a SIEM, storing your logs (neither locally nor remotely). Your data are analyzed locally and forgotten. - -Signals sent to the curation platform are limited to the very strict minimum: IP, Scenario, Timestamp. They are only used to allow the system to spot new rogue IPs, and rule out false positives or poisoning attempts. - -## :arrow_down: Install it ! - -Crowdsec is available for various platforms : - - - [Use our debian repositories](https://doc.crowdsec.net/docs/getting_started/install_crowdsec) or the [official debian packages](https://packages.debian.org/search?keywords=crowdsec&searchon=names&suite=stable§ion=all) - - An [image](https://hub.docker.com/r/crowdsecurity/crowdsec) is available for docker - - [Prebuilt release packages](https://github.com/crowdsecurity/crowdsec/releases) are also available (suitable for `amd64`) - - You can as well [build it from source](https://doc.crowdsec.net/docs/user_guides/building) - -Or look directly at [installation documentation](https://doc.crowdsec.net/docs/getting_started/install_crowdsec) for other methods and platforms. - -## :tada: Key benefits - -### Fast assisted installation, no technical barrier - -
- Initial configuration is automated, providing functional out-of-the-box setup - -
- -### Out of the box detection - -
- Baseline detection is effective out-of-the-box, no fine-tuning required (click to expand) - -
- -### Easy bouncer deployment - -
- It's trivial to add bouncers to enforce decisions of crowdsec (click to expand) - -
- -### Easy dashboard access - -
- It's easy to deploy a metabase interface to view your data simply with cscli (click to expand) - -
- -### Hot & Cold logs - -
- Process cold logs, for forensic, tests and chasing false positives & false negatives (click to expand) - -
- - -## 📦 About this repository - -This repository contains the code for the two main components of crowdsec : - - `crowdsec` : the daemon a-la-fail2ban that can read, parse, enrich and apply heuristics to logs. This is the component in charge of "detecting" the attacks - - `cscli` : the cli tool mainly used to interact with crowdsec : ban/unban/view current bans, enable/disable parsers and scenarios. +## Installation + -## Contributing +[Follow our documentation to install CrowdSec in a few minutes on Linux, Windows, Docker, OpnSense, Kubernetes, and more.](https://doc.crowdsec.net/) -If you wish to contribute to the core of crowdsec, you are welcome to open a PR in this repository. -If you wish to add a new parser, scenario or collection, please open a PR in the [hub repository](https://github.com/crowdsecurity/hub). +## Resources -If you wish to contribute to the documentation, please open a PR in the [documentation repository](http://github.com/crowdsecurity/crowdsec-docs). + - [Console](https://app.crowdsec.net): Supercharge your CrowdSec setup with visualization, management capabilities, extra blocklists and premium features. + - [Documentation](https://doc.crowdsec.net): Learn how to exploit your CrowdSec setup to deter more attacks. + - [Discord](https://discord.gg/crowdsec): A question or a suggestion? This is the place. + - [Hub](https://hub.crowdsec.net): Improve your stack protection, find the relevant remediation components for your infrastructure. + - [CrowdSec Academy](https://academy.crowdsec.net/): Learn and grow with our courses. + - [Corporate Website](https://crowdsec.net): For everything else. 
diff --git a/cmd/crowdsec-cli/clialert/alerts.go b/cmd/crowdsec-cli/clialert/alerts.go index 5907d4a0fa8..4ae72919a9e 100644 --- a/cmd/crowdsec-cli/clialert/alerts.go +++ b/cmd/crowdsec-cli/clialert/alerts.go @@ -78,7 +78,7 @@ func (cli *cliAlerts) alertsToTable(alerts *models.GetAlertsResponse, printMachi alertItem.Source.Cn, alertItem.Source.GetAsNumberName(), decisionsFromAlert(alertItem), - *alertItem.StartAt, + alertItem.CreatedAt, } if printMachine { row = append(row, alertItem.MachineID) diff --git a/cmd/crowdsec-cli/clialert/table.go b/cmd/crowdsec-cli/clialert/table.go index 1416e1e435c..4fe7c4b99c6 100644 --- a/cmd/crowdsec-cli/clialert/table.go +++ b/cmd/crowdsec-cli/clialert/table.go @@ -86,7 +86,7 @@ func alertDecisionsTable(out io.Writer, wantColor string, alert *models.Alert) { } if foundActive { - fmt.Printf(" - Active Decisions :\n") + t.Writer.SetTitle("Active Decisions") t.Render() // Send output } } diff --git a/cmd/crowdsec-cli/clibouncer/inspect.go b/cmd/crowdsec-cli/clibouncer/inspect.go index b62344baa9b..9f1d56124d8 100644 --- a/cmd/crowdsec-cli/clibouncer/inspect.go +++ b/cmd/crowdsec-cli/clibouncer/inspect.go @@ -47,7 +47,7 @@ func (cli *cliBouncers) inspectHuman(out io.Writer, bouncer *ent.Bouncer) { t.AppendRow(table.Row{"Feature Flags", ff}) } - io.WriteString(out, t.Render()+"\n") + fmt.Fprint(out, t.Render()) } func (cli *cliBouncers) inspect(bouncer *ent.Bouncer) error { diff --git a/cmd/crowdsec-cli/clibouncer/list.go b/cmd/crowdsec-cli/clibouncer/list.go index a13ca994e1e..4ed22ce752f 100644 --- a/cmd/crowdsec-cli/clibouncer/list.go +++ b/cmd/crowdsec-cli/clibouncer/list.go @@ -37,7 +37,7 @@ func (cli *cliBouncers) listHuman(out io.Writer, bouncers ent.Bouncers) { t.AppendRow(table.Row{b.Name, b.IPAddress, revoked, lastPull, b.Type, b.Version, b.AuthType}) } - io.WriteString(out, t.Render()+"\n") + fmt.Fprintln(out, t.Render()) } func (cli *cliBouncers) listCSV(out io.Writer, bouncers ent.Bouncers) error { @@ -71,7 +71,6 @@ func 
(cli *cliBouncers) listCSV(out io.Writer, bouncers ent.Bouncers) error { func (cli *cliBouncers) List(ctx context.Context, out io.Writer, db *database.Client) error { // XXX: must use the provided db object, the one in the struct might be nil // (calling List directly skips the PersistentPreRunE) - bouncers, err := db.ListBouncers(ctx) if err != nil { return fmt.Errorf("unable to list bouncers: %w", err) diff --git a/cmd/crowdsec-cli/clicapi/capi.go b/cmd/crowdsec-cli/clicapi/capi.go index 61d59836fdd..14637a26e1a 100644 --- a/cmd/crowdsec-cli/clicapi/capi.go +++ b/cmd/crowdsec-cli/clicapi/capi.go @@ -66,7 +66,12 @@ func (cli *cliCapi) register(ctx context.Context, capiUserPrefix string, outputF return fmt.Errorf("unable to generate machine id: %w", err) } - password := strfmt.Password(idgen.GeneratePassword(idgen.PasswordLength)) + pstr, err := idgen.GeneratePassword(idgen.PasswordLength) + if err != nil { + return err + } + + password := strfmt.Password(pstr) apiurl, err := url.Parse(types.CAPIBaseURL) if err != nil { @@ -118,7 +123,9 @@ func (cli *cliCapi) register(ctx context.Context, capiUserPrefix string, outputF fmt.Println(string(apiConfigDump)) } - log.Warning(reload.Message) + if msg := reload.UserMessage(); msg != "" { + log.Warning(msg) + } return nil } @@ -256,7 +263,7 @@ func (cli *cliCapi) newStatusCmd() *cobra.Command { Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - hub, err := require.Hub(cli.cfg(), nil, nil) + hub, err := require.Hub(cli.cfg(), nil) if err != nil { return err } diff --git a/cmd/crowdsec-cli/cliconfig/backup.go b/cmd/crowdsec-cli/cliconfig/backup.go new file mode 100644 index 00000000000..5cd34fcf07f --- /dev/null +++ b/cmd/crowdsec-cli/cliconfig/backup.go @@ -0,0 +1,20 @@ +package cliconfig + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func (cli *cliConfig) newBackupCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "backup", + DisableAutoGenTag: true, + 
RunE: func(_ *cobra.Command, _ []string) error { + configDir := cli.cfg().ConfigPaths.ConfigDir + return fmt.Errorf("'cscli config backup' has been removed, you can manually backup/restore %s instead", configDir) + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/config.go b/cmd/crowdsec-cli/cliconfig/config.go similarity index 58% rename from cmd/crowdsec-cli/config.go rename to cmd/crowdsec-cli/cliconfig/config.go index 4cf8916ad4b..22095ac7d5b 100644 --- a/cmd/crowdsec-cli/config.go +++ b/cmd/crowdsec-cli/cliconfig/config.go @@ -1,20 +1,26 @@ -package main +package cliconfig import ( "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" ) +type configGetter func() *csconfig.Config + +type mergedConfigGetter func() string + type cliConfig struct { cfg configGetter } -func NewCLIConfig(cfg configGetter) *cliConfig { +func New(cfg configGetter) *cliConfig { return &cliConfig{ cfg: cfg, } } -func (cli *cliConfig) NewCommand() *cobra.Command { +func (cli *cliConfig) NewCommand(mergedConfigGetter mergedConfigGetter) *cobra.Command { cmd := &cobra.Command{ Use: "config [command]", Short: "Allows to view current config", @@ -23,7 +29,7 @@ func (cli *cliConfig) NewCommand() *cobra.Command { } cmd.AddCommand(cli.newShowCmd()) - cmd.AddCommand(cli.newShowYAMLCmd()) + cmd.AddCommand(cli.newShowYAMLCmd(mergedConfigGetter)) cmd.AddCommand(cli.newBackupCmd()) cmd.AddCommand(cli.newRestoreCmd()) cmd.AddCommand(cli.newFeatureFlagsCmd()) diff --git a/cmd/crowdsec-cli/config_feature_flags.go b/cmd/crowdsec-cli/cliconfig/feature_flags.go similarity index 96% rename from cmd/crowdsec-cli/config_feature_flags.go rename to cmd/crowdsec-cli/cliconfig/feature_flags.go index 760e2194bb3..c03db10ccce 100644 --- a/cmd/crowdsec-cli/config_feature_flags.go +++ b/cmd/crowdsec-cli/cliconfig/feature_flags.go @@ -1,4 +1,4 @@ -package main +package cliconfig import ( "fmt" @@ -86,7 +86,7 @@ func (cli *cliConfig) featureFlags(showRetired bool) error { fmt.Println("To 
enable a feature you can: ") fmt.Println(" - set the environment variable CROWDSEC_FEATURE_ to true") - featurePath, err := filepath.Abs(csconfig.GetFeatureFilePath(ConfigFilePath)) + featurePath, err := filepath.Abs(csconfig.GetFeatureFilePath(cli.cfg().FilePath)) if err != nil { // we already read the file, shouldn't happen return err diff --git a/cmd/crowdsec-cli/cliconfig/restore.go b/cmd/crowdsec-cli/cliconfig/restore.go new file mode 100644 index 00000000000..d368b27ea30 --- /dev/null +++ b/cmd/crowdsec-cli/cliconfig/restore.go @@ -0,0 +1,20 @@ +package cliconfig + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +func (cli *cliConfig) newRestoreCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "restore", + DisableAutoGenTag: true, + RunE: func(cmd *cobra.Command, _ []string) error { + configDir := cli.cfg().ConfigPaths.ConfigDir + return fmt.Errorf("'cscli config restore' has been removed, you can manually backup/restore %s instead", configDir) + }, + } + + return cmd +} diff --git a/cmd/crowdsec-cli/config_show.go b/cmd/crowdsec-cli/cliconfig/show.go similarity index 99% rename from cmd/crowdsec-cli/config_show.go rename to cmd/crowdsec-cli/cliconfig/show.go index 3d17d264574..90c0ab71069 100644 --- a/cmd/crowdsec-cli/config_show.go +++ b/cmd/crowdsec-cli/cliconfig/show.go @@ -1,4 +1,4 @@ -package main +package cliconfig import ( "encoding/json" diff --git a/cmd/crowdsec-cli/config_showyaml.go b/cmd/crowdsec-cli/cliconfig/showyaml.go similarity index 62% rename from cmd/crowdsec-cli/config_showyaml.go rename to cmd/crowdsec-cli/cliconfig/showyaml.go index 10549648d09..2e46a0171ab 100644 --- a/cmd/crowdsec-cli/config_showyaml.go +++ b/cmd/crowdsec-cli/cliconfig/showyaml.go @@ -1,4 +1,4 @@ -package main +package cliconfig import ( "fmt" @@ -6,19 +6,19 @@ import ( "github.com/spf13/cobra" ) -func (cli *cliConfig) showYAML() error { +func (cli *cliConfig) showYAML(mergedConfig string) error { fmt.Println(mergedConfig) return nil } -func (cli *cliConfig) 
newShowYAMLCmd() *cobra.Command { +func (cli *cliConfig) newShowYAMLCmd(mergedConfigGetter mergedConfigGetter) *cobra.Command { cmd := &cobra.Command{ Use: "show-yaml", Short: "Displays merged config.yaml + config.yaml.local", Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - return cli.showYAML() + return cli.showYAML(mergedConfigGetter()) }, } diff --git a/cmd/crowdsec-cli/cliconsole/console.go b/cmd/crowdsec-cli/cliconsole/console.go index 448ddcee7fa..fcc128bd5b5 100644 --- a/cmd/crowdsec-cli/cliconsole/console.go +++ b/cmd/crowdsec-cli/cliconsole/console.go @@ -114,7 +114,7 @@ func (cli *cliConsole) enroll(ctx context.Context, key string, name string, over } } - hub, err := require.Hub(cfg, nil, nil) + hub, err := require.Hub(cfg, nil) if err != nil { return err } @@ -214,7 +214,9 @@ Enable given information push to the central API. Allows to empower the console` log.Infof("%v have been enabled", args) } - log.Info(reload.Message) + if reload.UserMessage() != "" { + log.Info(reload.UserMessage()) + } return nil }, @@ -248,7 +250,9 @@ Disable given information push to the central API.`, log.Infof("%v have been disabled", args) } - log.Info(reload.Message) + if msg := reload.UserMessage(); msg != "" { + log.Info(msg) + } return nil }, diff --git a/cmd/crowdsec-cli/clidecision/decisions.go b/cmd/crowdsec-cli/clidecision/decisions.go index 307cabffe51..b5865bab6e0 100644 --- a/cmd/crowdsec-cli/clidecision/decisions.go +++ b/cmd/crowdsec-cli/clidecision/decisions.go @@ -170,7 +170,7 @@ func (cli *cliDecisions) NewCommand() *cobra.Command { return cmd } -func (cli *cliDecisions) list(ctx context.Context, filter apiclient.AlertsListOpts, NoSimu *bool, contained *bool, printMachine bool) error { +func (cli *cliDecisions) list(ctx context.Context, filter apiclient.AlertsListOpts, noSimu *bool, contained *bool, printMachine bool) error { var err error *filter.ScopeEquals, err = clialert.SanitizeScope(*filter.ScopeEquals, 
*filter.IPEquals, *filter.RangeEquals) @@ -181,7 +181,7 @@ func (cli *cliDecisions) list(ctx context.Context, filter apiclient.AlertsListOp filter.ActiveDecisionEquals = new(bool) *filter.ActiveDecisionEquals = true - if NoSimu != nil && *NoSimu { + if noSimu != nil && *noSimu { filter.IncludeSimulated = new(bool) } /* nullify the empty entries to avoid bad filter */ diff --git a/cmd/crowdsec-cli/clidecision/table.go b/cmd/crowdsec-cli/clidecision/table.go index 189eb80b8e5..4beda572d8e 100644 --- a/cmd/crowdsec-cli/clidecision/table.go +++ b/cmd/crowdsec-cli/clidecision/table.go @@ -3,13 +3,17 @@ package clidecision import ( "io" "strconv" + "strings" + + "github.com/fatih/color" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" "github.com/crowdsecurity/crowdsec/pkg/models" ) func (cli *cliDecisions) decisionsTable(out io.Writer, alerts *models.GetAlertsResponse, printMachine bool) { - t := cstable.New(out, cli.cfg().Cscli.Color) + wantColor := cli.cfg().Cscli.Color + t := cstable.New(out, wantColor) t.SetRowLines(false) header := []string{"ID", "Source", "Scope:Value", "Reason", "Action", "Country", "AS", "Events", "expiration", "Alert ID"} @@ -25,6 +29,11 @@ func (cli *cliDecisions) decisionsTable(out io.Writer, alerts *models.GetAlertsR *decisionItem.Type = "(simul)" + *decisionItem.Type } + duration := *decisionItem.Duration + if strings.HasPrefix(duration, "-") && wantColor != "no" { + duration = color.RedString(duration) + } + row := []string{ strconv.Itoa(int(decisionItem.ID)), *decisionItem.Origin, @@ -34,7 +43,7 @@ func (cli *cliDecisions) decisionsTable(out io.Writer, alerts *models.GetAlertsR alertItem.Source.Cn, alertItem.Source.GetAsNumberName(), strconv.Itoa(int(*alertItem.EventsCount)), - *decisionItem.Duration, + duration, strconv.Itoa(int(alertItem.ID)), } diff --git a/cmd/crowdsec-cli/clihub/hub.go b/cmd/crowdsec-cli/clihub/hub.go index f189d6a2e13..66fbe7c405a 100644 --- a/cmd/crowdsec-cli/clihub/hub.go +++ 
b/cmd/crowdsec-cli/clihub/hub.go @@ -5,15 +5,18 @@ import ( "encoding/json" "fmt" "io" + "os" "github.com/fatih/color" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "gopkg.in/yaml.v3" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/hubops" ) type configGetter = func() *csconfig.Config @@ -55,11 +58,11 @@ func (cli *cliHub) List(out io.Writer, hub *cwhub.Hub, all bool) error { cfg := cli.cfg() for _, v := range hub.Warnings { - log.Info(v) + fmt.Fprintln(os.Stderr, v) } for _, line := range hub.ItemStats() { - log.Info(line) + fmt.Fprintln(os.Stderr, line) } items := make(map[string][]*cwhub.Item) @@ -90,7 +93,7 @@ func (cli *cliHub) newListCmd() *cobra.Command { Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) + hub, err := require.Hub(cli.cfg(), log.StandardLogger()) if err != nil { return err } @@ -100,23 +103,22 @@ func (cli *cliHub) newListCmd() *cobra.Command { } flags := cmd.Flags() - flags.BoolVarP(&all, "all", "a", false, "List disabled items as well") + flags.BoolVarP(&all, "all", "a", false, "List all available items, including those not installed") return cmd } func (cli *cliHub) update(ctx context.Context, withContent bool) error { local := cli.cfg().Hub - remote := require.RemoteHub(ctx, cli.cfg()) - remote.EmbedItemContent = withContent - // don't use require.Hub because if there is no index file, it would fail - hub, err := cwhub.NewHub(local, remote, log.StandardLogger()) + hub, err := cwhub.NewHub(local, log.StandardLogger()) if err != nil { return err } - if err := hub.Update(ctx); err != nil { + indexProvider := require.HubDownloader(ctx, cli.cfg()) + + if err := hub.Update(ctx, indexProvider, withContent); err != nil { 
return fmt.Errorf("failed to update hub: %w", err) } @@ -125,7 +127,7 @@ func (cli *cliHub) update(ctx context.Context, withContent bool) error { } for _, v := range hub.Warnings { - log.Info(v) + fmt.Fprintln(os.Stderr, v) } return nil @@ -140,10 +142,18 @@ func (cli *cliHub) newUpdateCmd() *cobra.Command { Long: ` Fetches the .index.json file from the hub, containing the list of available configs. `, + Example: `# Download the last version of the index file. +cscli hub update + +# Download a 4x bigger version with all item contents (effectively pre-caching item downloads, but not data files). +cscli hub update --with-content`, Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.update(cmd.Context(), withContent) + if cmd.Flags().Changed("with-content") { + return cli.update(cmd.Context(), withContent) + } + return cli.update(cmd.Context(), cli.cfg().Cscli.HubWithContent) }, } @@ -153,36 +163,49 @@ Fetches the .index.json file from the hub, containing the list of available conf return cmd } -func (cli *cliHub) upgrade(ctx context.Context, force bool) error { - hub, err := require.Hub(cli.cfg(), require.RemoteHub(ctx, cli.cfg()), log.StandardLogger()) +func (cli *cliHub) upgrade(ctx context.Context, yes bool, dryRun bool, force bool) error { + cfg := cli.cfg() + + hub, err := require.Hub(cfg, log.StandardLogger()) if err != nil { return err } - for _, itemType := range cwhub.ItemTypes { - updated := 0 + plan := hubops.NewActionPlan(hub) - log.Infof("Upgrading %s", itemType) + contentProvider := require.HubDownloader(ctx, cfg) + for _, itemType := range cwhub.ItemTypes { for _, item := range hub.GetInstalledByType(itemType, true) { - didUpdate, err := item.Upgrade(ctx, force) - if err != nil { + if err := plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, force)); err != nil { return err } - - if didUpdate { - updated++ - } } + } + + if err := plan.AddCommand(hubops.NewDataRefreshCommand(force)); 
err != nil { + return err + } - log.Infof("Upgraded %d %s", updated, itemType) + verbose := (cfg.Cscli.Output == "raw") + + if err := plan.Execute(ctx, yes, dryRun, verbose); err != nil { + return err + } + + if msg := reload.UserMessage(); msg != "" && plan.ReloadNeeded { + fmt.Println("\n" + msg) } return nil } func (cli *cliHub) newUpgradeCmd() *cobra.Command { - var force bool + var ( + yes bool + dryRun bool + force bool + ) cmd := &cobra.Command{ Use: "upgrade", @@ -190,15 +213,23 @@ func (cli *cliHub) newUpgradeCmd() *cobra.Command { Long: ` Upgrade all configs installed from Crowdsec Hub. Run 'sudo cscli hub update' if you want the latest versions available. `, + Example: `# Upgrade all the collections, scenarios etc. to the latest version in the downloaded index. Update data files too. +cscli hub upgrade + +# Upgrade tainted items as well; force re-download of data files. +cscli hub upgrade --force`, Args: cobra.NoArgs, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return cli.upgrade(cmd.Context(), force) + return cli.upgrade(cmd.Context(), yes, dryRun, force) }, } flags := cmd.Flags() - flags.BoolVar(&force, "force", false, "Force upgrade: overwrite tainted and outdated files") + flags.BoolVar(&yes, "yes", false, "Confirm execution without prompt") + flags.BoolVar(&dryRun, "dry-run", false, "Don't install or remove anything; print the execution plan") + flags.BoolVar(&force, "force", false, "Force upgrade: overwrite tainted and outdated items; always update data files") + cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") return cmd } diff --git a/cmd/crowdsec-cli/clihub/items.go b/cmd/crowdsec-cli/clihub/items.go index f86fe65a2a1..87cb10b1f93 100644 --- a/cmd/crowdsec-cli/clihub/items.go +++ b/cmd/crowdsec-cli/clihub/items.go @@ -5,13 +5,9 @@ import ( "encoding/json" "fmt" "io" - "os" - "path/filepath" "slices" "strings" - "gopkg.in/yaml.v3" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) @@ -67,7 +63,7 @@ func 
ListItems(out io.Writer, wantColor string, itemTypes []string, items map[st continue } - listHubItemTable(out, wantColor, "\n"+strings.ToUpper(itemType), items[itemType]) + listHubItemTable(out, wantColor, strings.ToUpper(itemType), items[itemType]) nothingToDisplay = false } @@ -97,7 +93,7 @@ func ListItems(out io.Writer, wantColor string, itemTypes []string, items map[st Name: item.Name, LocalVersion: item.State.LocalVersion, LocalPath: item.State.LocalPath, - Description: item.Description, + Description: strings.TrimSpace(item.Description), Status: status, UTF8Status: fmt.Sprintf("%v %s", statusEmo, status), } @@ -109,7 +105,7 @@ func ListItems(out io.Writer, wantColor string, itemTypes []string, items map[st return fmt.Errorf("failed to parse: %w", err) } - out.Write(x) + fmt.Fprint(out, string(x)) case "raw": csvwriter := csv.NewWriter(out) @@ -128,7 +124,7 @@ func ListItems(out io.Writer, wantColor string, itemTypes []string, items map[st item.Name, item.State.Text(), item.State.LocalVersion, - item.Description, + strings.TrimSpace(item.Description), } if len(itemTypes) > 1 { row = append(row, itemType) @@ -145,42 +141,3 @@ func ListItems(out io.Writer, wantColor string, itemTypes []string, items map[st return nil } - -func InspectItem(item *cwhub.Item, wantMetrics bool, output string, prometheusURL string, wantColor string) error { - switch output { - case "human", "raw": - enc := yaml.NewEncoder(os.Stdout) - enc.SetIndent(2) - - if err := enc.Encode(item); err != nil { - return fmt.Errorf("unable to encode item: %w", err) - } - case "json": - b, err := json.MarshalIndent(*item, "", " ") - if err != nil { - return fmt.Errorf("unable to serialize item: %w", err) - } - - fmt.Print(string(b)) - } - - if output != "human" { - return nil - } - - if item.State.Tainted { - fmt.Println() - fmt.Printf(`This item is tainted. 
Use "%s %s inspect --diff %s" to see why.`, filepath.Base(os.Args[0]), item.Type, item.Name) - fmt.Println() - } - - if wantMetrics { - fmt.Printf("\nCurrent metrics: \n") - - if err := showMetrics(prometheusURL, item, wantColor); err != nil { - return err - } - } - - return nil -} diff --git a/cmd/crowdsec-cli/clihub/utils_table.go b/cmd/crowdsec-cli/clihub/utils_table.go index 98f14341b10..b89f8447896 100644 --- a/cmd/crowdsec-cli/clihub/utils_table.go +++ b/cmd/crowdsec-cli/clihub/utils_table.go @@ -3,7 +3,6 @@ package clihub import ( "fmt" "io" - "strconv" "github.com/jedib0t/go-pretty/v6/table" @@ -21,65 +20,6 @@ func listHubItemTable(out io.Writer, wantColor string, title string, items []*cw t.AppendRow(table.Row{item.Name, status, item.State.LocalVersion, item.State.LocalPath}) } - io.WriteString(out, title+"\n") - io.WriteString(out, t.Render()+"\n") -} - -func appsecMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { - t := cstable.NewLight(out, wantColor).Writer - t.AppendHeader(table.Row{"Inband Hits", "Outband Hits"}) - - t.AppendRow(table.Row{ - strconv.Itoa(metrics["inband_hits"]), - strconv.Itoa(metrics["outband_hits"]), - }) - - io.WriteString(out, fmt.Sprintf("\n - (AppSec Rule) %s:\n", itemName)) - io.WriteString(out, t.Render()+"\n") -} - -func scenarioMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { - if metrics["instantiation"] == 0 { - return - } - - t := cstable.New(out, wantColor).Writer - t.AppendHeader(table.Row{"Current Count", "Overflows", "Instantiated", "Poured", "Expired"}) - - t.AppendRow(table.Row{ - strconv.Itoa(metrics["curr_count"]), - strconv.Itoa(metrics["overflow"]), - strconv.Itoa(metrics["instantiation"]), - strconv.Itoa(metrics["pour"]), - strconv.Itoa(metrics["underflow"]), - }) - - io.WriteString(out, fmt.Sprintf("\n - (Scenario) %s:\n", itemName)) - io.WriteString(out, t.Render()+"\n") -} - -func parserMetricsTable(out io.Writer, wantColor 
string, itemName string, metrics map[string]map[string]int) { - t := cstable.New(out, wantColor).Writer - t.AppendHeader(table.Row{"Parsers", "Hits", "Parsed", "Unparsed"}) - - // don't show table if no hits - showTable := false - - for source, stats := range metrics { - if stats["hits"] > 0 { - t.AppendRow(table.Row{ - source, - strconv.Itoa(stats["hits"]), - strconv.Itoa(stats["parsed"]), - strconv.Itoa(stats["unparsed"]), - }) - - showTable = true - } - } - - if showTable { - io.WriteString(out, fmt.Sprintf("\n - (Parser) %s:\n", itemName)) - io.WriteString(out, t.Render()+"\n") - } + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } diff --git a/cmd/crowdsec-cli/clihubtest/explain.go b/cmd/crowdsec-cli/clihubtest/explain.go index dbe10fa7ec0..877aec98a37 100644 --- a/cmd/crowdsec-cli/clihubtest/explain.go +++ b/cmd/crowdsec-cli/clihubtest/explain.go @@ -14,9 +14,12 @@ func (cli *cliHubTest) explain(testName string, details bool, skipOk bool) error return fmt.Errorf("can't load test: %+v", err) } + cfg := cli.cfg() + patternDir := cfg.ConfigPaths.PatternDir + err = test.ParserAssert.LoadTest(test.ParserResultFile) if err != nil { - if err = test.Run(); err != nil { + if err = test.Run(patternDir); err != nil { return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) } @@ -27,7 +30,7 @@ func (cli *cliHubTest) explain(testName string, details bool, skipOk bool) error err = test.ScenarioAssert.LoadTest(test.ScenarioResultFile, test.BucketPourResultFile) if err != nil { - if err = test.Run(); err != nil { + if err = test.Run(patternDir); err != nil { return fmt.Errorf("running test '%s' failed: %+v", test.Name, err) } diff --git a/cmd/crowdsec-cli/clihubtest/run.go b/cmd/crowdsec-cli/clihubtest/run.go index 31cceb81884..94a3b0c10f3 100644 --- a/cmd/crowdsec-cli/clihubtest/run.go +++ b/cmd/crowdsec-cli/clihubtest/run.go @@ -42,12 +42,14 @@ func (cli *cliHubTest) run(runAll bool, nucleiTargetHost string, appSecHost stri // set timezone to avoid DST issues 
os.Setenv("TZ", "UTC") + patternDir := cfg.ConfigPaths.PatternDir + for _, test := range hubPtr.Tests { if cfg.Cscli.Output == "human" { log.Infof("Running test '%s'", test.Name) } - err := test.Run() + err := test.Run(patternDir) if err != nil { log.Errorf("running test '%s' failed: %+v", test.Name, err) } diff --git a/cmd/crowdsec-cli/cliitem/appsec.go b/cmd/crowdsec-cli/cliitem/appsec.go deleted file mode 100644 index 44afa2133bd..00000000000 --- a/cmd/crowdsec-cli/cliitem/appsec.go +++ /dev/null @@ -1,123 +0,0 @@ -package cliitem - -import ( - "fmt" - "os" - - "golang.org/x/text/cases" - "golang.org/x/text/language" - "gopkg.in/yaml.v3" - - "github.com/crowdsecurity/crowdsec/pkg/appsec" - "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" -) - -func NewAppsecConfig(cfg configGetter) *cliItem { - return &cliItem{ - cfg: cfg, - name: cwhub.APPSEC_CONFIGS, - singular: "appsec-config", - oneOrMore: "appsec-config(s)", - help: cliHelp{ - example: `cscli appsec-configs list -a -cscli appsec-configs install crowdsecurity/vpatch -cscli appsec-configs inspect crowdsecurity/vpatch -cscli appsec-configs upgrade crowdsecurity/vpatch -cscli appsec-configs remove crowdsecurity/vpatch -`, - }, - installHelp: cliHelp{ - example: `cscli appsec-configs install crowdsecurity/vpatch`, - }, - removeHelp: cliHelp{ - example: `cscli appsec-configs remove crowdsecurity/vpatch`, - }, - upgradeHelp: cliHelp{ - example: `cscli appsec-configs upgrade crowdsecurity/vpatch`, - }, - inspectHelp: cliHelp{ - example: `cscli appsec-configs inspect crowdsecurity/vpatch`, - }, - listHelp: cliHelp{ - example: `cscli appsec-configs list -cscli appsec-configs list -a -cscli appsec-configs list crowdsecurity/vpatch`, - }, - } -} - -func NewAppsecRule(cfg configGetter) *cliItem { - inspectDetail := func(item *cwhub.Item) error { - // Only show the converted rules in human mode - if cfg().Cscli.Output != "human" { - return nil - } - - appsecRule 
:= appsec.AppsecCollectionConfig{} - - yamlContent, err := os.ReadFile(item.State.LocalPath) - if err != nil { - return fmt.Errorf("unable to read file %s: %w", item.State.LocalPath, err) - } - - if err := yaml.Unmarshal(yamlContent, &appsecRule); err != nil { - return fmt.Errorf("unable to parse yaml file %s: %w", item.State.LocalPath, err) - } - - for _, ruleType := range appsec_rule.SupportedTypes() { - fmt.Printf("\n%s format:\n", cases.Title(language.Und, cases.NoLower).String(ruleType)) - - for _, rule := range appsecRule.Rules { - convertedRule, _, err := rule.Convert(ruleType, appsecRule.Name) - if err != nil { - return fmt.Errorf("unable to convert rule %s: %w", rule.Name, err) - } - - fmt.Println(convertedRule) - } - - switch ruleType { //nolint:gocritic - case appsec_rule.ModsecurityRuleType: - for _, rule := range appsecRule.SecLangRules { - fmt.Println(rule) - } - } - } - - return nil - } - - return &cliItem{ - cfg: cfg, - name: "appsec-rules", - singular: "appsec-rule", - oneOrMore: "appsec-rule(s)", - help: cliHelp{ - example: `cscli appsec-rules list -a -cscli appsec-rules install crowdsecurity/crs -cscli appsec-rules inspect crowdsecurity/crs -cscli appsec-rules upgrade crowdsecurity/crs -cscli appsec-rules remove crowdsecurity/crs -`, - }, - installHelp: cliHelp{ - example: `cscli appsec-rules install crowdsecurity/crs`, - }, - removeHelp: cliHelp{ - example: `cscli appsec-rules remove crowdsecurity/crs`, - }, - upgradeHelp: cliHelp{ - example: `cscli appsec-rules upgrade crowdsecurity/crs`, - }, - inspectHelp: cliHelp{ - example: `cscli appsec-rules inspect crowdsecurity/crs`, - }, - inspectDetail: inspectDetail, - listHelp: cliHelp{ - example: `cscli appsec-rules list -cscli appsec-rules list -a -cscli appsec-rules list crowdsecurity/crs`, - }, - } -} diff --git a/cmd/crowdsec-cli/cliitem/cmdinspect.go b/cmd/crowdsec-cli/cliitem/cmdinspect.go new file mode 100644 index 00000000000..b5ee0816d72 --- /dev/null +++ 
b/cmd/crowdsec-cli/cliitem/cmdinspect.go @@ -0,0 +1,236 @@ +package cliitem + +import ( + "cmp" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/hexops/gotextdiff" + "github.com/hexops/gotextdiff/myers" + "github.com/hexops/gotextdiff/span" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func (cli cliItem) inspect(ctx context.Context, args []string, url string, diff bool, rev bool, noMetrics bool) error { + cfg := cli.cfg() + + if rev && !diff { + return errors.New("--rev can only be used with --diff") + } + + if url != "" { + cfg.Cscli.PrometheusUrl = url + } + + var contentProvider cwhub.ContentProvider + + if diff { + contentProvider = require.HubDownloader(ctx, cfg) + } + + hub, err := require.Hub(cfg, log.StandardLogger()) + if err != nil { + return err + } + + for _, name := range args { + item := hub.GetItem(cli.name, name) + if item == nil { + return fmt.Errorf("can't find '%s' in %s", name, cli.name) + } + + if diff { + fmt.Println(cli.whyTainted(ctx, hub, contentProvider, item, rev)) + + continue + } + + if err = inspectItem(hub, item, !noMetrics, cfg.Cscli.Output, cfg.Cscli.PrometheusUrl, cfg.Cscli.Color); err != nil { + return err + } + + if cli.inspectDetail != nil { + if err = cli.inspectDetail(item); err != nil { + return err + } + } + } + + return nil +} + +// return the diff between the installed version and the latest version +func (cli cliItem) itemDiff(ctx context.Context, item *cwhub.Item, contentProvider cwhub.ContentProvider, reverse bool) (string, error) { + if !item.State.Installed { + return "", fmt.Errorf("'%s' is not installed", item.FQName()) + } + + dest, err := os.CreateTemp("", "cscli-diff-*") + if err != nil { + return "", fmt.Errorf("while creating temporary file: %w", err) + } + defer os.Remove(dest.Name()) + + _, remoteURL, 
err := item.FetchContentTo(ctx, contentProvider, dest.Name()) + if err != nil { + return "", err + } + + latestContent, err := os.ReadFile(dest.Name()) + if err != nil { + return "", fmt.Errorf("while reading %s: %w", dest.Name(), err) + } + + localContent, err := os.ReadFile(item.State.LocalPath) + if err != nil { + return "", fmt.Errorf("while reading %s: %w", item.State.LocalPath, err) + } + + file1 := item.State.LocalPath + file2 := remoteURL + content1 := string(localContent) + content2 := string(latestContent) + + if reverse { + file1, file2 = file2, file1 + content1, content2 = content2, content1 + } + + edits := myers.ComputeEdits(span.URIFromPath(file1), content1, content2) + diff := gotextdiff.ToUnified(file1, file2, content1, edits) + + return fmt.Sprintf("%s", diff), nil +} + +func (cli cliItem) whyTainted(ctx context.Context, hub *cwhub.Hub, contentProvider cwhub.ContentProvider, item *cwhub.Item, reverse bool) string { + if !item.State.Installed { + return fmt.Sprintf("# %s is not installed", item.FQName()) + } + + if !item.State.Tainted { + return fmt.Sprintf("# %s is not tainted", item.FQName()) + } + + if len(item.State.TaintedBy) == 0 { + return fmt.Sprintf("# %s is tainted but we don't know why. 
please report this as a bug", item.FQName()) + } + + ret := []string{ + fmt.Sprintf("# Let's see why %s is tainted.", item.FQName()), + } + + for _, fqsub := range item.State.TaintedBy { + ret = append(ret, fmt.Sprintf("\n-> %s\n", fqsub)) + + sub, err := hub.GetItemFQ(fqsub) + if err != nil { + ret = append(ret, err.Error()) + } + + diff, err := cli.itemDiff(ctx, sub, contentProvider, reverse) + if err != nil { + ret = append(ret, err.Error()) + } + + if diff != "" { + ret = append(ret, diff) + } else if len(sub.State.TaintedBy) > 0 { + taintList := strings.Join(sub.State.TaintedBy, ", ") + if sub.FQName() == taintList { + // hack: avoid message "item is tainted by itself" + continue + } + + ret = append(ret, fmt.Sprintf("# %s is tainted by %s", sub.FQName(), taintList)) + } + } + + return strings.Join(ret, "\n") +} + +func (cli cliItem) newInspectCmd() *cobra.Command { + var ( + url string + diff bool + rev bool + noMetrics bool + ) + + cmd := &cobra.Command{ + Use: cmp.Or(cli.inspectHelp.use, "inspect [item]..."), + Short: cmp.Or(cli.inspectHelp.short, "Inspect given "+cli.oneOrMore), + Long: cmp.Or(cli.inspectHelp.long, "Inspect the state of one or more "+cli.name), + Example: cli.inspectHelp.example, + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compInstalledItems(cli.name, args, toComplete, cli.cfg) + }, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.inspect(cmd.Context(), args, url, diff, rev, noMetrics) + }, + } + + flags := cmd.Flags() + flags.StringVarP(&url, "url", "u", "", "Prometheus url") + flags.BoolVar(&diff, "diff", false, "Show diff with latest version (for tainted items)") + flags.BoolVar(&rev, "rev", false, "Reverse diff output") + flags.BoolVar(&noMetrics, "no-metrics", false, "Don't show metrics (when cscli.output=human)") + + return cmd +} + +func inspectItem(hub *cwhub.Hub, item 
*cwhub.Item, wantMetrics bool, output string, prometheusURL string, wantColor string) error { + // This is dirty... + // We want to show current dependencies (from content), not latest (from index). + // The item is modified but after this function the whole hub should be thrown away. + // A cleaner way would be to copy the struct first. + item.Dependencies = item.CurrentDependencies() + + switch output { + case "human", "raw": + enc := yaml.NewEncoder(os.Stdout) + enc.SetIndent(2) + + if err := enc.Encode(item); err != nil { + return fmt.Errorf("unable to encode item: %w", err) + } + case "json": + b, err := json.MarshalIndent(*item, "", " ") + if err != nil { + return fmt.Errorf("unable to serialize item: %w", err) + } + + fmt.Print(string(b)) + } + + if output != "human" { + return nil + } + + if item.State.Tainted { + fmt.Println() + fmt.Printf(`This item is tainted. Use "%s %s inspect --diff %s" to see why.`, filepath.Base(os.Args[0]), item.Type, item.Name) + fmt.Println() + } + + if wantMetrics { + fmt.Printf("\nCurrent metrics: \n") + + if err := showMetrics(prometheusURL, hub, item, wantColor); err != nil { + return err + } + } + + return nil +} diff --git a/cmd/crowdsec-cli/cliitem/cmdinstall.go b/cmd/crowdsec-cli/cliitem/cmdinstall.go new file mode 100644 index 00000000000..74ffbe727f4 --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/cmdinstall.go @@ -0,0 +1,150 @@ +package cliitem + +import ( + "cmp" + "context" + "errors" + "fmt" + "slices" + "strings" + + "github.com/agext/levenshtein" + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/hubops" +) + +// suggestNearestMessage returns a message with the most similar item name, if one is found +func suggestNearestMessage(hub *cwhub.Hub, itemType string, itemName string) string { + const 
maxDistance = 7 + + score := 100 + nearest := "" + + for _, item := range hub.GetItemsByType(itemType, false) { + d := levenshtein.Distance(itemName, item.Name, nil) + if d < score { + score = d + nearest = item.Name + } + } + + msg := fmt.Sprintf("can't find '%s' in %s", itemName, itemType) + + if score < maxDistance { + msg += fmt.Sprintf(", did you mean '%s'?", nearest) + } + + return msg +} + +func (cli cliItem) install(ctx context.Context, args []string, yes bool, dryRun bool, downloadOnly bool, force bool, ignoreError bool) error { + cfg := cli.cfg() + + hub, err := require.Hub(cfg, log.StandardLogger()) + if err != nil { + return err + } + + plan := hubops.NewActionPlan(hub) + + contentProvider := require.HubDownloader(ctx, cfg) + + for _, name := range args { + item := hub.GetItem(cli.name, name) + if item == nil { + msg := suggestNearestMessage(hub, cli.name, name) + if !ignoreError { + return errors.New(msg) + } + + log.Error(msg) + + continue + } + + if err = plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, force)); err != nil { + return err + } + + if !downloadOnly { + if err = plan.AddCommand(hubops.NewEnableCommand(item, force)); err != nil { + return err + } + } + } + + verbose := (cfg.Cscli.Output == "raw") + + if err := plan.Execute(ctx, yes, dryRun, verbose); err != nil { + if !ignoreError { + return err + } + + log.Error(err) + } + + if msg := reload.UserMessage(); msg != "" && plan.ReloadNeeded { + fmt.Println("\n" + msg) + } + + return nil +} + +func compAllItems(itemType string, args []string, toComplete string, cfg configGetter) ([]string, cobra.ShellCompDirective) { + hub, err := require.Hub(cfg(), nil) + if err != nil { + return nil, cobra.ShellCompDirectiveDefault + } + + comp := make([]string, 0) + + for _, item := range hub.GetItemsByType(itemType, false) { + if !slices.Contains(args, item.Name) && strings.Contains(item.Name, toComplete) { + comp = append(comp, item.Name) + } + } + + cobra.CompDebugln(fmt.Sprintf("%s: 
%+v", itemType, comp), true) + + return comp, cobra.ShellCompDirectiveNoFileComp +} + +func (cli cliItem) newInstallCmd() *cobra.Command { + var ( + yes bool + dryRun bool + downloadOnly bool + force bool + ignoreError bool + ) + + cmd := &cobra.Command{ + Use: cmp.Or(cli.installHelp.use, "install [item]..."), + Short: cmp.Or(cli.installHelp.short, "Install given "+cli.oneOrMore), + Long: cmp.Or(cli.installHelp.long, fmt.Sprintf("Fetch and install one or more %s from the hub", cli.name)), + Example: cli.installHelp.example, + Args: cobra.MinimumNArgs(1), + DisableAutoGenTag: true, + ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compAllItems(cli.name, args, toComplete, cli.cfg) + }, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.install(cmd.Context(), args, yes, dryRun, downloadOnly, force, ignoreError) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&yes, "yes", "y", false, "Confirm execution without prompt") + flags.BoolVar(&dryRun, "dry-run", false, "Don't install or remove anything; print the execution plan") + flags.BoolVarP(&downloadOnly, "download-only", "d", false, "Only download packages, don't enable") + flags.BoolVar(&force, "force", false, "Force install: overwrite tainted and outdated files") + flags.BoolVar(&ignoreError, "ignore", false, "Ignore errors when installing multiple "+cli.name) + cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") + + return cmd +} diff --git a/cmd/crowdsec-cli/cliitem/cmdremove.go b/cmd/crowdsec-cli/cliitem/cmdremove.go new file mode 100644 index 00000000000..c8ea041acbf --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/cmdremove.go @@ -0,0 +1,151 @@ +package cliitem + +import ( + "cmp" + "context" + "errors" + "fmt" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + 
"github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/hubops" +) + +func (cli cliItem) removePlan(hub *cwhub.Hub, args []string, purge bool, force bool, all bool) (*hubops.ActionPlan, error) { + plan := hubops.NewActionPlan(hub) + + if all { + itemGetter := hub.GetInstalledByType + if purge { + itemGetter = hub.GetItemsByType + } + + for _, item := range itemGetter(cli.name, true) { + if err := plan.AddCommand(hubops.NewDisableCommand(item, force)); err != nil { + return nil, err + } + + if purge { + if err := plan.AddCommand(hubops.NewPurgeCommand(item, force)); err != nil { + return nil, err + } + } + } + + return plan, nil + } + + if len(args) == 0 { + return nil, fmt.Errorf("specify at least one %s to remove or '--all'", cli.singular) + } + + for _, itemName := range args { + item := hub.GetItem(cli.name, itemName) + if item == nil { + return nil, fmt.Errorf("can't find '%s' in %s", itemName, cli.name) + } + + parents := installedParentNames(item) + + if !force && len(parents) > 0 { + log.Warningf("%s belongs to collections: %s", item.Name, parents) + log.Warningf("Run 'sudo cscli %s remove %s --force' if you want to force remove this %s", item.Type, item.Name, cli.singular) + + continue + } + + if err := plan.AddCommand(hubops.NewDisableCommand(item, force)); err != nil { + return nil, err + } + + if purge { + if err := plan.AddCommand(hubops.NewPurgeCommand(item, force)); err != nil { + return nil, err + } + } + } + + return plan, nil +} + +// return the names of the installed parents of an item, used to check if we can remove it +func installedParentNames(item *cwhub.Item) []string { + ret := make([]string, 0) + + for _, parent := range item.Ancestors() { + if parent.State.Installed { + ret = append(ret, parent.Name) + } + } + + return ret +} + +func (cli cliItem) remove(ctx context.Context, args []string, yes bool, dryRun bool, purge bool, force bool, all bool) error { + cfg := cli.cfg() + + hub, err := 
require.Hub(cli.cfg(), log.StandardLogger()) + if err != nil { + return err + } + + plan, err := cli.removePlan(hub, args, purge, force, all) + if err != nil { + return err + } + + verbose := (cfg.Cscli.Output == "raw") + + if err := plan.Execute(ctx, yes, dryRun, verbose); err != nil { + return err + } + + if msg := reload.UserMessage(); msg != "" && plan.ReloadNeeded { + fmt.Println("\n" + msg) + } + + return nil +} + +func (cli cliItem) newRemoveCmd() *cobra.Command { + var ( + yes bool + dryRun bool + purge bool + force bool + all bool + ) + + cmd := &cobra.Command{ + Use: cmp.Or(cli.removeHelp.use, "remove [item]..."), + Short: cmp.Or(cli.removeHelp.short, "Remove given "+cli.oneOrMore), + Long: cmp.Or(cli.removeHelp.long, "Remove one or more "+cli.name), + Example: cli.removeHelp.example, + Aliases: []string{"delete"}, + DisableAutoGenTag: true, + ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compInstalledItems(cli.name, args, toComplete, cli.cfg) + }, + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) > 0 && all { + return errors.New("can't specify items and '--all' at the same time") + } + + return cli.remove(cmd.Context(), args, yes, dryRun, purge, force, all) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&yes, "yes", "y", false, "Confirm execution without prompt") + flags.BoolVar(&dryRun, "dry-run", false, "Don't install or remove anything; print the execution plan") + flags.BoolVar(&purge, "purge", false, "Delete source file too") + flags.BoolVar(&force, "force", false, "Force remove: remove tainted and outdated files") + flags.BoolVar(&all, "all", false, "Remove all the "+cli.name) + cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") + + return cmd +} diff --git a/cmd/crowdsec-cli/cliitem/cmdupgrade.go b/cmd/crowdsec-cli/cliitem/cmdupgrade.go new file mode 100644 index 00000000000..5320bc04bc6 --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/cmdupgrade.go @@ 
-0,0 +1,106 @@ +package cliitem + +import ( + "cmp" + "context" + "fmt" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/hubops" +) + +func (cli cliItem) upgradePlan(hub *cwhub.Hub, contentProvider cwhub.ContentProvider, args []string, force bool, all bool) (*hubops.ActionPlan, error) { + plan := hubops.NewActionPlan(hub) + + if all { + for _, item := range hub.GetInstalledByType(cli.name, true) { + if err := plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, force)); err != nil { + return nil, err + } + } + + return plan, nil + } + + if len(args) == 0 { + return nil, fmt.Errorf("specify at least one %s to upgrade or '--all'", cli.singular) + } + + for _, itemName := range args { + item := hub.GetItem(cli.name, itemName) + if item == nil { + return nil, fmt.Errorf("can't find '%s' in %s", itemName, cli.name) + } + + if err := plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, force)); err != nil { + return nil, err + } + } + + return plan, nil +} + +func (cli cliItem) upgrade(ctx context.Context, args []string, yes bool, dryRun bool, force bool, all bool) error { + cfg := cli.cfg() + + hub, err := require.Hub(cfg, log.StandardLogger()) + if err != nil { + return err + } + + contentProvider := require.HubDownloader(ctx, cfg) + + plan, err := cli.upgradePlan(hub, contentProvider, args, force, all) + if err != nil { + return err + } + + verbose := (cfg.Cscli.Output == "raw") + + if err := plan.Execute(ctx, yes, dryRun, verbose); err != nil { + return err + } + + if msg := reload.UserMessage(); msg != "" && plan.ReloadNeeded { + fmt.Println("\n" + msg) + } + + return nil +} + +func (cli cliItem) newUpgradeCmd() *cobra.Command { + var ( + yes bool + dryRun bool + all bool + force bool + ) + + cmd := &cobra.Command{ 
+ Use: cmp.Or(cli.upgradeHelp.use, "upgrade [item]..."), + Short: cmp.Or(cli.upgradeHelp.short, "Upgrade given "+cli.oneOrMore), + Long: cmp.Or(cli.upgradeHelp.long, fmt.Sprintf("Fetch and upgrade one or more %s from the hub", cli.name)), + Example: cli.upgradeHelp.example, + DisableAutoGenTag: true, + ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return compInstalledItems(cli.name, args, toComplete, cli.cfg) + }, + RunE: func(cmd *cobra.Command, args []string) error { + return cli.upgrade(cmd.Context(), args, yes, dryRun, force, all) + }, + } + + flags := cmd.Flags() + flags.BoolVarP(&yes, "yes", "y", false, "Confirm execution without prompt") + flags.BoolVar(&dryRun, "dry-run", false, "Don't install or remove anything; print the execution plan") + flags.BoolVarP(&all, "all", "a", false, "Upgrade all the "+cli.name) + flags.BoolVar(&force, "force", false, "Force upgrade: overwrite tainted and outdated files") + cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") + + return cmd +} diff --git a/cmd/crowdsec-cli/cliitem/collection.go b/cmd/crowdsec-cli/cliitem/collection.go deleted file mode 100644 index ea91c1e537a..00000000000 --- a/cmd/crowdsec-cli/cliitem/collection.go +++ /dev/null @@ -1,41 +0,0 @@ -package cliitem - -import ( - "github.com/crowdsecurity/crowdsec/pkg/cwhub" -) - -func NewCollection(cfg configGetter) *cliItem { - return &cliItem{ - cfg: cfg, - name: cwhub.COLLECTIONS, - singular: "collection", - oneOrMore: "collection(s)", - help: cliHelp{ - example: `cscli collections list -a -cscli collections install crowdsecurity/http-cve crowdsecurity/iptables -cscli collections inspect crowdsecurity/http-cve crowdsecurity/iptables -cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables -cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables -`, - }, - installHelp: cliHelp{ - example: `cscli collections install crowdsecurity/http-cve crowdsecurity/iptables`, 
- }, - removeHelp: cliHelp{ - example: `cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables`, - }, - upgradeHelp: cliHelp{ - example: `cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables`, - }, - inspectHelp: cliHelp{ - example: `cscli collections inspect crowdsecurity/http-cve crowdsecurity/iptables`, - }, - listHelp: cliHelp{ - example: `cscli collections list -cscli collections list -a -cscli collections list crowdsecurity/http-cve crowdsecurity/iptables - -List only enabled collections unless "-a" or names are specified.`, - }, - } -} diff --git a/cmd/crowdsec-cli/cliitem/context.go b/cmd/crowdsec-cli/cliitem/context.go deleted file mode 100644 index 7d110b8203d..00000000000 --- a/cmd/crowdsec-cli/cliitem/context.go +++ /dev/null @@ -1,41 +0,0 @@ -package cliitem - -import ( - "github.com/crowdsecurity/crowdsec/pkg/cwhub" -) - -func NewContext(cfg configGetter) *cliItem { - return &cliItem{ - cfg: cfg, - name: cwhub.CONTEXTS, - singular: "context", - oneOrMore: "context(s)", - help: cliHelp{ - example: `cscli contexts list -a -cscli contexts install crowdsecurity/yyy crowdsecurity/zzz -cscli contexts inspect crowdsecurity/yyy crowdsecurity/zzz -cscli contexts upgrade crowdsecurity/yyy crowdsecurity/zzz -cscli contexts remove crowdsecurity/yyy crowdsecurity/zzz -`, - }, - installHelp: cliHelp{ - example: `cscli contexts install crowdsecurity/yyy crowdsecurity/zzz`, - }, - removeHelp: cliHelp{ - example: `cscli contexts remove crowdsecurity/yyy crowdsecurity/zzz`, - }, - upgradeHelp: cliHelp{ - example: `cscli contexts upgrade crowdsecurity/yyy crowdsecurity/zzz`, - }, - inspectHelp: cliHelp{ - example: `cscli contexts inspect crowdsecurity/yyy crowdsecurity/zzz`, - }, - listHelp: cliHelp{ - example: `cscli contexts list -cscli contexts list -a -cscli contexts list crowdsecurity/yyy crowdsecurity/zzz - -List only enabled contexts unless "-a" or names are specified.`, - }, - } -} diff --git 
a/cmd/crowdsec-cli/cliitem/hubappsec.go b/cmd/crowdsec-cli/cliitem/hubappsec.go new file mode 100644 index 00000000000..7f9143d35b8 --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/hubappsec.go @@ -0,0 +1,255 @@ +package cliitem + +import ( + "fmt" + "os" + + "golang.org/x/text/cases" + "golang.org/x/text/language" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/crowdsec/pkg/appsec" + "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func NewAppsecConfig(cfg configGetter) *cliItem { + return &cliItem{ + cfg: cfg, + name: cwhub.APPSEC_CONFIGS, + singular: "appsec-config", + oneOrMore: "appsec-config(s)", + help: cliHelp{ + example: `cscli appsec-configs list -a +cscli appsec-configs install crowdsecurity/virtual-patching +cscli appsec-configs inspect crowdsecurity/virtual-patching +cscli appsec-configs upgrade crowdsecurity/virtual-patching +cscli appsec-configs remove crowdsecurity/virtual-patching +`, + }, + installHelp: cliHelp{ + example: `# Install some appsec-configs. +cscli appsec-configs install crowdsecurity/virtual-patching + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli appsec-configs install crowdsecurity/virtual-patching --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli appsec-configs install crowdsecurity/virtual-patching --dry-run -o raw + +# Download only, to be installed later. +cscli appsec-configs install crowdsecurity/virtual-patching --download-only + +# Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli appsec-configs install crowdsecurity/virtual-patching --force + +# Proceed without prompting. 
+cscli appsec-configs install crowdsecurity/virtual-patching --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + removeHelp: cliHelp{ + example: `# Uninstall some appsec-configs. +cscli appsec-configs remove crowdsecurity/virtual-patching + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli appsec-configs remove crowdsecurity/virtual-patching --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli appsec-configs remove crowdsecurity/virtual-patching --dry-run -o raw + +# Uninstall and also remove the downloaded files. +cscli appsec-configs remove crowdsecurity/virtual-patching --purge + +# Remove tainted items. +cscli appsec-configs remove crowdsecurity/virtual-patching --force + +# Proceed without prompting. +cscli appsec-configs remove crowdsecurity/virtual-patching --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + upgradeHelp: cliHelp{ + example: `# Upgrade some appsec-configs. If they are not currently installed, they are downloaded but not installed. +cscli appsec-configs upgrade crowdsecurity/virtual-patching + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli appsec-configs upgrade crowdsecurity/virtual-patching --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli appsec-configs upgrade crowdsecurity/virtual-patching --dry-run -o raw + +# Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli appsec-configs upgrade crowdsecurity/virtual-patching --force + +# Proceed without prompting. 
+cscli appsec-configs upgrade crowdsecurity/virtual-patching --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + inspectHelp: cliHelp{ + example: `# Display metadata, state, metrics and ancestor collections of appsec-configs (installed or not). +cscli appsec-configs inspect crowdsecurity/virtual-patching + +# Don't collect metrics (avoid error if crowdsec is not running). +cscli appsec-configs inspect crowdsecurity/virtual-patching --no-metrics + +# Display difference between a tainted item and the latest one. +cscli appsec-configs inspect crowdsecurity/virtual-patching --diff + +# Reverse the above diff +cscli appsec-configs inspect crowdsecurity/virtual-patching --diff --rev`, + }, + listHelp: cliHelp{ + example: `# List enabled (installed) appsec-configs. +cscli appsec-configs list + +# List all available appsec-configs (installed or not). +cscli appsec-configs list -a + +# List specific appsec-configs (installed or not). 
+cscli appsec-configs list crowdsecurity/virtual-patching crowdsecurity/generic-rules`, + }, + } +} + +func NewAppsecRule(cfg configGetter) *cliItem { + inspectDetail := func(item *cwhub.Item) error { + // Only show the converted rules in human mode + if cfg().Cscli.Output != "human" { + return nil + } + + appsecRule := appsec.AppsecCollectionConfig{} + + yamlContent, err := os.ReadFile(item.State.LocalPath) + if err != nil { + return fmt.Errorf("unable to read file %s: %w", item.State.LocalPath, err) + } + + if err := yaml.Unmarshal(yamlContent, &appsecRule); err != nil { + return fmt.Errorf("unable to parse yaml file %s: %w", item.State.LocalPath, err) + } + + for _, ruleType := range appsec_rule.SupportedTypes() { + fmt.Printf("\n%s format:\n", cases.Title(language.Und, cases.NoLower).String(ruleType)) + + for _, rule := range appsecRule.Rules { + convertedRule, _, err := rule.Convert(ruleType, appsecRule.Name) + if err != nil { + return fmt.Errorf("unable to convert rule %s: %w", rule.Name, err) + } + + fmt.Println(convertedRule) + } + + switch ruleType { //nolint:gocritic + case appsec_rule.ModsecurityRuleType: + for _, rule := range appsecRule.SecLangRules { + fmt.Println(rule) + } + } + } + + return nil + } + + return &cliItem{ + cfg: cfg, + name: "appsec-rules", + singular: "appsec-rule", + oneOrMore: "appsec-rule(s)", + help: cliHelp{ + example: `cscli appsec-rules list -a +cscli appsec-rules install crowdsecurity/crs +cscli appsec-rules inspect crowdsecurity/crs +cscli appsec-rules upgrade crowdsecurity/crs +cscli appsec-rules remove crowdsecurity/crs +`, + }, + installHelp: cliHelp{ + example: `# Install some appsec-rules. +cscli appsec-rules install crowdsecurity/crs + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli appsec-rules install crowdsecurity/crs --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. 
+cscli appsec-rules install crowdsecurity/crs --dry-run -o raw + +# Download only, to be installed later. +cscli appsec-rules install crowdsecurity/crs --download-only + +# Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli appsec-rules install crowdsecurity/crs --force + +# Proceed without prompting. +cscli appsec-rules install crowdsecurity/crs --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + removeHelp: cliHelp{ + example: `# Uninstall some appsec-rules. +cscli appsec-rules remove crowdsecurity/crs + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli appsec-rules remove crowdsecurity/crs --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli appsec-rules remove crowdsecurity/crs --dry-run -o raw + +# Uninstall and also remove the downloaded files. +cscli appsec-rules remove crowdsecurity/crs --purge + +# Remove tainted items. +cscli appsec-rules remove crowdsecurity/crs --force + +# Proceed without prompting. +cscli appsec-rules remove crowdsecurity/crs --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + upgradeHelp: cliHelp{ + example: `# Upgrade some appsec-rules. If they are not currently installed, they are downloaded but not installed. +cscli appsec-rules upgrade crowdsecurity/crs + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli appsec-rules upgrade crowdsecurity/crs --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli appsec-rules upgrade crowdsecurity/crs --dry-run -o raw + +# Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. 
+cscli appsec-rules upgrade crowdsecurity/crs --force + +# Proceed without prompting. +cscli appsec-rules upgrade crowdsecurity/crs --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + inspectHelp: cliHelp{ + example: `# Display metadata, state, metrics and ancestor collections of appsec-rules (installed or not). +cscli appsec-rules inspect crowdsecurity/crs + +# Don't collect metrics (avoid error if crowdsec is not running). +cscli appsec-rules inspect crowdsecurity/crs --no-metrics + +# Display difference between a tainted item and the latest one. +cscli appsec-rules inspect crowdsecurity/crs --diff + +# Reverse the above diff +cscli appsec-rules inspect crowdsecurity/crs --diff --rev`, + }, + inspectDetail: inspectDetail, + listHelp: cliHelp{ + example: `# List enabled (installed) appsec-rules. +cscli appsec-rules list + +# List all available appsec-rules (installed or not). +cscli appsec-rules list -a + +# List specific appsec-rules (installed or not). 
+cscli appsec-rules list crowdsecurity/crs crowdsecurity/vpatch-git-config`, + }, + } +} diff --git a/cmd/crowdsec-cli/cliitem/hubcollection.go b/cmd/crowdsec-cli/cliitem/hubcollection.go new file mode 100644 index 00000000000..b45f956e0ac --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/hubcollection.go @@ -0,0 +1,105 @@ +package cliitem + +import ( + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func NewCollection(cfg configGetter) *cliItem { + return &cliItem{ + cfg: cfg, + name: cwhub.COLLECTIONS, + singular: "collection", + oneOrMore: "collection(s)", + help: cliHelp{ + example: `cscli collections list -a +cscli collections install crowdsecurity/http-cve crowdsecurity/iptables +cscli collections inspect crowdsecurity/http-cve crowdsecurity/iptables +cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables +cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables +`, + }, + installHelp: cliHelp{ + example: `# Install some collections. +cscli collections install crowdsecurity/http-cve crowdsecurity/iptables + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli collections install crowdsecurity/http-cve crowdsecurity/iptables --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli collections install crowdsecurity/http-cve crowdsecurity/iptables --dry-run -o raw + +# Download only, to be installed later. +cscli collections install crowdsecurity/http-cve crowdsecurity/iptables --download-only + +# Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli collections install crowdsecurity/http-cve crowdsecurity/iptables --force + +# Proceed without prompting. 
+cscli collections install crowdsecurity/http-cve crowdsecurity/iptables --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + removeHelp: cliHelp{ + example: `# Uninstall some collections. +cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables --dry-run -o raw + +# Uninstall and also remove the downloaded files. +cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables --purge + +# Remove tainted items. +cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables --force + +# Proceed without prompting. +cscli collections remove crowdsecurity/http-cve crowdsecurity/iptables --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + upgradeHelp: cliHelp{ + example: `# Upgrade some collections. If they are not currently installed, they are downloaded but not installed. +cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables --dry-run -o raw + +# Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables --force + +# Proceed without prompting. 
+cscli collections upgrade crowdsecurity/http-cve crowdsecurity/iptables --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + inspectHelp: cliHelp{ + example: `# Display metadata, state, metrics and dependencies of collections (installed or not). +cscli collections inspect crowdsecurity/http-cve crowdsecurity/iptables + +# Don't collect metrics (avoid error if crowdsec is not running). +cscli collections inspect crowdsecurity/http-cve crowdsecurity/iptables --no-metrics + +# Display difference between a tainted item and the latest one, or the reason for the taint if it's a dependency. +cscli collections inspect crowdsecurity/http-cve --diff + +# Reverse the above diff +cscli collections inspect crowdsecurity/http-cve --diff --rev`, + }, + listHelp: cliHelp{ + example: `# List enabled (installed) collections. +cscli collections list + +# List all available collections (installed or not). +cscli collections list -a + +# List specific collections (installed or not). +cscli collections list crowdsecurity/http-cve crowdsecurity/iptables`, + }, + } +} diff --git a/cmd/crowdsec-cli/cliitem/hubcontext.go b/cmd/crowdsec-cli/cliitem/hubcontext.go new file mode 100644 index 00000000000..3a94687843d --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/hubcontext.go @@ -0,0 +1,102 @@ +package cliitem + +import ( + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func NewContext(cfg configGetter) *cliItem { + return &cliItem{ + cfg: cfg, + name: cwhub.CONTEXTS, + singular: "context", + oneOrMore: "context(s)", + help: cliHelp{ + example: `cscli contexts list -a +cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet +cscli contexts inspect crowdsecurity/bf_base crowdsecurity/fortinet +cscli contexts upgrade crowdsecurity/bf_base crowdsecurity/fortinet +cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet +`, + }, + installHelp: cliHelp{ + example: `# Install some contexts. 
+cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet --dry-run -o raw + +# Download only, to be installed later. +cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet --download-only + +# Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet --force + +# Proceed without prompting. +cscli contexts install crowdsecurity/bf_base crowdsecurity/fortinet --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + removeHelp: cliHelp{ + example: `# Uninstall some contexts. +cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet --dry-run -o raw + +# Uninstall and also remove the downloaded files. +cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet --purge + +# Remove tainted items. +cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet --force + +# Proceed without prompting. +cscli contexts remove crowdsecurity/bf_base crowdsecurity/fortinet --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + upgradeHelp: cliHelp{ + example: `# Upgrade some contexts. 
If they are not currently installed, they are downloaded but not installed. +cscli contexts upgrade crowdsecurity/bf_base crowdsecurity/fortinet + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli contexts upgrade crowdsecurity/bf_base crowdsecurity/fortinet --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli contexts upgrade crowdsecurity/bf_base crowdsecurity/fortinet --dry-run -o raw + +# Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli contexts upgrade crowdsecurity/bf_base crowdsecurity/fortinet --force + +# Proceed without prompting. +cscli contexts upgrade crowdsecurity/bf_base crowdsecurity/fortinet --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + inspectHelp: cliHelp{ + example: `# Display metadata, state and ancestor collections of contexts (installed or not). +cscli contexts inspect crowdsecurity/bf_base crowdsecurity/fortinet + +# Display difference between a tainted item and the latest one. +cscli contexts inspect crowdsecurity/bf_base --diff + +# Reverse the above diff +cscli contexts inspect crowdsecurity/bf_base --diff --rev`, + }, + listHelp: cliHelp{ + example: `# List enabled (installed) contexts. +cscli contexts list + +# List all available contexts (installed or not). +cscli contexts list -a + +# List specific contexts (installed or not). 
+cscli contexts list crowdsecurity/bf_base crowdsecurity/fortinet`, + }, + } +} diff --git a/cmd/crowdsec-cli/cliitem/hubparser.go b/cmd/crowdsec-cli/cliitem/hubparser.go new file mode 100644 index 00000000000..440cb61204f --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/hubparser.go @@ -0,0 +1,105 @@ +package cliitem + +import ( + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func NewParser(cfg configGetter) *cliItem { + return &cliItem{ + cfg: cfg, + name: cwhub.PARSERS, + singular: "parser", + oneOrMore: "parser(s)", + help: cliHelp{ + example: `cscli parsers list -a +cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs +cscli parsers inspect crowdsecurity/caddy-logs crowdsecurity/sshd-logs +cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs +cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs +`, + }, + installHelp: cliHelp{ + example: `# Install some parsers. +cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs --dry-run -o raw + +# Download only, to be installed later. +cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs --download-only + +# Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs --force + +# Proceed without prompting. +cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + removeHelp: cliHelp{ + example: `# Uninstall some parsers. 
+cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs --dry-run -o raw + +# Uninstall and also remove the downloaded files. +cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs --purge + +# Remove tainted items. +cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs --force + +# Proceed without prompting. +cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + upgradeHelp: cliHelp{ + example: `# Upgrade some parsers. If they are not currently installed, they are downloaded but not installed. +cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs --dry-run -o raw + +# Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs --force + +# Proceed without prompting. 
+cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + inspectHelp: cliHelp{ + example: `# Display metadata, state, metrics and ancestor collections of parsers (installed or not). +cscli parsers inspect crowdsecurity/httpd-logs crowdsecurity/sshd-logs + +# Don't collect metrics (avoid error if crowdsec is not running). +cscli parsers inspect crowdsecurity/httpd-logs --no-metrics + +# Display difference between a tainted item and the latest one. +cscli parsers inspect crowdsecurity/httpd-logs --diff + +# Reverse the above diff +cscli parsers inspect crowdsecurity/httpd-logs --diff --rev`, + }, + listHelp: cliHelp{ + example: `# List enabled (installed) parsers. +cscli parsers list + +# List all available parsers (installed or not). +cscli parsers list -a + +# List specific parsers (installed or not). +cscli parsers list crowdsecurity/caddy-logs crowdsecurity/sshd-logs`, + }, + } +} diff --git a/cmd/crowdsec-cli/cliitem/hubpostoverflow.go b/cmd/crowdsec-cli/cliitem/hubpostoverflow.go new file mode 100644 index 00000000000..cfd5f7c95aa --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/hubpostoverflow.go @@ -0,0 +1,102 @@ +package cliitem + +import ( + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +func NewPostOverflow(cfg configGetter) *cliItem { + return &cliItem{ + cfg: cfg, + name: cwhub.POSTOVERFLOWS, + singular: "postoverflow", + oneOrMore: "postoverflow(s)", + help: cliHelp{ + example: `cscli postoverflows list -a +cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns +cscli postoverflows inspect crowdsecurity/cdn-whitelist crowdsecurity/rdns +cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdns +cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns +`, + }, + installHelp: cliHelp{ + example: `# Install some postoverflows. 
+cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns --dry-run -o raw + +# Download only, to be installed later. +cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns --download-only + +# Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns --force + +# Proceed without prompting. +cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + removeHelp: cliHelp{ + example: `# Uninstall some postoverflows. +cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns --dry-run -o raw + +# Uninstall and also remove the downloaded files. +cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns --purge + +# Remove tainted items. +cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns --force + +# Proceed without prompting. 
+cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + upgradeHelp: cliHelp{ + example: `# Upgrade some postoverflows. If they are not currently installed, they are downloaded but not installed. +cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdns + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdns --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdns --dry-run -o raw + +# Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdns --force + +# Proceed without prompting. +cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdns --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, + }, + inspectHelp: cliHelp{ + example: `# Display metadata, state and ancestor collections of postoverflows (installed or not). +cscli postoverflows inspect crowdsecurity/cdn-whitelist + +# Display difference between a tainted item and the latest one. +cscli postoverflows inspect crowdsecurity/cdn-whitelist --diff + +# Reverse the above diff +cscli postoverflows inspect crowdsecurity/cdn-whitelist --diff --rev`, + }, + listHelp: cliHelp{ + example: `# List enabled (installed) postoverflows. +cscli postoverflows list + +# List all available postoverflows (installed or not). +cscli postoverflows list -a + +# List specific postoverflows (installed or not). 
+cscli postoverflows list crowdsecurity/cdn-whitelists crowdsecurity/rdns`, + }, + } +} diff --git a/cmd/crowdsec-cli/cliitem/hubscenario.go b/cmd/crowdsec-cli/cliitem/hubscenario.go index a5e854b3c82..5dee3323f6f 100644 --- a/cmd/crowdsec-cli/cliitem/hubscenario.go +++ b/cmd/crowdsec-cli/cliitem/hubscenario.go @@ -19,23 +19,87 @@ cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing `, }, installHelp: cliHelp{ - example: `cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing`, + example: `# Install some scenarios. +cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing --dry-run -o raw + +# Download only, to be installed later. +cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing --download-only + +# Install over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing --force + +# Proceed without prompting. +cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/http-probing --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, }, removeHelp: cliHelp{ - example: `cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing`, + example: `# Uninstall some scenarios. +cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing + +# Show the execution plan without changing anything - compact output sorted by type and name. 
+cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing --dry-run -o raw + +# Uninstall and also remove the downloaded files. +cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing --purge + +# Remove tainted items. +cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing --force + +# Proceed without prompting. +cscli scenarios remove crowdsecurity/ssh-bf crowdsecurity/http-probing --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, }, upgradeHelp: cliHelp{ - example: `cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing`, + example: `# Upgrade some scenarios. If they are not currently installed, they are downloaded but not installed. +cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing + +# Show the execution plan without changing anything - compact output sorted by type and name. +cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing --dry-run + +# Show the execution plan without changing anything - verbose output sorted by execution order. +cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing --dry-run -o raw + +# Upgrade over tainted items. Can be used to restore or repair after local modifications or missing dependencies. +cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing --force + +# Proceed without prompting. 
+cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/http-probing --yes + +# The "--yes" parameter is implied when the command is not connected to a terminal, like pipes or scripts.`, }, inspectHelp: cliHelp{ - example: `cscli scenarios inspect crowdsecurity/ssh-bf crowdsecurity/http-probing`, + example: `# Display metadata, state, metrics and ancestor collections of scenarios (installed or not). +cscli scenarios inspect crowdsecurity/ssh-bf crowdsecurity/http-probing + +# Don't collect metrics (avoid error if crowdsec is not running). +cscli scenarios inspect crowdsecurity/ssh-bf --no-metrics + +# Display difference between a tainted item and the latest one. +cscli scenarios inspect crowdsecurity/ssh-bf --diff + +# Reverse the above diff +cscli scenarios inspect crowdsecurity/ssh-bf --diff --rev`, }, listHelp: cliHelp{ - example: `cscli scenarios list + example: `# List enabled (installed) scenarios. +cscli scenarios list + +# List all available scenarios (installed or not). cscli scenarios list -a -cscli scenarios list crowdsecurity/ssh-bf crowdsecurity/http-probing -List only enabled scenarios unless "-a" or names are specified.`, +# List specific scenarios (installed or not). 
+cscli scenarios list crowdsecurity/ssh-bf crowdsecurity/http-probing`, }, } } diff --git a/cmd/crowdsec-cli/cliitem/item.go b/cmd/crowdsec-cli/cliitem/item.go index 28828eb9c95..3dcc0665a89 100644 --- a/cmd/crowdsec-cli/cliitem/item.go +++ b/cmd/crowdsec-cli/cliitem/item.go @@ -2,21 +2,14 @@ package cliitem import ( "cmp" - "context" - "errors" "fmt" - "os" "strings" "github.com/fatih/color" - "github.com/hexops/gotextdiff" - "github.com/hexops/gotextdiff/myers" - "github.com/hexops/gotextdiff/span" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clihub" - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/reload" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" @@ -67,365 +60,10 @@ func (cli cliItem) NewCommand() *cobra.Command { return cmd } -func (cli cliItem) install(ctx context.Context, args []string, downloadOnly bool, force bool, ignoreError bool) error { - cfg := cli.cfg() - - hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), log.StandardLogger()) - if err != nil { - return err - } - - for _, name := range args { - item := hub.GetItem(cli.name, name) - if item == nil { - msg := suggestNearestMessage(hub, cli.name, name) - if !ignoreError { - return errors.New(msg) - } - - log.Error(msg) - - continue - } - - if err := item.Install(ctx, force, downloadOnly); err != nil { - if !ignoreError { - return fmt.Errorf("error while installing '%s': %w", item.Name, err) - } - - log.Errorf("Error while installing '%s': %s", item.Name, err) - } - } - - log.Info(reload.Message) - - return nil -} - -func (cli cliItem) newInstallCmd() *cobra.Command { - var ( - downloadOnly bool - force bool - ignoreError bool - ) - - cmd := &cobra.Command{ - Use: cmp.Or(cli.installHelp.use, "install [item]..."), - Short: cmp.Or(cli.installHelp.short, "Install given "+cli.oneOrMore), - Long: 
cmp.Or(cli.installHelp.long, fmt.Sprintf("Fetch and install one or more %s from the hub", cli.name)), - Example: cli.installHelp.example, - Args: cobra.MinimumNArgs(1), - DisableAutoGenTag: true, - ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return compAllItems(cli.name, args, toComplete, cli.cfg) - }, - RunE: func(cmd *cobra.Command, args []string) error { - return cli.install(cmd.Context(), args, downloadOnly, force, ignoreError) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&downloadOnly, "download-only", "d", false, "Only download packages, don't enable") - flags.BoolVar(&force, "force", false, "Force install: overwrite tainted and outdated files") - flags.BoolVar(&ignoreError, "ignore", false, "Ignore errors when installing multiple "+cli.name) - - return cmd -} - -// return the names of the installed parents of an item, used to check if we can remove it -func istalledParentNames(item *cwhub.Item) []string { - ret := make([]string, 0) - - for _, parent := range item.Ancestors() { - if parent.State.Installed { - ret = append(ret, parent.Name) - } - } - - return ret -} - -func (cli cliItem) remove(args []string, purge bool, force bool, all bool) error { - hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) - if err != nil { - return err - } - - if all { - itemGetter := hub.GetInstalledByType - if purge { - itemGetter = hub.GetItemsByType - } - - removed := 0 - - for _, item := range itemGetter(cli.name, true) { - didRemove, err := item.Remove(purge, force) - if err != nil { - return err - } - - if didRemove { - log.Infof("Removed %s", item.Name) - - removed++ - } - } - - log.Infof("Removed %d %s", removed, cli.name) - - if removed > 0 { - log.Info(reload.Message) - } - - return nil - } - - if len(args) == 0 { - return fmt.Errorf("specify at least one %s to remove or '--all'", cli.singular) - } - - removed := 0 - - for _, itemName := range args { - item := 
hub.GetItem(cli.name, itemName) - if item == nil { - return fmt.Errorf("can't find '%s' in %s", itemName, cli.name) - } - - parents := istalledParentNames(item) - - if !force && len(parents) > 0 { - log.Warningf("%s belongs to collections: %s", item.Name, parents) - log.Warningf("Run 'sudo cscli %s remove %s --force' if you want to force remove this %s", item.Type, item.Name, cli.singular) - - continue - } - - didRemove, err := item.Remove(purge, force) - if err != nil { - return err - } - - if didRemove { - log.Infof("Removed %s", item.Name) - - removed++ - } - } - - log.Infof("Removed %d %s", removed, cli.name) - - if removed > 0 { - log.Info(reload.Message) - } - - return nil -} - -func (cli cliItem) newRemoveCmd() *cobra.Command { - var ( - purge bool - force bool - all bool - ) - - cmd := &cobra.Command{ - Use: cmp.Or(cli.removeHelp.use, "remove [item]..."), - Short: cmp.Or(cli.removeHelp.short, "Remove given "+cli.oneOrMore), - Long: cmp.Or(cli.removeHelp.long, "Remove one or more "+cli.name), - Example: cli.removeHelp.example, - Aliases: []string{"delete"}, - DisableAutoGenTag: true, - ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return compInstalledItems(cli.name, args, toComplete, cli.cfg) - }, - RunE: func(_ *cobra.Command, args []string) error { - return cli.remove(args, purge, force, all) - }, - } - - flags := cmd.Flags() - flags.BoolVar(&purge, "purge", false, "Delete source file too") - flags.BoolVar(&force, "force", false, "Force remove: remove tainted and outdated files") - flags.BoolVar(&all, "all", false, "Remove all the "+cli.name) - - return cmd -} - -func (cli cliItem) upgrade(ctx context.Context, args []string, force bool, all bool) error { - cfg := cli.cfg() - - hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), log.StandardLogger()) - if err != nil { - return err - } - - if all { - updated := 0 - - for _, item := range hub.GetInstalledByType(cli.name, true) { - 
didUpdate, err := item.Upgrade(ctx, force) - if err != nil { - return err - } - - if didUpdate { - updated++ - } - } - - log.Infof("Updated %d %s", updated, cli.name) - - if updated > 0 { - log.Info(reload.Message) - } - - return nil - } - - if len(args) == 0 { - return fmt.Errorf("specify at least one %s to upgrade or '--all'", cli.singular) - } - - updated := 0 - - for _, itemName := range args { - item := hub.GetItem(cli.name, itemName) - if item == nil { - return fmt.Errorf("can't find '%s' in %s", itemName, cli.name) - } - - didUpdate, err := item.Upgrade(ctx, force) - if err != nil { - return err - } - - if didUpdate { - log.Infof("Updated %s", item.Name) - - updated++ - } - } - - if updated > 0 { - log.Info(reload.Message) - } - - return nil -} - -func (cli cliItem) newUpgradeCmd() *cobra.Command { - var ( - all bool - force bool - ) - - cmd := &cobra.Command{ - Use: cmp.Or(cli.upgradeHelp.use, "upgrade [item]..."), - Short: cmp.Or(cli.upgradeHelp.short, "Upgrade given "+cli.oneOrMore), - Long: cmp.Or(cli.upgradeHelp.long, fmt.Sprintf("Fetch and upgrade one or more %s from the hub", cli.name)), - Example: cli.upgradeHelp.example, - DisableAutoGenTag: true, - ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return compInstalledItems(cli.name, args, toComplete, cli.cfg) - }, - RunE: func(cmd *cobra.Command, args []string) error { - return cli.upgrade(cmd.Context(), args, force, all) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&all, "all", "a", false, "Upgrade all the "+cli.name) - flags.BoolVar(&force, "force", false, "Force upgrade: overwrite tainted and outdated files") - - return cmd -} - -func (cli cliItem) inspect(ctx context.Context, args []string, url string, diff bool, rev bool, noMetrics bool) error { - cfg := cli.cfg() - - if rev && !diff { - return errors.New("--rev can only be used with --diff") - } - - if url != "" { - cfg.Cscli.PrometheusUrl = url - } - - remote := 
(*cwhub.RemoteHubCfg)(nil) - - if diff { - remote = require.RemoteHub(ctx, cfg) - } - - hub, err := require.Hub(cfg, remote, log.StandardLogger()) - if err != nil { - return err - } - - for _, name := range args { - item := hub.GetItem(cli.name, name) - if item == nil { - return fmt.Errorf("can't find '%s' in %s", name, cli.name) - } - - if diff { - fmt.Println(cli.whyTainted(ctx, hub, item, rev)) - - continue - } - - if err = clihub.InspectItem(item, !noMetrics, cfg.Cscli.Output, cfg.Cscli.PrometheusUrl, cfg.Cscli.Color); err != nil { - return err - } - - if cli.inspectDetail != nil { - if err = cli.inspectDetail(item); err != nil { - return err - } - } - } - - return nil -} - -func (cli cliItem) newInspectCmd() *cobra.Command { - var ( - url string - diff bool - rev bool - noMetrics bool - ) - - cmd := &cobra.Command{ - Use: cmp.Or(cli.inspectHelp.use, "inspect [item]..."), - Short: cmp.Or(cli.inspectHelp.short, "Inspect given "+cli.oneOrMore), - Long: cmp.Or(cli.inspectHelp.long, "Inspect the state of one or more "+cli.name), - Example: cli.inspectHelp.example, - Args: cobra.MinimumNArgs(1), - DisableAutoGenTag: true, - ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - return compInstalledItems(cli.name, args, toComplete, cli.cfg) - }, - RunE: func(cmd *cobra.Command, args []string) error { - return cli.inspect(cmd.Context(), args, url, diff, rev, noMetrics) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&url, "url", "u", "", "Prometheus url") - flags.BoolVar(&diff, "diff", false, "Show diff with latest version (for tainted items)") - flags.BoolVar(&rev, "rev", false, "Reverse diff output") - flags.BoolVar(&noMetrics, "no-metrics", false, "Don't show metrics (when cscli.output=human)") - - return cmd -} - func (cli cliItem) list(args []string, all bool) error { cfg := cli.cfg() - hub, err := require.Hub(cli.cfg(), nil, log.StandardLogger()) + hub, err := require.Hub(cli.cfg(), 
log.StandardLogger()) if err != nil { return err } @@ -460,91 +98,23 @@ func (cli cliItem) newListCmd() *cobra.Command { return cmd } -// return the diff between the installed version and the latest version -func (cli cliItem) itemDiff(ctx context.Context, item *cwhub.Item, reverse bool) (string, error) { - if !item.State.Installed { - return "", fmt.Errorf("'%s' is not installed", item.FQName()) - } - - dest, err := os.CreateTemp("", "cscli-diff-*") - if err != nil { - return "", fmt.Errorf("while creating temporary file: %w", err) - } - defer os.Remove(dest.Name()) - - _, remoteURL, err := item.FetchContentTo(ctx, dest.Name()) - if err != nil { - return "", err - } - - latestContent, err := os.ReadFile(dest.Name()) - if err != nil { - return "", fmt.Errorf("while reading %s: %w", dest.Name(), err) - } - - localContent, err := os.ReadFile(item.State.LocalPath) +func compInstalledItems(itemType string, args []string, toComplete string, cfg configGetter) ([]string, cobra.ShellCompDirective) { + hub, err := require.Hub(cfg(), nil) if err != nil { - return "", fmt.Errorf("while reading %s: %w", item.State.LocalPath, err) - } - - file1 := item.State.LocalPath - file2 := remoteURL - content1 := string(localContent) - content2 := string(latestContent) - - if reverse { - file1, file2 = file2, file1 - content1, content2 = content2, content1 - } - - edits := myers.ComputeEdits(span.URIFromPath(file1), content1, content2) - diff := gotextdiff.ToUnified(file1, file2, content1, edits) - - return fmt.Sprintf("%s", diff), nil -} - -func (cli cliItem) whyTainted(ctx context.Context, hub *cwhub.Hub, item *cwhub.Item, reverse bool) string { - if !item.State.Installed { - return fmt.Sprintf("# %s is not installed", item.FQName()) - } - - if !item.State.Tainted { - return fmt.Sprintf("# %s is not tainted", item.FQName()) - } - - if len(item.State.TaintedBy) == 0 { - return fmt.Sprintf("# %s is tainted but we don't know why. 
please report this as a bug", item.FQName()) - } - - ret := []string{ - fmt.Sprintf("# Let's see why %s is tainted.", item.FQName()), + return nil, cobra.ShellCompDirectiveDefault } - for _, fqsub := range item.State.TaintedBy { - ret = append(ret, fmt.Sprintf("\n-> %s\n", fqsub)) - - sub, err := hub.GetItemFQ(fqsub) - if err != nil { - ret = append(ret, err.Error()) - } - - diff, err := cli.itemDiff(ctx, sub, reverse) - if err != nil { - ret = append(ret, err.Error()) - } + items := hub.GetInstalledByType(itemType, true) - if diff != "" { - ret = append(ret, diff) - } else if len(sub.State.TaintedBy) > 0 { - taintList := strings.Join(sub.State.TaintedBy, ", ") - if sub.FQName() == taintList { - // hack: avoid message "item is tainted by itself" - continue - } + comp := make([]string, 0) - ret = append(ret, fmt.Sprintf("# %s is tainted by %s", sub.FQName(), taintList)) + for _, item := range items { + if strings.Contains(item.Name, toComplete) { + comp = append(comp, item.Name) } } - return strings.Join(ret, "\n") + cobra.CompDebugln(fmt.Sprintf("%s: %+v", itemType, comp), true) + + return comp, cobra.ShellCompDirectiveNoFileComp } diff --git a/cmd/crowdsec-cli/clihub/item_metrics.go b/cmd/crowdsec-cli/cliitem/metrics.go similarity index 78% rename from cmd/crowdsec-cli/clihub/item_metrics.go rename to cmd/crowdsec-cli/cliitem/metrics.go index f4af8f635db..4999ea38078 100644 --- a/cmd/crowdsec-cli/clihub/item_metrics.go +++ b/cmd/crowdsec-cli/cliitem/metrics.go @@ -1,6 +1,7 @@ -package clihub +package cliitem import ( + "fmt" "net/http" "strconv" "strings" @@ -16,22 +17,31 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/cwhub" ) -func showMetrics(prometheusURL string, hubItem *cwhub.Item, wantColor string) error { +func showMetrics(prometheusURL string, hub *cwhub.Hub, hubItem *cwhub.Item, wantColor string) error { switch hubItem.Type { case cwhub.PARSERS: - metrics := getParserMetric(prometheusURL, hubItem.Name) + metrics, err := getParserMetric(prometheusURL, 
hubItem.Name) + if err != nil { + return err + } parserMetricsTable(color.Output, wantColor, hubItem.Name, metrics) case cwhub.SCENARIOS: - metrics := getScenarioMetric(prometheusURL, hubItem.Name) + metrics, err := getScenarioMetric(prometheusURL, hubItem.Name) + if err != nil { + return err + } scenarioMetricsTable(color.Output, wantColor, hubItem.Name, metrics) case cwhub.COLLECTIONS: - for _, sub := range hubItem.SubItems() { - if err := showMetrics(prometheusURL, sub, wantColor); err != nil { + for sub := range hubItem.CurrentDependencies().SubItems(hub) { + if err := showMetrics(prometheusURL, hub, sub, wantColor); err != nil { return err } } case cwhub.APPSEC_RULES: - metrics := getAppsecRuleMetric(prometheusURL, hubItem.Name) + metrics, err := getAppsecRuleMetric(prometheusURL, hubItem.Name) + if err != nil { + return err + } appsecMetricsTable(color.Output, wantColor, hubItem.Name, metrics) default: // no metrics for this item type } @@ -40,11 +50,15 @@ func showMetrics(prometheusURL string, hubItem *cwhub.Item, wantColor string) er } // getParserMetric is a complete rip from prom2json -func getParserMetric(url string, itemName string) map[string]map[string]int { +func getParserMetric(url string, itemName string) (map[string]map[string]int, error) { stats := make(map[string]map[string]int) - result := getPrometheusMetric(url) - for idx, fam := range result { + results, err := getPrometheusMetric(url) + if err != nil { + return nil, err + } + + for idx, fam := range results { if !strings.HasPrefix(fam.Name, "cs_") { continue } @@ -128,10 +142,10 @@ func getParserMetric(url string, itemName string) map[string]map[string]int { } } - return stats + return stats, nil } -func getScenarioMetric(url string, itemName string) map[string]int { +func getScenarioMetric(url string, itemName string) (map[string]int, error) { stats := make(map[string]int) stats["instantiation"] = 0 @@ -140,8 +154,12 @@ func getScenarioMetric(url string, itemName string) map[string]int { 
stats["pour"] = 0 stats["underflow"] = 0 - result := getPrometheusMetric(url) - for idx, fam := range result { + results, err := getPrometheusMetric(url) + if err != nil { + return nil, err + } + + for idx, fam := range results { if !strings.HasPrefix(fam.Name, "cs_") { continue } @@ -192,16 +210,20 @@ func getScenarioMetric(url string, itemName string) map[string]int { } } - return stats + return stats, nil } -func getAppsecRuleMetric(url string, itemName string) map[string]int { +func getAppsecRuleMetric(url string, itemName string) (map[string]int, error) { stats := make(map[string]int) stats["inband_hits"] = 0 stats["outband_hits"] = 0 - results := getPrometheusMetric(url) + results, err := getPrometheusMetric(url) + if err != nil { + return nil, err + } + for idx, fam := range results { if !strings.HasPrefix(fam.Name, "cs_") { continue @@ -257,10 +279,10 @@ func getAppsecRuleMetric(url string, itemName string) map[string]int { } } - return stats + return stats, nil } -func getPrometheusMetric(url string) []*prom2json.Family { +func getPrometheusMetric(url string) ([]*prom2json.Family, error) { mfChan := make(chan *dto.MetricFamily, 1024) // Start with the DefaultTransport for sane defaults. @@ -271,12 +293,15 @@ func getPrometheusMetric(url string) []*prom2json.Family { // Timeout early if the server doesn't even return the headers. transport.ResponseHeaderTimeout = time.Minute + var fetchErr error + go func() { defer trace.CatchPanic("crowdsec/GetPrometheusMetric") - err := prom2json.FetchMetricFamilies(url, mfChan, transport) - if err != nil { - log.Fatalf("failed to fetch prometheus metrics : %v", err) + // mfChan is closed by prom2json.FetchMetricFamilies in all cases. 
+ if err := prom2json.FetchMetricFamilies(url, mfChan, transport); err != nil { + fetchErr = fmt.Errorf("failed to fetch prometheus metrics: %w", err) + return } }() @@ -285,7 +310,11 @@ func getPrometheusMetric(url string) []*prom2json.Family { result = append(result, prom2json.NewFamily(mf)) } + if fetchErr != nil { + return nil, fetchErr + } + log.Debugf("Finished reading prometheus output, %d entries", len(result)) - return result + return result, nil } diff --git a/cmd/crowdsec-cli/cliitem/metrics_table.go b/cmd/crowdsec-cli/cliitem/metrics_table.go new file mode 100644 index 00000000000..a41ea0fad39 --- /dev/null +++ b/cmd/crowdsec-cli/cliitem/metrics_table.go @@ -0,0 +1,70 @@ +package cliitem + +import ( + "fmt" + "io" + "strconv" + + "github.com/jedib0t/go-pretty/v6/table" + + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cstable" +) + +func appsecMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { + t := cstable.NewLight(out, wantColor).Writer + t.AppendHeader(table.Row{"Inband Hits", "Outband Hits"}) + + t.AppendRow(table.Row{ + strconv.Itoa(metrics["inband_hits"]), + strconv.Itoa(metrics["outband_hits"]), + }) + + t.SetTitle("(AppSec) " + itemName) + fmt.Fprintln(out, t.Render()) +} + +func scenarioMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]int) { + if metrics["instantiation"] == 0 { + return + } + + t := cstable.New(out, wantColor).Writer + t.AppendHeader(table.Row{"Current Count", "Overflows", "Instantiated", "Poured", "Expired"}) + + t.AppendRow(table.Row{ + strconv.Itoa(metrics["curr_count"]), + strconv.Itoa(metrics["overflow"]), + strconv.Itoa(metrics["instantiation"]), + strconv.Itoa(metrics["pour"]), + strconv.Itoa(metrics["underflow"]), + }) + + t.SetTitle("(Scenario) " + itemName) + fmt.Fprintln(out, t.Render()) +} + +func parserMetricsTable(out io.Writer, wantColor string, itemName string, metrics map[string]map[string]int) { + t := cstable.New(out, 
wantColor).Writer + t.AppendHeader(table.Row{"Parsers", "Hits", "Parsed", "Unparsed"}) + + // don't show table if no hits + showTable := false + + for source, stats := range metrics { + if stats["hits"] > 0 { + t.AppendRow(table.Row{ + source, + strconv.Itoa(stats["hits"]), + strconv.Itoa(stats["parsed"]), + strconv.Itoa(stats["unparsed"]), + }) + + showTable = true + } + } + + if showTable { + t.SetTitle("(Parser) " + itemName) + fmt.Fprintln(out, t.Render()) + } +} diff --git a/cmd/crowdsec-cli/cliitem/parser.go b/cmd/crowdsec-cli/cliitem/parser.go deleted file mode 100644 index bc1d96bdaf0..00000000000 --- a/cmd/crowdsec-cli/cliitem/parser.go +++ /dev/null @@ -1,41 +0,0 @@ -package cliitem - -import ( - "github.com/crowdsecurity/crowdsec/pkg/cwhub" -) - -func NewParser(cfg configGetter) *cliItem { - return &cliItem{ - cfg: cfg, - name: cwhub.PARSERS, - singular: "parser", - oneOrMore: "parser(s)", - help: cliHelp{ - example: `cscli parsers list -a -cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs -cscli parsers inspect crowdsecurity/caddy-logs crowdsecurity/sshd-logs -cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs -cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs -`, - }, - installHelp: cliHelp{ - example: `cscli parsers install crowdsecurity/caddy-logs crowdsecurity/sshd-logs`, - }, - removeHelp: cliHelp{ - example: `cscli parsers remove crowdsecurity/caddy-logs crowdsecurity/sshd-logs`, - }, - upgradeHelp: cliHelp{ - example: `cscli parsers upgrade crowdsecurity/caddy-logs crowdsecurity/sshd-logs`, - }, - inspectHelp: cliHelp{ - example: `cscli parsers inspect crowdsecurity/httpd-logs crowdsecurity/sshd-logs`, - }, - listHelp: cliHelp{ - example: `cscli parsers list -cscli parsers list -a -cscli parsers list crowdsecurity/caddy-logs crowdsecurity/sshd-logs - -List only enabled parsers unless "-a" or names are specified.`, - }, - } -} diff --git a/cmd/crowdsec-cli/cliitem/postoverflow.go 
b/cmd/crowdsec-cli/cliitem/postoverflow.go deleted file mode 100644 index ea53aef327d..00000000000 --- a/cmd/crowdsec-cli/cliitem/postoverflow.go +++ /dev/null @@ -1,41 +0,0 @@ -package cliitem - -import ( - "github.com/crowdsecurity/crowdsec/pkg/cwhub" -) - -func NewPostOverflow(cfg configGetter) *cliItem { - return &cliItem{ - cfg: cfg, - name: cwhub.POSTOVERFLOWS, - singular: "postoverflow", - oneOrMore: "postoverflow(s)", - help: cliHelp{ - example: `cscli postoverflows list -a -cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns -cscli postoverflows inspect crowdsecurity/cdn-whitelist crowdsecurity/rdns -cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdns -cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns -`, - }, - installHelp: cliHelp{ - example: `cscli postoverflows install crowdsecurity/cdn-whitelist crowdsecurity/rdns`, - }, - removeHelp: cliHelp{ - example: `cscli postoverflows remove crowdsecurity/cdn-whitelist crowdsecurity/rdns`, - }, - upgradeHelp: cliHelp{ - example: `cscli postoverflows upgrade crowdsecurity/cdn-whitelist crowdsecurity/rdns`, - }, - inspectHelp: cliHelp{ - example: `cscli postoverflows inspect crowdsecurity/cdn-whitelist crowdsecurity/rdns`, - }, - listHelp: cliHelp{ - example: `cscli postoverflows list -cscli postoverflows list -a -cscli postoverflows list crowdsecurity/cdn-whitelist crowdsecurity/rdns - -List only enabled postoverflows unless "-a" or names are specified.`, - }, - } -} diff --git a/cmd/crowdsec-cli/cliitem/suggest.go b/cmd/crowdsec-cli/cliitem/suggest.go deleted file mode 100644 index 5b080722af9..00000000000 --- a/cmd/crowdsec-cli/cliitem/suggest.go +++ /dev/null @@ -1,77 +0,0 @@ -package cliitem - -import ( - "fmt" - "slices" - "strings" - - "github.com/agext/levenshtein" - "github.com/spf13/cobra" - - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" -) - -// suggestNearestMessage 
returns a message with the most similar item name, if one is found -func suggestNearestMessage(hub *cwhub.Hub, itemType string, itemName string) string { - const maxDistance = 7 - - score := 100 - nearest := "" - - for _, item := range hub.GetItemsByType(itemType, false) { - d := levenshtein.Distance(itemName, item.Name, nil) - if d < score { - score = d - nearest = item.Name - } - } - - msg := fmt.Sprintf("can't find '%s' in %s", itemName, itemType) - - if score < maxDistance { - msg += fmt.Sprintf(", did you mean '%s'?", nearest) - } - - return msg -} - -func compAllItems(itemType string, args []string, toComplete string, cfg configGetter) ([]string, cobra.ShellCompDirective) { - hub, err := require.Hub(cfg(), nil, nil) - if err != nil { - return nil, cobra.ShellCompDirectiveDefault - } - - comp := make([]string, 0) - - for _, item := range hub.GetItemsByType(itemType, false) { - if !slices.Contains(args, item.Name) && strings.Contains(item.Name, toComplete) { - comp = append(comp, item.Name) - } - } - - cobra.CompDebugln(fmt.Sprintf("%s: %+v", itemType, comp), true) - - return comp, cobra.ShellCompDirectiveNoFileComp -} - -func compInstalledItems(itemType string, args []string, toComplete string, cfg configGetter) ([]string, cobra.ShellCompDirective) { - hub, err := require.Hub(cfg(), nil, nil) - if err != nil { - return nil, cobra.ShellCompDirectiveDefault - } - - items := hub.GetInstalledByType(itemType, true) - - comp := make([]string, 0) - - for _, item := range items { - if strings.Contains(item.Name, toComplete) { - comp = append(comp, item.Name) - } - } - - cobra.CompDebugln(fmt.Sprintf("%s: %+v", itemType, comp), true) - - return comp, cobra.ShellCompDirectiveNoFileComp -} diff --git a/cmd/crowdsec-cli/clilapi/context.go b/cmd/crowdsec-cli/clilapi/context.go index 20ceb2b9596..0730ba2b2a9 100644 --- a/cmd/crowdsec-cli/clilapi/context.go +++ b/cmd/crowdsec-cli/clilapi/context.go @@ -59,7 +59,7 @@ cscli lapi context add --value evt.Meta.source_ip --value 
evt.Meta.target_user `, DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { - hub, err := require.Hub(cli.cfg(), nil, nil) + hub, err := require.Hub(cli.cfg(), nil) if err != nil { return err } @@ -101,7 +101,7 @@ func (cli *cliLapi) newContextStatusCmd() *cobra.Command { DisableAutoGenTag: true, RunE: func(_ *cobra.Command, _ []string) error { cfg := cli.cfg() - hub, err := require.Hub(cfg, nil, nil) + hub, err := require.Hub(cfg, nil) if err != nil { return err } @@ -153,7 +153,7 @@ cscli lapi context detect crowdsecurity/sshd-logs return fmt.Errorf("failed to init expr helpers: %w", err) } - hub, err := require.Hub(cfg, nil, nil) + hub, err := require.Hub(cfg, nil) if err != nil { return err } diff --git a/cmd/crowdsec-cli/clilapi/register.go b/cmd/crowdsec-cli/clilapi/register.go index 4c9b0f39903..7430c73c3c8 100644 --- a/cmd/crowdsec-cli/clilapi/register.go +++ b/cmd/crowdsec-cli/clilapi/register.go @@ -28,7 +28,12 @@ func (cli *cliLapi) register(ctx context.Context, apiURL string, outputFile stri } } - password := strfmt.Password(idgen.GeneratePassword(idgen.PasswordLength)) + pstr, err := idgen.GeneratePassword(idgen.PasswordLength) + if err != nil { + return err + } + + password := strfmt.Password(pstr) apiurl, err := prepareAPIURL(cfg.API.Client, apiURL) if err != nil { @@ -82,7 +87,9 @@ func (cli *cliLapi) register(ctx context.Context, apiURL string, outputFile stri fmt.Printf("%s\n", string(apiConfigDump)) } - log.Warning(reload.Message) + if msg := reload.UserMessage(); msg != "" { + log.Warning(msg) + } return nil } diff --git a/cmd/crowdsec-cli/clilapi/status.go b/cmd/crowdsec-cli/clilapi/status.go index 6ff88834602..039c75e585d 100644 --- a/cmd/crowdsec-cli/clilapi/status.go +++ b/cmd/crowdsec-cli/clilapi/status.go @@ -102,7 +102,7 @@ func (cli *cliLapi) newStatusCmd() *cobra.Command { Args: cobra.MinimumNArgs(0), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - hub, err := require.Hub(cli.cfg(), 
nil, nil) + hub, err := require.Hub(cli.cfg(), nil) if err != nil { return err } diff --git a/cmd/crowdsec-cli/climachine/add.go b/cmd/crowdsec-cli/climachine/add.go index afddb4e4b65..b2595583823 100644 --- a/cmd/crowdsec-cli/climachine/add.go +++ b/cmd/crowdsec-cli/climachine/add.go @@ -65,12 +65,17 @@ func (cli *cliMachines) add(ctx context.Context, args []string, machinePassword return errors.New("please specify a password with --password or use --auto") } - machinePassword = idgen.GeneratePassword(idgen.PasswordLength) + machinePassword, err = idgen.GeneratePassword(idgen.PasswordLength) + if err != nil { + return err + } } else if machinePassword == "" && interactive { qs := &survey.Password{ Message: "Please provide a password for the machine:", } - survey.AskOne(qs, &machinePassword) + if err := survey.AskOne(qs, &machinePassword); err != nil { + return err + } } password := strfmt.Password(machinePassword) @@ -144,9 +149,9 @@ cscli machines add -f- --auto > /tmp/mycreds.yaml`, flags.VarP(&password, "password", "p", "machine password to login to the API") flags.StringVarP(&dumpFile, "file", "f", "", "output file destination (defaults to "+csconfig.DefaultConfigPath("local_api_credentials.yaml")+")") flags.StringVarP(&apiURL, "url", "u", "", "URL of the local API") - flags.BoolVarP(&interactive, "interactive", "i", false, "interfactive mode to enter the password") + flags.BoolVarP(&interactive, "interactive", "i", false, "interactive mode to enter the password") flags.BoolVarP(&autoAdd, "auto", "a", false, "automatically generate password (and username if not provided)") - flags.BoolVar(&force, "force", false, "will force add the machine if it already exist") + flags.BoolVar(&force, "force", false, "will force add the machine if it already exists") return cmd } diff --git a/cmd/crowdsec-cli/climachine/inspect.go b/cmd/crowdsec-cli/climachine/inspect.go index b08f2f62794..e973d07e96b 100644 --- a/cmd/crowdsec-cli/climachine/inspect.go +++ 
b/cmd/crowdsec-cli/climachine/inspect.go @@ -44,7 +44,7 @@ func (cli *cliMachines) inspectHubHuman(out io.Writer, machine *ent.Machine) { t.AppendHeader(table.Row{"Name", "Status", "Version"}) t.SetTitle(itemType) t.AppendRows(rows) - io.WriteString(out, t.Render()+"\n") + fmt.Fprintln(out, t.Render()) } } @@ -80,7 +80,7 @@ func (cli *cliMachines) inspectHuman(out io.Writer, machine *ent.Machine) { t.AppendRow(table.Row{"Collections", coll.Name}) } - io.WriteString(out, t.Render()+"\n") + fmt.Fprintln(out, t.Render()) } func (cli *cliMachines) inspect(machine *ent.Machine) error { diff --git a/cmd/crowdsec-cli/climachine/list.go b/cmd/crowdsec-cli/climachine/list.go index 6bedb2ad807..6fb45166aa2 100644 --- a/cmd/crowdsec-cli/climachine/list.go +++ b/cmd/crowdsec-cli/climachine/list.go @@ -55,7 +55,7 @@ func (cli *cliMachines) listHuman(out io.Writer, machines ent.Machines) { t.AppendRow(table.Row{m.MachineId, m.IpAddress, m.UpdatedAt.Format(time.RFC3339), validated, m.Version, clientinfo.GetOSNameAndVersion(m), m.AuthType, hb}) } - io.WriteString(out, t.Render()+"\n") + fmt.Fprintln(out, t.Render()) } func (cli *cliMachines) listCSV(out io.Writer, machines ent.Machines) error { @@ -90,7 +90,6 @@ func (cli *cliMachines) listCSV(out io.Writer, machines ent.Machines) error { func (cli *cliMachines) List(ctx context.Context, out io.Writer, db *database.Client) error { // XXX: must use the provided db object, the one in the struct might be nil // (calling List directly skips the PersistentPreRunE) - machines, err := db.ListMachines(ctx) if err != nil { return fmt.Errorf("unable to list machines: %w", err) diff --git a/cmd/crowdsec-cli/climetrics/list.go b/cmd/crowdsec-cli/climetrics/list.go index 27fa99710c8..32e2f8e0a80 100644 --- a/cmd/crowdsec-cli/climetrics/list.go +++ b/cmd/crowdsec-cli/climetrics/list.go @@ -3,7 +3,6 @@ package climetrics import ( "encoding/json" "fmt" - "io" "github.com/fatih/color" "github.com/jedib0t/go-pretty/v6/table" @@ -64,7 +63,7 @@ func 
(cli *cliMetrics) list() error { t.AppendRow(table.Row{metric.Type, metric.Title, metric.Description}) } - io.WriteString(out, t.Render()+"\n") + fmt.Fprintln(out, t.Render()) case "json": x, err := json.MarshalIndent(allMetrics, "", " ") if err != nil { diff --git a/cmd/crowdsec-cli/climetrics/show.go b/cmd/crowdsec-cli/climetrics/show.go index 045959048f6..172d3799435 100644 --- a/cmd/crowdsec-cli/climetrics/show.go +++ b/cmd/crowdsec-cli/climetrics/show.go @@ -4,11 +4,15 @@ import ( "context" "errors" "fmt" + "slices" + "strings" "github.com/fatih/color" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/crowdsecurity/go-cs-lib/maptools" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" ) @@ -99,6 +103,17 @@ cscli metrics list; cscli metrics list -o json cscli metrics show acquisition parsers scenarios stash -o json`, // Positional args are optional DisableAutoGenTag: true, + ValidArgsFunction: func(_ *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + ms := NewMetricStore() + ret := []string{} + for _, section := range maptools.SortedKeys(ms) { + if !slices.Contains(args, section) && strings.Contains(section, toComplete) { + ret = append(ret, section) + } + } + + return ret, cobra.ShellCompDirectiveNoFileComp + }, RunE: func(cmd *cobra.Command, args []string) error { args = expandAlias(args) return cli.show(cmd.Context(), args, url, noUnit) diff --git a/cmd/crowdsec-cli/climetrics/statacquis.go b/cmd/crowdsec-cli/climetrics/statacquis.go index 0af2e796f40..da17b1d9480 100644 --- a/cmd/crowdsec-cli/climetrics/statacquis.go +++ b/cmd/crowdsec-cli/climetrics/statacquis.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "github.com/jedib0t/go-pretty/v6/table" @@ -37,8 +38,7 @@ func (s statAcquis) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting acquis stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - 
io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statalert.go b/cmd/crowdsec-cli/climetrics/statalert.go index 942eceaa75c..416b78f0508 100644 --- a/cmd/crowdsec-cli/climetrics/statalert.go +++ b/cmd/crowdsec-cli/climetrics/statalert.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "strconv" @@ -38,8 +39,7 @@ func (s statAlert) Table(out io.Writer, wantColor string, noUnit bool, showEmpty if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statappsecengine.go b/cmd/crowdsec-cli/climetrics/statappsecengine.go index d924375247f..93cc1283c96 100644 --- a/cmd/crowdsec-cli/climetrics/statappsecengine.go +++ b/cmd/crowdsec-cli/climetrics/statappsecengine.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "github.com/jedib0t/go-pretty/v6/table" @@ -34,8 +35,7 @@ func (s statAppsecEngine) Table(out io.Writer, wantColor string, noUnit bool, sh log.Warningf("while collecting appsec stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statappsecrule.go b/cmd/crowdsec-cli/climetrics/statappsecrule.go index e06a7c2e2b3..8e243aba642 100644 --- a/cmd/crowdsec-cli/climetrics/statappsecrule.go +++ b/cmd/crowdsec-cli/climetrics/statappsecrule.go @@ -40,9 +40,8 @@ func (s statAppsecRule) Table(out io.Writer, wantColor string, noUnit bool, show if numRows, err := metricsToTable(t, appsecEngineRulesStats, keys, noUnit); err != nil { log.Warningf("while collecting appsec rules stats: %s", err) } 
else if numRows > 0 || showEmpty { - io.WriteString(out, fmt.Sprintf("Appsec '%s' Rules Metrics:\n", appsecEngine)) - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(fmt.Sprintf("Appsec '%s' Rules Metrics", appsecEngine)) + fmt.Fprintln(out, t.Render()) } } } diff --git a/cmd/crowdsec-cli/climetrics/statbouncer.go b/cmd/crowdsec-cli/climetrics/statbouncer.go index bc0da152d6d..ac79074d506 100644 --- a/cmd/crowdsec-cli/climetrics/statbouncer.go +++ b/cmd/crowdsec-cli/climetrics/statbouncer.go @@ -176,17 +176,20 @@ func (*statBouncer) extractRawMetrics(metrics []*ent.Metric) ([]bouncerMetricIte if item.Name == nil { logWarningOnce(warningsLogged, "missing 'name' field in metrics reported by "+bouncerName) + // no continue - keep checking the rest valid = false } if item.Unit == nil { logWarningOnce(warningsLogged, "missing 'unit' field in metrics reported by "+bouncerName) + valid = false } if item.Value == nil { logWarningOnce(warningsLogged, "missing 'value' field in metrics reported by "+bouncerName) + valid = false } @@ -439,11 +442,8 @@ func (s *statBouncer) bouncerTable(out io.Writer, bouncerName string, wantColor title = fmt.Sprintf("%s since %s", title, s.oldestTS[bouncerName].String()) } - // don't use SetTitle() because it draws the title inside table box - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - // empty line between tables - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } // Table displays a table of metrics for each bouncer @@ -452,10 +452,11 @@ func (s *statBouncer) Table(out io.Writer, wantColor string, noUnit bool, showEm for _, bouncerName := range maptools.SortedKeys(s.aggOverOrigin) { s.bouncerTable(out, bouncerName, wantColor, noUnit) + found = true } if !found && showEmpty { - io.WriteString(out, "No bouncer metrics found.\n\n") + fmt.Fprintln(out, "No bouncer metrics found.") } } diff --git a/cmd/crowdsec-cli/climetrics/statbucket.go 
b/cmd/crowdsec-cli/climetrics/statbucket.go index 1882fe21df1..4cddfeb3731 100644 --- a/cmd/crowdsec-cli/climetrics/statbucket.go +++ b/cmd/crowdsec-cli/climetrics/statbucket.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "github.com/jedib0t/go-pretty/v6/table" @@ -35,8 +36,7 @@ func (s statBucket) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting scenario stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statdecision.go b/cmd/crowdsec-cli/climetrics/statdecision.go index b862f49ff12..2f27410f56f 100644 --- a/cmd/crowdsec-cli/climetrics/statdecision.go +++ b/cmd/crowdsec-cli/climetrics/statdecision.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "strconv" @@ -53,8 +54,7 @@ func (s statDecision) Table(out io.Writer, wantColor string, noUnit bool, showEm if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statlapi.go b/cmd/crowdsec-cli/climetrics/statlapi.go index 9559eacf0f4..2f460ca5a71 100644 --- a/cmd/crowdsec-cli/climetrics/statlapi.go +++ b/cmd/crowdsec-cli/climetrics/statlapi.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "strconv" @@ -49,8 +50,7 @@ func (s statLapi) Table(out io.Writer, wantColor string, noUnit bool, showEmpty if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statlapibouncer.go b/cmd/crowdsec-cli/climetrics/statlapibouncer.go index 
5e5f63a79d3..2ea6b67cd0a 100644 --- a/cmd/crowdsec-cli/climetrics/statlapibouncer.go +++ b/cmd/crowdsec-cli/climetrics/statlapibouncer.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "github.com/jedib0t/go-pretty/v6/table" @@ -35,8 +36,7 @@ func (s statLapiBouncer) Table(out io.Writer, wantColor string, noUnit bool, sho if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statlapidecision.go b/cmd/crowdsec-cli/climetrics/statlapidecision.go index 44f0e8f4b87..3371cb0e8ff 100644 --- a/cmd/crowdsec-cli/climetrics/statlapidecision.go +++ b/cmd/crowdsec-cli/climetrics/statlapidecision.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "strconv" @@ -57,8 +58,7 @@ func (s statLapiDecision) Table(out io.Writer, wantColor string, noUnit bool, sh if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statlapimachine.go b/cmd/crowdsec-cli/climetrics/statlapimachine.go index 0e6693bea82..04fbb98ae8e 100644 --- a/cmd/crowdsec-cli/climetrics/statlapimachine.go +++ b/cmd/crowdsec-cli/climetrics/statlapimachine.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "github.com/jedib0t/go-pretty/v6/table" @@ -35,8 +36,7 @@ func (s statLapiMachine) Table(out io.Writer, wantColor string, noUnit bool, sho if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statparser.go b/cmd/crowdsec-cli/climetrics/statparser.go index 520e68f9adf..bdc9caa8597 100644 --- 
a/cmd/crowdsec-cli/climetrics/statparser.go +++ b/cmd/crowdsec-cli/climetrics/statparser.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "github.com/jedib0t/go-pretty/v6/table" @@ -36,8 +37,7 @@ func (s statParser) Table(out io.Writer, wantColor string, noUnit bool, showEmpt log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statstash.go b/cmd/crowdsec-cli/climetrics/statstash.go index 2729de931a1..496deaf0535 100644 --- a/cmd/crowdsec-cli/climetrics/statstash.go +++ b/cmd/crowdsec-cli/climetrics/statstash.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "strconv" @@ -52,8 +53,7 @@ func (s statStash) Table(out io.Writer, wantColor string, noUnit bool, showEmpty if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/statwhitelist.go b/cmd/crowdsec-cli/climetrics/statwhitelist.go index 7f533b45b4b..a42f653d50d 100644 --- a/cmd/crowdsec-cli/climetrics/statwhitelist.go +++ b/cmd/crowdsec-cli/climetrics/statwhitelist.go @@ -1,6 +1,7 @@ package climetrics import ( + "fmt" "io" "github.com/jedib0t/go-pretty/v6/table" @@ -36,8 +37,7 @@ func (s statWhitelist) Table(out io.Writer, wantColor string, noUnit bool, showE log.Warningf("while collecting parsers stats: %s", err) } else if numRows > 0 || showEmpty { title, _ := s.Description() - io.WriteString(out, title+":\n") - io.WriteString(out, t.Render()+"\n") - io.WriteString(out, "\n") + t.SetTitle(title) + fmt.Fprintln(out, t.Render()) } } diff --git a/cmd/crowdsec-cli/climetrics/store.go b/cmd/crowdsec-cli/climetrics/store.go index 
55fab5dbd7f..6c402447901 100644 --- a/cmd/crowdsec-cli/climetrics/store.go +++ b/cmd/crowdsec-cli/climetrics/store.go @@ -262,7 +262,8 @@ func (ms metricStore) Format(out io.Writer, wantColor string, sections []string, if err != nil { return fmt.Errorf("failed to serialize metrics: %w", err) } - out.Write(x) + + fmt.Fprint(out, string(x)) default: return fmt.Errorf("output format '%s' not supported for this command", outputFormat) } diff --git a/cmd/crowdsec-cli/clinotifications/notifications.go b/cmd/crowdsec-cli/clinotifications/notifications.go index baf899c10cf..80ffebeaa23 100644 --- a/cmd/crowdsec-cli/clinotifications/notifications.go +++ b/cmd/crowdsec-cli/clinotifications/notifications.go @@ -260,7 +260,7 @@ func (cli *cliNotifications) notificationConfigFilter(cmd *cobra.Command, args [ return ret, cobra.ShellCompDirectiveNoFileComp } -func (cli cliNotifications) newTestCmd() *cobra.Command { +func (cli *cliNotifications) newTestCmd() *cobra.Command { var ( pluginBroker csplugin.PluginBroker pluginTomb tomb.Tomb diff --git a/cmd/crowdsec-cli/clisetup/setup.go b/cmd/crowdsec-cli/clisetup/setup.go index 269cdfb78e9..77c357e7251 100644 --- a/cmd/crowdsec-cli/clisetup/setup.go +++ b/cmd/crowdsec-cli/clisetup/setup.go @@ -94,7 +94,10 @@ func (cli *cliSetup) newDetectCmd() *cobra.Command { } func (cli *cliSetup) newInstallHubCmd() *cobra.Command { - var dryRun bool + var ( + yes bool + dryRun bool + ) cmd := &cobra.Command{ Use: "install-hub [setup_file] [flags]", @@ -102,12 +105,14 @@ func (cli *cliSetup) newInstallHubCmd() *cobra.Command { Args: cobra.ExactArgs(1), DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { - return cli.install(cmd.Context(), dryRun, args[0]) + return cli.install(cmd.Context(), yes, dryRun, args[0]) }, } flags := cmd.Flags() + flags.BoolVarP(&yes, "yes", "y", false, "confirm execution without prompt") flags.BoolVar(&dryRun, "dry-run", false, "don't install anything; print out what would have been") + 
cmd.MarkFlagsMutuallyExclusive("yes", "dry-run") return cmd } @@ -276,7 +281,7 @@ func (cli *cliSetup) dataSources(fromFile string, toDir string) error { return nil } -func (cli *cliSetup) install(ctx context.Context, dryRun bool, fromFile string) error { +func (cli *cliSetup) install(ctx context.Context, yes bool, dryRun bool, fromFile string) error { input, err := os.ReadFile(fromFile) if err != nil { return fmt.Errorf("while reading file %s: %w", fromFile, err) @@ -284,12 +289,16 @@ func (cli *cliSetup) install(ctx context.Context, dryRun bool, fromFile string) cfg := cli.cfg() - hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), log.StandardLogger()) + hub, err := require.Hub(cfg, log.StandardLogger()) if err != nil { return err } - return setup.InstallHubItems(ctx, hub, input, dryRun) + verbose := (cfg.Cscli.Output == "raw") + + contentProvider := require.HubDownloader(ctx, cfg) + + return setup.InstallHubItems(ctx, hub, contentProvider, input, yes, dryRun, verbose) } func (cli *cliSetup) validate(fromFile string) error { diff --git a/cmd/crowdsec-cli/clisimulation/simulation.go b/cmd/crowdsec-cli/clisimulation/simulation.go index 8136aa213c3..1b46c70c90a 100644 --- a/cmd/crowdsec-cli/clisimulation/simulation.go +++ b/cmd/crowdsec-cli/clisimulation/simulation.go @@ -47,8 +47,8 @@ cscli simulation disable crowdsecurity/ssh-bf`, return nil }, PersistentPostRun: func(cmd *cobra.Command, _ []string) { - if cmd.Name() != "status" { - log.Info(reload.Message) + if msg := reload.UserMessage(); msg != "" && cmd.Name() != "status" { + log.Info(msg) } }, } @@ -71,7 +71,7 @@ func (cli *cliSimulation) newEnableCmd() *cobra.Command { Example: `cscli simulation enable`, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { - hub, err := require.Hub(cli.cfg(), nil, nil) + hub, err := require.Hub(cli.cfg(), nil) if err != nil { return err } diff --git a/cmd/crowdsec-cli/clisupport/support.go b/cmd/crowdsec-cli/clisupport/support.go index 
4474f5c8f11..eb3e03df253 100644 --- a/cmd/crowdsec-cli/clisupport/support.go +++ b/cmd/crowdsec-cli/clisupport/support.go @@ -290,7 +290,7 @@ func (cli *cliSupport) dumpConfigYAML(zw *zip.Writer) error { cfg := cli.cfg() - config, err := os.ReadFile(*cfg.FilePath) + config, err := os.ReadFile(cfg.FilePath) if err != nil { return fmt.Errorf("could not read config file: %w", err) } @@ -314,7 +314,7 @@ func (cli *cliSupport) dumpPprof(ctx context.Context, zw *zip.Writer, prometheus ctx, http.MethodGet, fmt.Sprintf( - "http://%s/debug/pprof/%s?debug=1", + "http://%s/debug/pprof/%s", net.JoinHostPort( prometheusCfg.ListenAddr, strconv.Itoa(prometheusCfg.ListenPort), @@ -491,9 +491,9 @@ func (cli *cliSupport) dump(ctx context.Context, outFile string) error { skipAgent = true } - hub, err := require.Hub(cfg, nil, nil) + hub, err := require.Hub(cfg, nil) if err != nil { - log.Warn("Could not init hub, running on LAPI ? Hub related information will not be collected") + log.Warn("Could not init hub, running on LAPI? 
Hub related information will not be collected") // XXX: lapi status check requires scenarios, will return an error } diff --git a/cmd/crowdsec-cli/completion.go b/cmd/crowdsec-cli/completion.go index 7b6531f5516..fb60f9afab0 100644 --- a/cmd/crowdsec-cli/completion.go +++ b/cmd/crowdsec-cli/completion.go @@ -71,13 +71,13 @@ func NewCompletionCmd() *cobra.Command { Run: func(cmd *cobra.Command, args []string) { switch args[0] { case "bash": - cmd.Root().GenBashCompletion(os.Stdout) + _ = cmd.Root().GenBashCompletion(os.Stdout) case "zsh": - cmd.Root().GenZshCompletion(os.Stdout) + _ = cmd.Root().GenZshCompletion(os.Stdout) case "powershell": - cmd.Root().GenPowerShellCompletion(os.Stdout) + _ = cmd.Root().GenPowerShellCompletion(os.Stdout) case "fish": - cmd.Root().GenFishCompletion(os.Stdout, true) + _ = cmd.Root().GenFishCompletion(os.Stdout, true) } }, } diff --git a/cmd/crowdsec-cli/config_backup.go b/cmd/crowdsec-cli/config_backup.go deleted file mode 100644 index d23aff80a78..00000000000 --- a/cmd/crowdsec-cli/config_backup.go +++ /dev/null @@ -1,236 +0,0 @@ -package main - -import ( - "encoding/json" - "errors" - "fmt" - "os" - "path/filepath" - - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" -) - -func (cli *cliConfig) backupHub(dirPath string) error { - hub, err := require.Hub(cli.cfg(), nil, nil) - if err != nil { - return err - } - - for _, itemType := range cwhub.ItemTypes { - clog := log.WithField("type", itemType) - - itemMap := hub.GetItemMap(itemType) - if itemMap == nil { - clog.Infof("No %s to backup.", itemType) - continue - } - - itemDirectory := fmt.Sprintf("%s/%s/", dirPath, itemType) - if err = os.MkdirAll(itemDirectory, os.ModePerm); err != nil { - return fmt.Errorf("error while creating %s: %w", itemDirectory, err) - } - - upstreamParsers := []string{} - - for k, v := range itemMap { - clog = clog.WithField("file", 
v.Name) - if !v.State.Installed { // only backup installed ones - clog.Debugf("[%s]: not installed", k) - continue - } - - // for the local/tainted ones, we back up the full file - if v.State.Tainted || v.State.IsLocal() || !v.State.UpToDate { - // we need to backup stages for parsers - if itemType == cwhub.PARSERS || itemType == cwhub.POSTOVERFLOWS { - fstagedir := fmt.Sprintf("%s%s", itemDirectory, v.Stage) - if err = os.MkdirAll(fstagedir, os.ModePerm); err != nil { - return fmt.Errorf("error while creating stage dir %s: %w", fstagedir, err) - } - } - - clog.Debugf("[%s]: backing up file (tainted:%t local:%t up-to-date:%t)", k, v.State.Tainted, v.State.IsLocal(), v.State.UpToDate) - - tfile := fmt.Sprintf("%s%s/%s", itemDirectory, v.Stage, v.FileName) - if err = CopyFile(v.State.LocalPath, tfile); err != nil { - return fmt.Errorf("failed copy %s %s to %s: %w", itemType, v.State.LocalPath, tfile, err) - } - - clog.Infof("local/tainted saved %s to %s", v.State.LocalPath, tfile) - - continue - } - - clog.Debugf("[%s]: from hub, just backup name (up-to-date:%t)", k, v.State.UpToDate) - clog.Infof("saving, version:%s, up-to-date:%t", v.Version, v.State.UpToDate) - upstreamParsers = append(upstreamParsers, v.Name) - } - // write the upstream items - upstreamParsersFname := fmt.Sprintf("%s/upstream-%s.json", itemDirectory, itemType) - - upstreamParsersContent, err := json.MarshalIndent(upstreamParsers, "", " ") - if err != nil { - return fmt.Errorf("failed to serialize upstream parsers: %w", err) - } - - err = os.WriteFile(upstreamParsersFname, upstreamParsersContent, 0o644) - if err != nil { - return fmt.Errorf("unable to write to %s %s: %w", itemType, upstreamParsersFname, err) - } - - clog.Infof("Wrote %d entries for %s to %s", len(upstreamParsers), itemType, upstreamParsersFname) - } - - return nil -} - -/* - Backup crowdsec configurations to directory : - -- Main config (config.yaml) -- Profiles config (profiles.yaml) -- Simulation config (simulation.yaml) -- 
Backup of API credentials (local API and online API) -- List of scenarios, parsers, postoverflows and collections that are up-to-date -- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections -- Acquisition files (acquis.yaml, acquis.d/*.yaml) -*/ -func (cli *cliConfig) backup(dirPath string) error { - var err error - - cfg := cli.cfg() - - if dirPath == "" { - return errors.New("directory path can't be empty") - } - - log.Infof("Starting configuration backup") - - /*if parent directory doesn't exist, bail out. create final dir with Mkdir*/ - parentDir := filepath.Dir(dirPath) - if _, err = os.Stat(parentDir); err != nil { - return fmt.Errorf("while checking parent directory %s existence: %w", parentDir, err) - } - - if err = os.Mkdir(dirPath, 0o700); err != nil { - return fmt.Errorf("while creating %s: %w", dirPath, err) - } - - if cfg.ConfigPaths.SimulationFilePath != "" { - backupSimulation := filepath.Join(dirPath, "simulation.yaml") - if err = CopyFile(cfg.ConfigPaths.SimulationFilePath, backupSimulation); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", cfg.ConfigPaths.SimulationFilePath, backupSimulation, err) - } - - log.Infof("Saved simulation to %s", backupSimulation) - } - - /* - - backup AcquisitionFilePath - - backup the other files of acquisition directory - */ - if cfg.Crowdsec != nil && cfg.Crowdsec.AcquisitionFilePath != "" { - backupAcquisition := filepath.Join(dirPath, "acquis.yaml") - if err = CopyFile(cfg.Crowdsec.AcquisitionFilePath, backupAcquisition); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", cfg.Crowdsec.AcquisitionFilePath, backupAcquisition, err) - } - } - - acquisBackupDir := filepath.Join(dirPath, "acquis") - if err = os.Mkdir(acquisBackupDir, 0o700); err != nil { - return fmt.Errorf("error while creating %s: %w", acquisBackupDir, err) - } - - if cfg.Crowdsec != nil && len(cfg.Crowdsec.AcquisitionFiles) > 0 { - for _, acquisFile := range cfg.Crowdsec.AcquisitionFiles { - /*if it was 
the default one, it was already backup'ed*/ - if cfg.Crowdsec.AcquisitionFilePath == acquisFile { - continue - } - - targetFname, err := filepath.Abs(filepath.Join(acquisBackupDir, filepath.Base(acquisFile))) - if err != nil { - return fmt.Errorf("while saving %s to %s: %w", acquisFile, acquisBackupDir, err) - } - - if err = CopyFile(acquisFile, targetFname); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", acquisFile, targetFname, err) - } - - log.Infof("Saved acquis %s to %s", acquisFile, targetFname) - } - } - - if ConfigFilePath != "" { - backupMain := fmt.Sprintf("%s/config.yaml", dirPath) - if err = CopyFile(ConfigFilePath, backupMain); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", ConfigFilePath, backupMain, err) - } - - log.Infof("Saved default yaml to %s", backupMain) - } - - if cfg.API != nil && cfg.API.Server != nil && cfg.API.Server.OnlineClient != nil && cfg.API.Server.OnlineClient.CredentialsFilePath != "" { - backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath) - if err = CopyFile(cfg.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", cfg.API.Server.OnlineClient.CredentialsFilePath, backupCAPICreds, err) - } - - log.Infof("Saved online API credentials to %s", backupCAPICreds) - } - - if cfg.API != nil && cfg.API.Client != nil && cfg.API.Client.CredentialsFilePath != "" { - backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath) - if err = CopyFile(cfg.API.Client.CredentialsFilePath, backupLAPICreds); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", cfg.API.Client.CredentialsFilePath, backupLAPICreds, err) - } - - log.Infof("Saved local API credentials to %s", backupLAPICreds) - } - - if cfg.API != nil && cfg.API.Server != nil && cfg.API.Server.ProfilesPath != "" { - backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath) - if err = CopyFile(cfg.API.Server.ProfilesPath, backupProfiles); err != nil { - 
return fmt.Errorf("failed copy %s to %s: %w", cfg.API.Server.ProfilesPath, backupProfiles, err) - } - - log.Infof("Saved profiles to %s", backupProfiles) - } - - if err = cli.backupHub(dirPath); err != nil { - return fmt.Errorf("failed to backup hub config: %w", err) - } - - return nil -} - -func (cli *cliConfig) newBackupCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: `backup "directory"`, - Short: "Backup current config", - Long: `Backup the current crowdsec configuration including : - -- Main config (config.yaml) -- Simulation config (simulation.yaml) -- Profiles config (profiles.yaml) -- List of scenarios, parsers, postoverflows and collections that are up-to-date -- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections -- Backup of API credentials (local API and online API)`, - Example: `cscli config backup ./my-backup`, - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: func(_ *cobra.Command, args []string) error { - if err := cli.backup(args[0]); err != nil { - return fmt.Errorf("failed to backup config: %w", err) - } - - return nil - }, - } - - return cmd -} diff --git a/cmd/crowdsec-cli/config_restore.go b/cmd/crowdsec-cli/config_restore.go deleted file mode 100644 index c32328485ec..00000000000 --- a/cmd/crowdsec-cli/config_restore.go +++ /dev/null @@ -1,274 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "fmt" - "os" - "path/filepath" - - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - - "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/require" - "github.com/crowdsecurity/crowdsec/pkg/cwhub" -) - -func (cli *cliConfig) restoreHub(ctx context.Context, dirPath string) error { - cfg := cli.cfg() - - hub, err := require.Hub(cfg, require.RemoteHub(ctx, cfg), nil) - if err != nil { - return err - } - - for _, itype := range cwhub.ItemTypes { - itemDirectory := fmt.Sprintf("%s/%s/", dirPath, itype) - if _, err = os.Stat(itemDirectory); err != nil { - log.Infof("no %s in backup", itype) 
- continue - } - /*restore the upstream items*/ - upstreamListFN := fmt.Sprintf("%s/upstream-%s.json", itemDirectory, itype) - - file, err := os.ReadFile(upstreamListFN) - if err != nil { - return fmt.Errorf("error while opening %s: %w", upstreamListFN, err) - } - - var upstreamList []string - - err = json.Unmarshal(file, &upstreamList) - if err != nil { - return fmt.Errorf("error parsing %s: %w", upstreamListFN, err) - } - - for _, toinstall := range upstreamList { - item := hub.GetItem(itype, toinstall) - if item == nil { - log.Errorf("Item %s/%s not found in hub", itype, toinstall) - continue - } - - if err = item.Install(ctx, false, false); err != nil { - log.Errorf("Error while installing %s : %s", toinstall, err) - } - } - - /*restore the local and tainted items*/ - files, err := os.ReadDir(itemDirectory) - if err != nil { - return fmt.Errorf("failed enumerating files of %s: %w", itemDirectory, err) - } - - for _, file := range files { - // this was the upstream data - if file.Name() == fmt.Sprintf("upstream-%s.json", itype) { - continue - } - - if itype == cwhub.PARSERS || itype == cwhub.POSTOVERFLOWS { - // we expect a stage here - if !file.IsDir() { - continue - } - - stage := file.Name() - stagedir := fmt.Sprintf("%s/%s/%s/", cfg.ConfigPaths.ConfigDir, itype, stage) - log.Debugf("Found stage %s in %s, target directory : %s", stage, itype, stagedir) - - if err = os.MkdirAll(stagedir, os.ModePerm); err != nil { - return fmt.Errorf("error while creating stage directory %s: %w", stagedir, err) - } - - // find items - ifiles, err := os.ReadDir(itemDirectory + "/" + stage + "/") - if err != nil { - return fmt.Errorf("failed enumerating files of %s: %w", itemDirectory+"/"+stage, err) - } - - // finally copy item - for _, tfile := range ifiles { - log.Infof("Going to restore local/tainted [%s]", tfile.Name()) - sourceFile := fmt.Sprintf("%s/%s/%s", itemDirectory, stage, tfile.Name()) - - destinationFile := fmt.Sprintf("%s%s", stagedir, tfile.Name()) - if err = 
CopyFile(sourceFile, destinationFile); err != nil { - return fmt.Errorf("failed copy %s %s to %s: %w", itype, sourceFile, destinationFile, err) - } - - log.Infof("restored %s to %s", sourceFile, destinationFile) - } - } else { - log.Infof("Going to restore local/tainted [%s]", file.Name()) - sourceFile := fmt.Sprintf("%s/%s", itemDirectory, file.Name()) - destinationFile := fmt.Sprintf("%s/%s/%s", cfg.ConfigPaths.ConfigDir, itype, file.Name()) - - if err = CopyFile(sourceFile, destinationFile); err != nil { - return fmt.Errorf("failed copy %s %s to %s: %w", itype, sourceFile, destinationFile, err) - } - - log.Infof("restored %s to %s", sourceFile, destinationFile) - } - } - } - - return nil -} - -/* - Restore crowdsec configurations to directory : - -- Main config (config.yaml) -- Profiles config (profiles.yaml) -- Simulation config (simulation.yaml) -- Backup of API credentials (local API and online API) -- List of scenarios, parsers, postoverflows and collections that are up-to-date -- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections -- Acquisition files (acquis.yaml, acquis.d/*.yaml) -*/ -func (cli *cliConfig) restore(ctx context.Context, dirPath string) error { - var err error - - cfg := cli.cfg() - - backupMain := fmt.Sprintf("%s/config.yaml", dirPath) - if _, err = os.Stat(backupMain); err == nil { - if cfg.ConfigPaths != nil && cfg.ConfigPaths.ConfigDir != "" { - if err = CopyFile(backupMain, fmt.Sprintf("%s/config.yaml", cfg.ConfigPaths.ConfigDir)); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupMain, cfg.ConfigPaths.ConfigDir, err) - } - } - } - - // Now we have config.yaml, we should regenerate config struct to have rights paths etc - ConfigFilePath = fmt.Sprintf("%s/config.yaml", cfg.ConfigPaths.ConfigDir) - - log.Debug("Reloading configuration") - - csConfig, _, err = loadConfigFor("config") - if err != nil { - return fmt.Errorf("failed to reload configuration: %w", err) - } - - cfg = cli.cfg() - - 
backupCAPICreds := fmt.Sprintf("%s/online_api_credentials.yaml", dirPath) - if _, err = os.Stat(backupCAPICreds); err == nil { - if err = CopyFile(backupCAPICreds, cfg.API.Server.OnlineClient.CredentialsFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupCAPICreds, cfg.API.Server.OnlineClient.CredentialsFilePath, err) - } - } - - backupLAPICreds := fmt.Sprintf("%s/local_api_credentials.yaml", dirPath) - if _, err = os.Stat(backupLAPICreds); err == nil { - if err = CopyFile(backupLAPICreds, cfg.API.Client.CredentialsFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupLAPICreds, cfg.API.Client.CredentialsFilePath, err) - } - } - - backupProfiles := fmt.Sprintf("%s/profiles.yaml", dirPath) - if _, err = os.Stat(backupProfiles); err == nil { - if err = CopyFile(backupProfiles, cfg.API.Server.ProfilesPath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupProfiles, cfg.API.Server.ProfilesPath, err) - } - } - - backupSimulation := fmt.Sprintf("%s/simulation.yaml", dirPath) - if _, err = os.Stat(backupSimulation); err == nil { - if err = CopyFile(backupSimulation, cfg.ConfigPaths.SimulationFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupSimulation, cfg.ConfigPaths.SimulationFilePath, err) - } - } - - /*if there is a acquisition dir, restore its content*/ - if cfg.Crowdsec.AcquisitionDirPath != "" { - if err = os.MkdirAll(cfg.Crowdsec.AcquisitionDirPath, 0o700); err != nil { - return fmt.Errorf("error while creating %s: %w", cfg.Crowdsec.AcquisitionDirPath, err) - } - } - - // if there was a single one - backupAcquisition := fmt.Sprintf("%s/acquis.yaml", dirPath) - if _, err = os.Stat(backupAcquisition); err == nil { - log.Debugf("restoring backup'ed %s", backupAcquisition) - - if err = CopyFile(backupAcquisition, cfg.Crowdsec.AcquisitionFilePath); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", backupAcquisition, cfg.Crowdsec.AcquisitionFilePath, err) - } - } - 
- // if there are files in the acquis backup dir, restore them - acquisBackupDir := filepath.Join(dirPath, "acquis", "*.yaml") - if acquisFiles, err := filepath.Glob(acquisBackupDir); err == nil { - for _, acquisFile := range acquisFiles { - targetFname, err := filepath.Abs(cfg.Crowdsec.AcquisitionDirPath + "/" + filepath.Base(acquisFile)) - if err != nil { - return fmt.Errorf("while saving %s to %s: %w", acquisFile, targetFname, err) - } - - log.Debugf("restoring %s to %s", acquisFile, targetFname) - - if err = CopyFile(acquisFile, targetFname); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", acquisFile, targetFname, err) - } - } - } - - if cfg.Crowdsec != nil && len(cfg.Crowdsec.AcquisitionFiles) > 0 { - for _, acquisFile := range cfg.Crowdsec.AcquisitionFiles { - log.Infof("backup filepath from dir -> %s", acquisFile) - - // if it was the default one, it has already been backed up - if cfg.Crowdsec.AcquisitionFilePath == acquisFile { - log.Infof("skip this one") - continue - } - - targetFname, err := filepath.Abs(filepath.Join(acquisBackupDir, filepath.Base(acquisFile))) - if err != nil { - return fmt.Errorf("while saving %s to %s: %w", acquisFile, acquisBackupDir, err) - } - - if err = CopyFile(acquisFile, targetFname); err != nil { - return fmt.Errorf("failed copy %s to %s: %w", acquisFile, targetFname, err) - } - - log.Infof("Saved acquis %s to %s", acquisFile, targetFname) - } - } - - if err = cli.restoreHub(ctx, dirPath); err != nil { - return fmt.Errorf("failed to restore hub config: %w", err) - } - - return nil -} - -func (cli *cliConfig) newRestoreCmd() *cobra.Command { - cmd := &cobra.Command{ - Use: `restore "directory"`, - Short: `Restore config in backup "directory"`, - Long: `Restore the crowdsec configuration from specified backup "directory" including: - -- Main config (config.yaml) -- Simulation config (simulation.yaml) -- Profiles config (profiles.yaml) -- List of scenarios, parsers, postoverflows and collections that are up-to-date 
-- Tainted/local/out-of-date scenarios, parsers, postoverflows and collections -- Backup of API credentials (local API and online API)`, - Args: cobra.ExactArgs(1), - DisableAutoGenTag: true, - RunE: func(cmd *cobra.Command, args []string) error { - dirPath := args[0] - - if err := cli.restore(cmd.Context(), dirPath); err != nil { - return fmt.Errorf("failed to restore config from %s: %w", dirPath, err) - } - - return nil - }, - } - - return cmd -} diff --git a/cmd/crowdsec-cli/copyfile.go b/cmd/crowdsec-cli/copyfile.go deleted file mode 100644 index 272fb3f7851..00000000000 --- a/cmd/crowdsec-cli/copyfile.go +++ /dev/null @@ -1,82 +0,0 @@ -package main - -import ( - "fmt" - "io" - "os" - "path/filepath" - - log "github.com/sirupsen/logrus" -) - -/*help to copy the file, ioutil doesn't offer the feature*/ - -func copyFileContents(src, dst string) (err error) { - in, err := os.Open(src) - if err != nil { - return - } - defer in.Close() - - out, err := os.Create(dst) - if err != nil { - return - } - - defer func() { - cerr := out.Close() - if err == nil { - err = cerr - } - }() - - if _, err = io.Copy(out, in); err != nil { - return - } - - err = out.Sync() - - return -} - -/*copy the file, ioutile doesn't offer the feature*/ -func CopyFile(sourceSymLink, destinationFile string) error { - sourceFile, err := filepath.EvalSymlinks(sourceSymLink) - if err != nil { - log.Infof("Not a symlink : %s", err) - - sourceFile = sourceSymLink - } - - sourceFileStat, err := os.Stat(sourceFile) - if err != nil { - return err - } - - if !sourceFileStat.Mode().IsRegular() { - // cannot copy non-regular files (e.g., directories, - // symlinks, devices, etc.) 
- return fmt.Errorf("copyFile: non-regular source file %s (%q)", sourceFileStat.Name(), sourceFileStat.Mode().String()) - } - - destinationFileStat, err := os.Stat(destinationFile) - if err != nil { - if !os.IsNotExist(err) { - return err - } - } else { - if !(destinationFileStat.Mode().IsRegular()) { - return fmt.Errorf("copyFile: non-regular destination file %s (%q)", destinationFileStat.Name(), destinationFileStat.Mode().String()) - } - - if os.SameFile(sourceFileStat, destinationFileStat) { - return err - } - } - - if err = os.Link(sourceFile, destinationFile); err != nil { - err = copyFileContents(sourceFile, destinationFile) - } - - return err -} diff --git a/cmd/crowdsec-cli/dashboard.go b/cmd/crowdsec-cli/dashboard.go index 53a7dff85a0..c3c974eb9b8 100644 --- a/cmd/crowdsec-cli/dashboard.go +++ b/cmd/crowdsec-cli/dashboard.go @@ -36,10 +36,11 @@ var ( metabaseConfigFile = "metabase.yaml" metabaseImage = "metabase/metabase:v0.46.6.1" /**/ - metabaseListenAddress = "127.0.0.1" - metabaseListenPort = "3000" - metabaseContainerID = "crowdsec-metabase" - crowdsecGroup = "crowdsec" + metabaseListenAddress = "127.0.0.1" + metabaseListenPort = "3000" + metabaseContainerID = "crowdsec-metabase" + metabaseContainerEnvironmentVariables []string + crowdsecGroup = "crowdsec" forceYes bool @@ -144,7 +145,11 @@ cscli dashboard setup -l 0.0.0.0 -p 443 --password if metabasePassword == "" { isValid := passwordIsValid(metabasePassword) for !isValid { - metabasePassword = idgen.GeneratePassword(16) + var err error + metabasePassword, err = idgen.GeneratePassword(16) + if err != nil { + return err + } isValid = passwordIsValid(metabasePassword) } } @@ -162,7 +167,9 @@ cscli dashboard setup -l 0.0.0.0 -p 443 --password if err = cli.chownDatabase(dockerGroup.Gid); err != nil { return err } - mb, err := metabase.SetupMetabase(cli.cfg().API.Server.DbConfig, metabaseListenAddress, metabaseListenPort, metabaseUser, metabasePassword, metabaseDBPath, dockerGroup.Gid, 
metabaseContainerID, metabaseImage) + mb, err := metabase.SetupMetabase(cli.cfg().API.Server.DbConfig, metabaseListenAddress, + metabaseListenPort, metabaseUser, metabasePassword, metabaseDBPath, dockerGroup.Gid, + metabaseContainerID, metabaseImage, metabaseContainerEnvironmentVariables) if err != nil { return err } @@ -189,6 +196,7 @@ cscli dashboard setup -l 0.0.0.0 -p 443 --password flags.BoolVarP(&forceYes, "yes", "y", false, "force yes") // flags.StringVarP(&metabaseUser, "user", "u", "crowdsec@crowdsec.net", "metabase user") flags.StringVar(&metabasePassword, "password", "", "metabase password") + flags.StringSliceVarP(&metabaseContainerEnvironmentVariables, "env", "e", nil, "Additional environment variables to pass to the metabase container") return cmd } @@ -243,7 +251,8 @@ func (cli *cliDashboard) newStopCmd() *cobra.Command { } func (cli *cliDashboard) newShowPasswordCmd() *cobra.Command { - cmd := &cobra.Command{Use: "show-password", + cmd := &cobra.Command{ + Use: "show-password", Short: "displays password of metabase.", Args: cobra.NoArgs, DisableAutoGenTag: true, @@ -457,7 +466,6 @@ func checkGroups(forceYes *bool) (*user.Group, error) { func (cli *cliDashboard) chownDatabase(gid string) error { cfg := cli.cfg() intID, err := strconv.Atoi(gid) - if err != nil { return fmt.Errorf("unable to convert group ID to int: %s", err) } diff --git a/cmd/crowdsec-cli/idgen/machineid.go b/cmd/crowdsec-cli/idgen/machineid.go index 4bd356b3abc..434f79128e9 100644 --- a/cmd/crowdsec-cli/idgen/machineid.go +++ b/cmd/crowdsec-cli/idgen/machineid.go @@ -42,7 +42,11 @@ func GenerateMachineID(prefix string) (string, error) { } prefix = strings.ReplaceAll(prefix, "-", "")[:32] - suffix := GeneratePassword(16) + + suffix, err := GeneratePassword(16) + if err != nil { + return "", err + } return prefix + suffix, nil } diff --git a/cmd/crowdsec-cli/idgen/password.go b/cmd/crowdsec-cli/idgen/password.go index e0faa4daacc..9f1925288ce 100644 --- 
a/cmd/crowdsec-cli/idgen/password.go +++ b/cmd/crowdsec-cli/idgen/password.go @@ -2,14 +2,13 @@ package idgen import ( saferand "crypto/rand" + "fmt" "math/big" - - log "github.com/sirupsen/logrus" ) const PasswordLength = 64 -func GeneratePassword(length int) string { +func GeneratePassword(length int) (string, error) { upper := "ABCDEFGHIJKLMNOPQRSTUVWXY" lower := "abcdefghijklmnopqrstuvwxyz" digits := "0123456789" @@ -22,11 +21,11 @@ func GeneratePassword(length int) string { for i := range length { rInt, err := saferand.Int(saferand.Reader, big.NewInt(int64(charsetLength))) if err != nil { - log.Fatalf("failed getting data from prng for password generation : %s", err) + return "", fmt.Errorf("prng failed to generate unique id or password: %w", err) } buf[i] = charset[rInt.Int64()] } - return string(buf) + return string(buf), nil } diff --git a/cmd/crowdsec-cli/main.go b/cmd/crowdsec-cli/main.go index 1cca03b1d3d..a17bafb96d8 100644 --- a/cmd/crowdsec-cli/main.go +++ b/cmd/crowdsec-cli/main.go @@ -17,6 +17,7 @@ import ( "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clialert" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clibouncer" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clicapi" + "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliconfig" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliconsole" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/clidecision" "github.com/crowdsecurity/crowdsec/cmd/crowdsec-cli/cliexplain" @@ -91,7 +92,6 @@ func loadConfigFor(command string) (*csconfig.Config, string, error) { "help", "completion", "version", - "hubtest", } if !slices.Contains(noNeedConfig, command) { @@ -146,7 +146,10 @@ func (cli *cliRoot) initialize() error { return fmt.Errorf("output format '%s' not supported: must be one of human, json, raw", csConfig.Cscli.Output) } - log.SetFormatter(&log.TextFormatter{DisableTimestamp: true}) + log.SetFormatter(&log.TextFormatter{ + DisableTimestamp: true, + DisableLevelTruncation: true, 
+ }) if csConfig.Cscli.Output == "json" { log.SetFormatter(&log.JSONFormatter{}) @@ -254,7 +257,7 @@ It is meant to allow you to manage bans, parsers/scenarios/etc, api and generall cmd.AddCommand(NewCLIDoc().NewCommand(cmd)) cmd.AddCommand(NewCLIVersion().NewCommand()) - cmd.AddCommand(NewCLIConfig(cli.cfg).NewCommand()) + cmd.AddCommand(cliconfig.New(cli.cfg).NewCommand(func() string { return mergedConfig })) cmd.AddCommand(clihub.New(cli.cfg).NewCommand()) cmd.AddCommand(climetrics.New(cli.cfg).NewCommand()) cmd.AddCommand(NewCLIDashboard(cli.cfg).NewCommand()) @@ -302,6 +305,8 @@ func main() { } if err := cmd.Execute(); err != nil { - log.Fatal(err) + red := color.New(color.FgRed).SprintFunc() + fmt.Fprintln(os.Stderr, red("Error:"), err) + os.Exit(1) } } diff --git a/cmd/crowdsec-cli/reload/message.go b/cmd/crowdsec-cli/reload/message.go new file mode 100644 index 00000000000..cd8e7d4795f --- /dev/null +++ b/cmd/crowdsec-cli/reload/message.go @@ -0,0 +1,6 @@ +//go:build !windows && !freebsd && !linux + +package reload + +// generic message since we don't know the platform +const message = "Please reload the crowdsec process for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload/reload_freebsd.go b/cmd/crowdsec-cli/reload/message_freebsd.go similarity index 64% rename from cmd/crowdsec-cli/reload/reload_freebsd.go rename to cmd/crowdsec-cli/reload/message_freebsd.go index 0dac99f2315..9328f935be8 100644 --- a/cmd/crowdsec-cli/reload/reload_freebsd.go +++ b/cmd/crowdsec-cli/reload/message_freebsd.go @@ -1,4 +1,4 @@ package reload // actually sudo is not that popular on freebsd, but this will do -const Message = "Run 'sudo service crowdsec reload' for the new configuration to be effective." +const message = "Run 'sudo service crowdsec reload' for the new configuration to be effective." 
diff --git a/cmd/crowdsec-cli/reload/reload_linux.go b/cmd/crowdsec-cli/reload/message_linux.go similarity index 62% rename from cmd/crowdsec-cli/reload/reload_linux.go rename to cmd/crowdsec-cli/reload/message_linux.go index fbe16e5f168..11c95165372 100644 --- a/cmd/crowdsec-cli/reload/reload_linux.go +++ b/cmd/crowdsec-cli/reload/message_linux.go @@ -1,4 +1,4 @@ package reload // assume systemd, although gentoo and others may differ -const Message = "Run 'sudo systemctl reload crowdsec' for the new configuration to be effective." +const message = "Run 'sudo systemctl reload crowdsec' for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload/message_windows.go b/cmd/crowdsec-cli/reload/message_windows.go new file mode 100644 index 00000000000..888cb44b0d2 --- /dev/null +++ b/cmd/crowdsec-cli/reload/message_windows.go @@ -0,0 +1,3 @@ +package reload + +const message = "Please restart the crowdsec service for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/reload/reload.go b/cmd/crowdsec-cli/reload/reload.go index fe03af1ea79..44d001fda0c 100644 --- a/cmd/crowdsec-cli/reload/reload.go +++ b/cmd/crowdsec-cli/reload/reload.go @@ -1,6 +1,20 @@ -//go:build !windows && !freebsd && !linux - package reload -// generic message since we don't know the platform -const Message = "Please reload the crowdsec process for the new configuration to be effective." +import ( + "os" + + "github.com/crowdsecurity/go-cs-lib/version" + isatty "github.com/mattn/go-isatty" +) + +func UserMessage() string { + if version.System == "docker" { + if isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) { + return "You may need to restart the container to apply the changes." 
+ } + + return "" + } + + return message +} diff --git a/cmd/crowdsec-cli/reload/reload_windows.go b/cmd/crowdsec-cli/reload/reload_windows.go deleted file mode 100644 index 88642425ae2..00000000000 --- a/cmd/crowdsec-cli/reload/reload_windows.go +++ /dev/null @@ -1,3 +0,0 @@ -package reload - -const Message = "Please restart the crowdsec service for the new configuration to be effective." diff --git a/cmd/crowdsec-cli/require/branch.go b/cmd/crowdsec-cli/require/branch.go index 09acc0fef8a..ab9b8e50bdc 100644 --- a/cmd/crowdsec-cli/require/branch.go +++ b/cmd/crowdsec-cli/require/branch.go @@ -69,7 +69,7 @@ func chooseBranch(ctx context.Context, cfg *csconfig.Config) string { return "master" } - csVersion := cwversion.VersionStrip() + csVersion := cwversion.BaseVersion() if csVersion == "" { log.Warning("Crowdsec version is not set, using hub branch 'master'") return "master" diff --git a/cmd/crowdsec-cli/require/require.go b/cmd/crowdsec-cli/require/require.go index 191eee55bc5..beffa29f3eb 100644 --- a/cmd/crowdsec-cli/require/require.go +++ b/cmd/crowdsec-cli/require/require.go @@ -27,7 +27,7 @@ func LAPI(c *csconfig.Config) error { func CAPI(c *csconfig.Config) error { if c.API.Server.OnlineClient == nil { - return fmt.Errorf("no configuration for Central API (CAPI) in '%s'", *c.FilePath) + return fmt.Errorf("no configuration for Central API (CAPI) in '%s'", c.FilePath) } return nil @@ -82,15 +82,13 @@ func Notifications(c *csconfig.Config) error { return nil } -// RemoteHub returns the configuration required to download hub index and items: url, branch, etc. 
-func RemoteHub(ctx context.Context, c *csconfig.Config) *cwhub.RemoteHubCfg { +func HubDownloader(ctx context.Context, c *csconfig.Config) *cwhub.Downloader { // set branch in config, and log if necessary branch := HubBranch(ctx, c) urlTemplate := HubURLTemplate(c) - remote := &cwhub.RemoteHubCfg{ + remote := &cwhub.Downloader{ Branch: branch, URLTemplate: urlTemplate, - IndexPath: ".index.json", } return remote @@ -98,7 +96,7 @@ func RemoteHub(ctx context.Context, c *csconfig.Config) *cwhub.RemoteHubCfg { // Hub initializes the hub. If a remote configuration is provided, it can be used to download the index and items. // If no remote parameter is provided, the hub can only be used for local operations. -func Hub(c *csconfig.Config, remote *cwhub.RemoteHubCfg, logger *logrus.Logger) (*cwhub.Hub, error) { +func Hub(c *csconfig.Config, logger *logrus.Logger) (*cwhub.Hub, error) { local := c.Hub if local == nil { @@ -110,13 +108,13 @@ func Hub(c *csconfig.Config, remote *cwhub.RemoteHubCfg, logger *logrus.Logger) logger.SetOutput(io.Discard) } - hub, err := cwhub.NewHub(local, remote, logger) + hub, err := cwhub.NewHub(local, logger) if err != nil { return nil, err } if err := hub.Load(); err != nil { - return nil, fmt.Errorf("failed to read Hub index: %w. 
Run 'sudo cscli hub update' to download the index again", err) + return nil, err } return hub, nil diff --git a/cmd/crowdsec-cli/setup.go b/cmd/crowdsec-cli/setup.go index 66c0d71e777..3581d69f052 100644 --- a/cmd/crowdsec-cli/setup.go +++ b/cmd/crowdsec-cli/setup.go @@ -1,4 +1,5 @@ //go:build !no_cscli_setup + package main import ( diff --git a/cmd/crowdsec/appsec.go b/cmd/crowdsec/appsec.go index cb02b137dcd..4320133b063 100644 --- a/cmd/crowdsec/appsec.go +++ b/cmd/crowdsec/appsec.go @@ -1,4 +1,4 @@ -// +build !no_datasource_appsec +//go:build !no_datasource_appsec package main diff --git a/cmd/crowdsec/fatalhook.go b/cmd/crowdsec/fatalhook.go index 84a57406a21..56e945c84a5 100644 --- a/cmd/crowdsec/fatalhook.go +++ b/cmd/crowdsec/fatalhook.go @@ -2,6 +2,7 @@ package main import ( "io" + "os" log "github.com/sirupsen/logrus" ) @@ -9,16 +10,35 @@ import ( // FatalHook is used to log fatal messages to stderr when the rest goes to a file type FatalHook struct { Writer io.Writer + Formatter log.Formatter LogLevels []log.Level } +func newFatalHook() *FatalHook { + return &FatalHook{ + Writer: os.Stderr, + Formatter: &log.TextFormatter{ + DisableTimestamp: true, + // XXX: logrus.TextFormatter has either key pairs with no colors, + // or "LEVEL [optional timestamp] message", with colors. + // We force colors to make sure we get the latter, even if + // the output is not a terminal. + // There are more flexible formatters that don't conflate the two concepts, + // or we can write our own. 
+ ForceColors: true, + DisableLevelTruncation: true, + }, + LogLevels: []log.Level{log.FatalLevel, log.PanicLevel}, + } +} + func (hook *FatalHook) Fire(entry *log.Entry) error { - line, err := entry.String() + line, err := hook.Formatter.Format(entry) if err != nil { return err } - _, err = hook.Writer.Write([]byte(line)) + _, err = hook.Writer.Write(line) return err } diff --git a/cmd/crowdsec/main.go b/cmd/crowdsec/main.go index 6d8ca24c335..02220e15216 100644 --- a/cmd/crowdsec/main.go +++ b/cmd/crowdsec/main.go @@ -86,20 +86,15 @@ func (f *Flags) haveTimeMachine() bool { type labelsMap map[string]string func LoadBuckets(cConfig *csconfig.Config, hub *cwhub.Hub) error { - var ( - err error - files []string - ) - - for _, hubScenarioItem := range hub.GetInstalledByType(cwhub.SCENARIOS, false) { - files = append(files, hubScenarioItem.State.LocalPath) - } + var err error buckets = leakybucket.NewBuckets() - log.Infof("Loading %d scenario files", len(files)) + scenarios := hub.GetInstalledByType(cwhub.SCENARIOS, false) + + log.Infof("Loading %d scenario files", len(scenarios)) - holders, outputEventChan, err = leakybucket.LoadBuckets(cConfig.Crowdsec, hub, files, &bucketsTomb, buckets, flags.OrderEvent) + holders, outputEventChan, err = leakybucket.LoadBuckets(cConfig.Crowdsec, hub, scenarios, &bucketsTomb, buckets, flags.OrderEvent) if err != nil { return fmt.Errorf("scenario loading failed: %w", err) } @@ -148,14 +143,14 @@ func (l *labelsMap) String() string { return "labels" } -func (l labelsMap) Set(label string) error { +func (l *labelsMap) Set(label string) error { for _, pair := range strings.Split(label, ",") { split := strings.Split(pair, ":") if len(split) != 2 { return fmt.Errorf("invalid format for label '%s', must be key:value", pair) } - l[split[0]] = split[1] + (*l)[split[0]] = split[1] } return nil @@ -254,16 +249,13 @@ func LoadConfig(configFile string, disableAgent bool, disableAPI bool, quiet boo if err := 
types.SetDefaultLoggerConfig(cConfig.Common.LogMedia, cConfig.Common.LogDir, *cConfig.Common.LogLevel, cConfig.Common.LogMaxSize, cConfig.Common.LogMaxFiles, - cConfig.Common.LogMaxAge, cConfig.Common.CompressLogs, + cConfig.Common.LogMaxAge, cConfig.Common.LogFormat, cConfig.Common.CompressLogs, cConfig.Common.ForceColorLogs); err != nil { return nil, err } if cConfig.Common.LogMedia != "stdout" { - log.AddHook(&FatalHook{ - Writer: os.Stderr, - LogLevels: []log.Level{log.FatalLevel, log.PanicLevel}, - }) + log.AddHook(newFatalHook()) } if err := csconfig.LoadFeatureFlagsFile(configFile, log.StandardLogger()); err != nil { diff --git a/cmd/crowdsec/pour.go b/cmd/crowdsec/pour.go index 2fc7d7e42c9..4c83b65bd48 100644 --- a/cmd/crowdsec/pour.go +++ b/cmd/crowdsec/pour.go @@ -46,7 +46,7 @@ func runPour(input chan types.Event, holders []leaky.BucketFactory, buckets *lea // here we can bucketify with parsed poured, err := leaky.PourItemToHolders(parsed, holders, buckets) if err != nil { - log.Errorf("bucketify failed for: %v", parsed) + log.Errorf("bucketify failed for: %v with %s", parsed, err) continue } diff --git a/cmd/crowdsec/serve.go b/cmd/crowdsec/serve.go index 14602c425fe..0f7a84ce5c7 100644 --- a/cmd/crowdsec/serve.go +++ b/cmd/crowdsec/serve.go @@ -85,7 +85,7 @@ func reloadHandler(sig os.Signal) (*csconfig.Config, error) { } if !cConfig.DisableAgent { - hub, err := cwhub.NewHub(cConfig.Hub, nil, log.StandardLogger()) + hub, err := cwhub.NewHub(cConfig.Hub, log.StandardLogger()) if err != nil { return nil, err } @@ -387,7 +387,7 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { } if !cConfig.DisableAgent { - hub, err := cwhub.NewHub(cConfig.Hub, nil, log.StandardLogger()) + hub, err := cwhub.NewHub(cConfig.Hub, log.StandardLogger()) if err != nil { return err } @@ -419,7 +419,7 @@ func Serve(cConfig *csconfig.Config, agentReady chan bool) error { } if cConfig.Common != nil && cConfig.Common.Daemonize { - csdaemon.Notify(csdaemon.Ready, 
log.StandardLogger()) + _ = csdaemon.Notify(csdaemon.Ready, log.StandardLogger()) // wait for signals return HandleSignals(cConfig) } diff --git a/cmd/crowdsec/win_service.go b/cmd/crowdsec/win_service.go index 6aa363ca3a7..ae48e77447c 100644 --- a/cmd/crowdsec/win_service.go +++ b/cmd/crowdsec/win_service.go @@ -67,7 +67,7 @@ func runService(name string) error { // All the calls to logging before the logger is configured are pretty much useless, but we keep them for clarity err := eventlog.InstallAsEventCreate("CrowdSec", eventlog.Error|eventlog.Warning|eventlog.Info) if err != nil { - if errno, ok := err.(syscall.Errno); ok { + if errno, ok := err.(syscall.Errno); ok { //nolint:errorlint if errno == windows.ERROR_ACCESS_DENIED { log.Warnf("Access denied when installing event source, running as non-admin ?") } else { diff --git a/cmd/notification-email/main.go b/cmd/notification-email/main.go index 5fc02cdd1d7..b61644611b4 100644 --- a/cmd/notification-email/main.go +++ b/cmd/notification-email/main.go @@ -68,7 +68,7 @@ func (n *EmailPlugin) Configure(ctx context.Context, config *protobufs.Config) ( EncryptionType: "ssltls", AuthType: "login", SenderEmail: "crowdsec@crowdsec.local", - HeloHost: "localhost", + HeloHost: "localhost", } if err := yaml.Unmarshal(config.Config, &d); err != nil { diff --git a/debian/install b/debian/install index fa422cac8d9..2d4cc6e1a7f 100644 --- a/debian/install +++ b/debian/install @@ -3,7 +3,6 @@ config/profiles.yaml etc/crowdsec/ config/simulation.yaml etc/crowdsec/ config/patterns/* etc/crowdsec/patterns -config/crowdsec.service lib/systemd/system # Referenced configs: cmd/notification-slack/slack.yaml etc/crowdsec/notifications/ diff --git a/debian/postinst b/debian/postinst index 77f2511f556..b73619b9e6f 100644 --- a/debian/postinst +++ b/debian/postinst @@ -11,14 +11,6 @@ if [ "$1" = configure ]; then mkdir -p /var/lib/crowdsec/data fi - if [[ -d /var/lib/crowdsec/backup ]]; then - cscli config restore 
/var/lib/crowdsec/backup/backup.config - rm -rf /var/lib/crowdsec/backup - /usr/bin/cscli hub update - /usr/bin/cscli hub upgrade - systemctl start crowdsec - fi - . /usr/share/crowdsec/wizard.sh -n if ! [[ -f /etc/crowdsec/acquis.yaml ]]; then echo Creating /etc/crowdsec/acquis.yaml @@ -76,18 +68,14 @@ if [ "$1" = configure ]; then echo Updating hub /usr/bin/cscli hub update + /usr/bin/cscli hub upgrade + if [ "$COLLECTIONS" = true ]; then set +e CSCLI_BIN_INSTALLED="/usr/bin/cscli" SILENT=true install_collection set -e fi - - if [[ -f /var/lib/crowdsec/data/crowdsec.db.backup ]]; then - cp /var/lib/crowdsec/data/crowdsec.db.backup /var/lib/crowdsec/data/crowdsec.db - rm -f /var/lib/crowdsec/data/crowdsec.db.backup - fi - systemctl --quiet is-enabled crowdsec || systemctl unmask crowdsec && systemctl enable crowdsec API=$(cscli config show --key "Config.API.Server") @@ -103,12 +91,18 @@ if [ "$1" = configure ]; then echo "This port is configured through /etc/crowdsec/config.yaml and /etc/crowdsec/local_api_credentials.yaml" fi - echo "Get started with CrowdSec:" - echo " * Detailed guides are available in our documentation: https://docs.crowdsec.net" - echo " * Configuration items created by the community can be found at the Hub: https://hub.crowdsec.net" - echo " * Gain insights into your use of CrowdSec with the help of the console https://app.crowdsec.net" - - + GREEN='\033[0;32m' + BOLD='\033[1m' + RESET='\033[0m' + + echo -e "${BOLD}Get started with CrowdSec:${RESET}" + echo -e " * Go further by following our ${BOLD}post installation steps${RESET} : ${GREEN}${BOLD}https://docs.crowdsec.net/u/getting_started/next_steps${RESET}" + echo -e "====================================================================================================================" + echo -e " * Install a ${BOLD}remediation component${RESET} to block attackers: ${GREEN}${BOLD}https://docs.crowdsec.net/u/bouncers/intro${RESET}" + echo -e 
"====================================================================================================================" + echo -e " * Find more ${BOLD}collections${RESET}, ${BOLD}parsers${RESET} and ${BOLD}scenarios${RESET} created by the community with the Hub: ${GREEN}${BOLD}https://hub.crowdsec.net${RESET}" + echo -e "====================================================================================================================" + echo -e " * Subscribe to ${BOLD}additional blocklists${RESET}, ${BOLD}visualize${RESET} your alerts and more with the console: ${GREEN}${BOLD}https://app.crowdsec.net${RESET}" fi echo "You can always run the configuration again interactively by using '/usr/share/crowdsec/wizard.sh -c'" diff --git a/debian/preinst b/debian/preinst deleted file mode 100644 index 217b836caa6..00000000000 --- a/debian/preinst +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash - -set -e - -# Source debconf library. -. /usr/share/debconf/confmodule - - -OLD_MAJOR_VERSION=$(echo $2 | cut -d'.' -f1) -OLD_MINOR_VERSION=$(echo $2 | cut -d'.' -f2) -OLD_PATCH_VERSION=$(echo $2 | cut -d'.' -f3|cut -d'-' -f1) - -NEW_MAJOR_VERSION=$(echo $3 | cut -d'.' -f1) -NEW_MINOR_VERSION=$(echo $3 | cut -d'.' -f2) -NEW_PATCH_VERSION=$(echo $3 | cut -d'.' -f3|cut -d'-' -f1) - - - -if [ "$1" = upgrade ]; then - - OLD_MAJOR_VERSION=$(echo $2 | cut -d'.' -f1) - OLD_MINOR_VERSION=$(echo $2 | cut -d'.' -f2) - OLD_PATCH_VERSION=$(echo $2 | cut -d'.' -f3|cut -d'-' -f1) - - NEW_MAJOR_VERSION=$(echo $3 | cut -d'.' -f1) - NEW_MINOR_VERSION=$(echo $3 | cut -d'.' -f2) - NEW_PATCH_VERSION=$(echo $3 | cut -d'.' 
-f3|cut -d'-' -f1) - - - if [[ $OLD_MAJOR_VERSION -eq "1" ]] && [[ $OLD_MINOR_VERSION -eq "0" ]] && [[ $OLD_PATCH_VERSION -lt "9" ]]; then - if [[ -f /var/lib/crowdsec/data/crowdsec.db ]]; then - cp /var/lib/crowdsec/data/crowdsec.db /var/lib/crowdsec/data/crowdsec.db.backup - fi - fi - - if [[ $NEW_MAJOR_VERSION -gt $OLD_MAJOR_VERSION ]]; then - echo "Stopping crowdsec" - systemctl stop crowdsec || true - cscli config backup /var/lib/crowdsec/backup - fi -fi - -echo "You can always run the configuration again interactively by using '/usr/share/crowdsec/wizard.sh -c'" diff --git a/debian/prerm b/debian/prerm index a463a6a1c80..10afcf1906d 100644 --- a/debian/prerm +++ b/debian/prerm @@ -1,9 +1,8 @@ if [ "$1" = "remove" ]; then - cscli dashboard remove -f -y --error || echo "Ignore the above error if you never installed the local dashboard." systemctl stop crowdsec systemctl disable crowdsec fi if [ "$1" = "upgrade" ]; then systemctl stop crowdsec -fi \ No newline at end of file +fi diff --git a/debian/rules b/debian/rules index 5b8d6fc51f8..ec80caff985 100755 --- a/debian/rules +++ b/debian/rules @@ -1,6 +1,6 @@ #!/usr/bin/make -f -export DEB_VERSION=$(shell dpkg-parsechangelog | grep -E '^Version:' | cut -f 2 -d ' ') +export DEB_VERSION=$(shell dpkg-parsechangelog -SVersion) export BUILD_VERSION=v${DEB_VERSION}-debian-pragmatic export GO111MODULE=on diff --git a/docker/test/.python-version b/docker/test/.python-version new file mode 100644 index 00000000000..e4fba218358 --- /dev/null +++ b/docker/test/.python-version @@ -0,0 +1 @@ +3.12 diff --git a/docker/test/Pipfile b/docker/test/Pipfile deleted file mode 100644 index c57ccb628e8..00000000000 --- a/docker/test/Pipfile +++ /dev/null @@ -1,11 +0,0 @@ -[packages] -pytest-dotenv = "0.5.2" -pytest-xdist = "3.5.0" -pytest-cs = {ref = "0.7.19", git = "https://github.com/crowdsecurity/pytest-cs.git"} - -[dev-packages] -gnureadline = "8.1.2" -ipdb = "0.13.13" - -[requires] -python_version = "*" diff --git 
a/docker/test/Pipfile.lock b/docker/test/Pipfile.lock deleted file mode 100644 index 99184d9f2a2..00000000000 --- a/docker/test/Pipfile.lock +++ /dev/null @@ -1,604 +0,0 @@ -{ - "_meta": { - "hash": { - "sha256": "b5d25a7199d15a900b285be1af97cf7b7083c6637d631ad777b454471c8319fe" - }, - "pipfile-spec": 6, - "requires": { - "python_version": "*" - }, - "sources": [ - { - "name": "pypi", - "url": "https://pypi.org/simple", - "verify_ssl": true - } - ] - }, - "default": { - "certifi": { - "hashes": [ - "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8", - "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9" - ], - "markers": "python_version >= '3.6'", - "version": "==2024.8.30" - }, - "cffi": { - "hashes": [ - "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8", - "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", - "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1", - "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15", - "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", - "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", - "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8", - "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36", - "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17", - "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf", - "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc", - "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", - "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", - "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702", - "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1", - "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", - 
"sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", - "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6", - "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d", - "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b", - "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e", - "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be", - "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c", - "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", - "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", - "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", - "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8", - "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1", - "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", - "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655", - "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67", - "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595", - "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0", - "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", - "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41", - "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6", - "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401", - "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6", - "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3", - "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16", - "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", - "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e", 
- "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", - "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964", - "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c", - "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576", - "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0", - "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3", - "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662", - "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", - "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", - "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", - "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", - "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f", - "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", - "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14", - "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", - "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9", - "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7", - "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382", - "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a", - "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", - "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", - "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4", - "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", - "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87", - "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b" - ], - "markers": "platform_python_implementation != 'PyPy'", - "version": 
"==1.17.1" - }, - "charset-normalizer": { - "hashes": [ - "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027", - "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087", - "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786", - "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8", - "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09", - "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185", - "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574", - "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e", - "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519", - "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898", - "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269", - "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3", - "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f", - "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6", - "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8", - "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a", - "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73", - "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc", - "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714", - "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2", - "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc", - "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce", - "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d", - "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e", - "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6", - 
"sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269", - "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96", - "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d", - "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a", - "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4", - "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77", - "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d", - "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0", - "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed", - "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068", - "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac", - "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25", - "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8", - "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab", - "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26", - "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2", - "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db", - "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f", - "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5", - "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99", - "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c", - "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d", - "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811", - "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa", - "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a", - "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03", 
- "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b", - "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04", - "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c", - "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001", - "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458", - "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389", - "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99", - "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985", - "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537", - "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238", - "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f", - "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d", - "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796", - "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a", - "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143", - "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8", - "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c", - "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5", - "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5", - "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711", - "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4", - "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6", - "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c", - "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7", - "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4", - 
"sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b", - "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae", - "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12", - "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c", - "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae", - "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8", - "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887", - "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b", - "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4", - "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f", - "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5", - "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33", - "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519", - "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561" - ], - "markers": "python_full_version >= '3.7.0'", - "version": "==3.3.2" - }, - "cryptography": { - "hashes": [ - "sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494", - "sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806", - "sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d", - "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062", - "sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2", - "sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4", - "sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1", - "sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85", - "sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84", - "sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042", - 
"sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d", - "sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962", - "sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2", - "sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa", - "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d", - "sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365", - "sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96", - "sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47", - "sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d", - "sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d", - "sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c", - "sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb", - "sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277", - "sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172", - "sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034", - "sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a", - "sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289" - ], - "markers": "python_version >= '3.7'", - "version": "==43.0.1" - }, - "docker": { - "hashes": [ - "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c", - "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0" - ], - "markers": "python_version >= '3.8'", - "version": "==7.1.0" - }, - "execnet": { - "hashes": [ - "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc", - "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3" - ], - "markers": "python_version >= '3.8'", - "version": "==2.1.1" - }, - "idna": { - "hashes": [ - "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", - 
"sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3" - ], - "markers": "python_version >= '3.6'", - "version": "==3.10" - }, - "iniconfig": { - "hashes": [ - "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", - "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374" - ], - "markers": "python_version >= '3.7'", - "version": "==2.0.0" - }, - "packaging": { - "hashes": [ - "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002", - "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124" - ], - "markers": "python_version >= '3.8'", - "version": "==24.1" - }, - "pluggy": { - "hashes": [ - "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", - "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669" - ], - "markers": "python_version >= '3.8'", - "version": "==1.5.0" - }, - "psutil": { - "hashes": [ - "sha256:02b69001f44cc73c1c5279d02b30a817e339ceb258ad75997325e0e6169d8b35", - "sha256:1287c2b95f1c0a364d23bc6f2ea2365a8d4d9b726a3be7294296ff7ba97c17f0", - "sha256:1e7c870afcb7d91fdea2b37c24aeb08f98b6d67257a5cb0a8bc3ac68d0f1a68c", - "sha256:21f1fb635deccd510f69f485b87433460a603919b45e2a324ad65b0cc74f8fb1", - "sha256:33ea5e1c975250a720b3a6609c490db40dae5d83a4eb315170c4fe0d8b1f34b3", - "sha256:34859b8d8f423b86e4385ff3665d3f4d94be3cdf48221fbe476e883514fdb71c", - "sha256:5fd9a97c8e94059b0ef54a7d4baf13b405011176c3b6ff257c247cae0d560ecd", - "sha256:6ec7588fb3ddaec7344a825afe298db83fe01bfaaab39155fa84cf1c0d6b13c3", - "sha256:6ed2440ada7ef7d0d608f20ad89a04ec47d2d3ab7190896cd62ca5fc4fe08bf0", - "sha256:8faae4f310b6d969fa26ca0545338b21f73c6b15db7c4a8d934a5482faa818f2", - "sha256:a021da3e881cd935e64a3d0a20983bda0bb4cf80e4f74fa9bfcb1bc5785360c6", - "sha256:a495580d6bae27291324fe60cea0b5a7c23fa36a7cd35035a16d93bdcf076b9d", - "sha256:a9a3dbfb4de4f18174528d87cc352d1f788b7496991cca33c6996f40c9e3c92c", - 
"sha256:c588a7e9b1173b6e866756dde596fd4cad94f9399daf99ad8c3258b3cb2b47a0", - "sha256:e2e8d0054fc88153ca0544f5c4d554d42e33df2e009c4ff42284ac9ebdef4132", - "sha256:fc8c9510cde0146432bbdb433322861ee8c3efbf8589865c8bf8d21cb30c4d14", - "sha256:ffe7fc9b6b36beadc8c322f84e1caff51e8703b88eee1da46d1e3a6ae11b4fd0" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", - "version": "==6.0.0" - }, - "pycparser": { - "hashes": [ - "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", - "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc" - ], - "markers": "python_version >= '3.8'", - "version": "==2.22" - }, - "pytest": { - "hashes": [ - "sha256:70b98107bd648308a7952b06e6ca9a50bc660be218d53c257cc1fc94fda10181", - "sha256:a6853c7375b2663155079443d2e45de913a911a11d669df02a50814944db57b2" - ], - "markers": "python_version >= '3.8'", - "version": "==8.3.3" - }, - "pytest-cs": { - "git": "https://github.com/crowdsecurity/pytest-cs.git", - "ref": "aea7e8549faa32f5e1d1f17755a5db3712396a2a" - }, - "pytest-datadir": { - "hashes": [ - "sha256:1617ed92f9afda0c877e4eac91904b5f779d24ba8f5e438752e3ae39d8d2ee3f", - "sha256:34adf361bcc7b37961bbc1dfa8d25a4829e778bab461703c38a5c50ca9c36dc8" - ], - "markers": "python_version >= '3.8'", - "version": "==1.5.0" - }, - "pytest-dotenv": { - "hashes": [ - "sha256:2dc6c3ac6d8764c71c6d2804e902d0ff810fa19692e95fe138aefc9b1aa73732", - "sha256:40a2cece120a213898afaa5407673f6bd924b1fa7eafce6bda0e8abffe2f710f" - ], - "index": "pypi", - "version": "==0.5.2" - }, - "pytest-xdist": { - "hashes": [ - "sha256:cbb36f3d67e0c478baa57fa4edc8843887e0f6cfc42d677530a36d7472b32d8a", - "sha256:d075629c7e00b611df89f490a5063944bee7a4362a5ff11c7cc7824a03dfce24" - ], - "index": "pypi", - "markers": "python_version >= '3.7'", - "version": "==3.5.0" - }, - "python-dotenv": { - "hashes": [ - "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca", - 
"sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a" - ], - "markers": "python_version >= '3.8'", - "version": "==1.0.1" - }, - "pyyaml": { - "hashes": [ - "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff", - "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", - "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086", - "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e", - "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", - "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5", - "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", - "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee", - "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", - "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68", - "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a", - "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf", - "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99", - "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8", - "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85", - "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19", - "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", - "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a", - "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", - "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317", - "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c", - "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631", - "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d", - 
"sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", - "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", - "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e", - "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b", - "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", - "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", - "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706", - "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", - "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237", - "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", - "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083", - "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180", - "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", - "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e", - "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f", - "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725", - "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", - "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", - "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774", - "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", - "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", - "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5", - "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d", - "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290", - "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44", - "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed", 
- "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", - "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", - "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12", - "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4" - ], - "markers": "python_version >= '3.8'", - "version": "==6.0.2" - }, - "requests": { - "hashes": [ - "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", - "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6" - ], - "markers": "python_version >= '3.8'", - "version": "==2.32.3" - }, - "trustme": { - "hashes": [ - "sha256:5375ad7fb427074bec956592e0d4ee2a4cf4da68934e1ba4bcf4217126bc45e6", - "sha256:ce105b68fb9f6d7ac7a9ee6e95bb2347a22ce4d3be78ef9a6494d5ef890e1e16" - ], - "markers": "python_version >= '3.8'", - "version": "==1.1.0" - }, - "urllib3": { - "hashes": [ - "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac", - "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9" - ], - "markers": "python_version >= '3.8'", - "version": "==2.2.3" - } - }, - "develop": { - "asttokens": { - "hashes": [ - "sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24", - "sha256:b03869718ba9a6eb027e134bfdf69f38a236d681c83c160d510768af11254ba0" - ], - "version": "==2.4.1" - }, - "decorator": { - "hashes": [ - "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330", - "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186" - ], - "markers": "python_version >= '3.11'", - "version": "==5.1.1" - }, - "executing": { - "hashes": [ - "sha256:8d63781349375b5ebccc3142f4b30350c0cd9c79f921cde38be2be4637e98eaf", - "sha256:8ea27ddd260da8150fa5a708269c4a10e76161e2496ec3e587da9e3c0fe4b9ab" - ], - "markers": "python_version >= '3.8'", - "version": "==2.1.0" - }, - "gnureadline": { - "hashes": [ - 
"sha256:17a651e0c49d4b44e8ccf8992edc5a544e33ed9695d3b940ef002858c2215744", - "sha256:194bafa818d0fc3d46f8d71a8811a297a493c1264d3e2d0a71b1b1ff05f8fc15", - "sha256:1e3a8aaf1d61d351c16ad2d3425caf5768603ff5d0e86ba61da9b8756bdd1b95", - "sha256:264f22e865975a3c2ac1183f431dddd8ff7de5a645b89a801c6a276d800f49f3", - "sha256:2753aa1e46b4260b38da424c6a7da7a3ddac161a0b4e6fb71c1093e9ef3d2e73", - "sha256:2816bac8be6bc0e3aa2301acac76e308137eeef1b618c9e0c95c1f89a139a4d8", - "sha256:2ce5c49ecc54e1df0193e90422806a5940f908553206689aeaa04bc959d3aa9a", - "sha256:33ea248385e0d87a3fada38c9164a5756861aa59d6ee010c8be30eeb41f41b49", - "sha256:3903cba2987d42340f1d85c38d3780e954c95e64bfe1839002c7818aa63f8ac3", - "sha256:4262a6aa356ab22ef642f43a7f94eb42a72d6f0c532edb4e8c6b933f573056d2", - "sha256:49df5a432e4ff39cee1b0632c6d0e5fb304757113e502d70b50e33d9ffa47372", - "sha256:4ad9b10409d969ba42acbf89e58352cf3043a5155c2ee677d061e292336b5479", - "sha256:5e1e2d34b0c4ad81c7b00019fafa6de2faf6969c55fa58229e26267cae34047e", - "sha256:5fde3e6417d9004381e8e9835e0a89d81d2d77eeace9364d2e3d9fb64054d449", - "sha256:72da8bac1eb24b6c8237a33d7019a3f004a3d5ba867337175ed764831d9a2c99", - "sha256:74f2538ac15ff4ef9534823abdef077bb34c7dd343e204a36d978f09e168462f", - "sha256:861936c9b362d96152af2d73ccb6f3e901e70f0e4a2e7e62f4e226e91d349edb", - "sha256:8c4690d6c89dbead0958b19263ae67ef995e6109d6bc880cb0e40720cb1ba301", - "sha256:aa29a18594277ea691f92b0c6627d594c0f3387a6685e2e42038ab3f718c794e", - "sha256:b422ff3a78e281ee2e19b0eff70efa48396284bbefa86b83438d668ea9d038a3", - "sha256:c1bcb32e3b63442570d6425055aa6d5c3b6e8b09b9c7d1f8333e70203166a5a3", - "sha256:c402bc6e107beb015ae18c3d2e11f28375f049e464423ead88b35affe80f9be0", - "sha256:c7971653083a48049abd52baa9c8c0188aee362e7b2dd236fe51ecd4e6bc9bbe", - "sha256:de3d8ea66f1b5d00ed843b8925fc07476b8c838c38e584af8639c6a976a43d08", - "sha256:deb921c2cbc14671bb81f3f33d9363a9d0720203b5d716baee32e51c399e914b", - "sha256:e84e903de1514043e6a22866a1973c2ad5f5717f78e9d54e4d6809c48fbd3d81", 
- "sha256:ecdc4368bd2f7ae9a22de31b024455222082cb49b98ee69ffd0a59734bf648e1" - ], - "index": "pypi", - "version": "==8.1.2" - }, - "ipdb": { - "hashes": [ - "sha256:45529994741c4ab6d2388bfa5d7b725c2cf7fe9deffabdb8a6113aa5ed449ed4", - "sha256:e3ac6018ef05126d442af680aad863006ec19d02290561ac88b8b1c0b0cfc726" - ], - "index": "pypi", - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==0.13.13" - }, - "ipython": { - "hashes": [ - "sha256:0d0d15ca1e01faeb868ef56bc7ee5a0de5bd66885735682e8a322ae289a13d1a", - "sha256:530ef1e7bb693724d3cdc37287c80b07ad9b25986c007a53aa1857272dac3f35" - ], - "markers": "python_version >= '3.11'", - "version": "==8.28.0" - }, - "jedi": { - "hashes": [ - "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd", - "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0" - ], - "markers": "python_version >= '3.6'", - "version": "==0.19.1" - }, - "matplotlib-inline": { - "hashes": [ - "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90", - "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca" - ], - "markers": "python_version >= '3.8'", - "version": "==0.1.7" - }, - "parso": { - "hashes": [ - "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18", - "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d" - ], - "markers": "python_version >= '3.6'", - "version": "==0.8.4" - }, - "pexpect": { - "hashes": [ - "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", - "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f" - ], - "markers": "sys_platform != 'win32' and sys_platform != 'emscripten'", - "version": "==4.9.0" - }, - "prompt-toolkit": { - "hashes": [ - "sha256:d6623ab0477a80df74e646bdbc93621143f5caf104206aa29294d53de1a03d90", - "sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e" - ], - "markers": "python_full_version >= 
'3.7.0'", - "version": "==3.0.48" - }, - "ptyprocess": { - "hashes": [ - "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", - "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220" - ], - "version": "==0.7.0" - }, - "pure-eval": { - "hashes": [ - "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", - "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42" - ], - "version": "==0.2.3" - }, - "pygments": { - "hashes": [ - "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199", - "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a" - ], - "markers": "python_version >= '3.8'", - "version": "==2.18.0" - }, - "six": { - "hashes": [ - "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", - "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254" - ], - "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", - "version": "==1.16.0" - }, - "stack-data": { - "hashes": [ - "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9", - "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695" - ], - "version": "==0.6.3" - }, - "traitlets": { - "hashes": [ - "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", - "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f" - ], - "markers": "python_version >= '3.8'", - "version": "==5.14.3" - }, - "wcwidth": { - "hashes": [ - "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", - "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5" - ], - "version": "==0.2.13" - } - } -} diff --git a/docker/test/README.md b/docker/test/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/docker/test/pyproject.toml b/docker/test/pyproject.toml new file mode 100644 index 00000000000..d32d184424f --- /dev/null +++ 
b/docker/test/pyproject.toml @@ -0,0 +1,41 @@ +[project] +name = "crowdsec-docker-tests" +version = "0.1.0" +description = "Docker tests for Crowdsec" +readme = "README.md" +requires-python = ">=3.12" +dependencies = [ + "pytest>=8.3.4", + "pytest-cs", + "pytest-dotenv>=0.5.2", + "pytest-xdist>=3.6.1", +] + +[dependency-groups] +dev = [ + "ipdb>=0.13.13", + "ruff>=0.9.3", +] + +[tool.uv.sources] +pytest-cs = { git = "https://github.com/crowdsecurity/pytest-cs" } + +[tool.ruff] + +line-length = 120 + +[tool.ruff.lint] +select = [ + "E", # pycodestyle errors + "W", # pycodestyle warnings + "F", # pyflakes + "I", # isort + "C", # flake8-comprehensions + "B", # flake8-bugbear + "UP", # pyupgrade + "C90", # macabe +] + +ignore = [ + "B008", # do not perform function calls in argument defaults +] diff --git a/docker/test/tests/conftest.py b/docker/test/tests/conftest.py index 3498da82660..d32ffa28c37 100644 --- a/docker/test/tests/conftest.py +++ b/docker/test/tests/conftest.py @@ -1,11 +1,6 @@ - pytest_plugins = ("cs",) def pytest_configure(config): - config.addinivalue_line( - 'markers', 'docker: mark tests for lone or manually orchestrated containers' - ) - config.addinivalue_line( - 'markers', 'compose: mark tests for docker compose projects' - ) + config.addinivalue_line("markers", "docker: mark tests for lone or manually orchestrated containers") + config.addinivalue_line("markers", "compose: mark tests for docker compose projects") diff --git a/docker/test/tests/test_agent.py b/docker/test/tests/test_agent.py index e55d11af850..aec1bbdaae8 100644 --- a/docker/test/tests/test_agent.py +++ b/docker/test/tests/test_agent.py @@ -10,12 +10,12 @@ def test_no_agent(crowdsec, flavor): """Test DISABLE_AGENT=true""" env = { - 'DISABLE_AGENT': 'true', + "DISABLE_AGENT": "true", } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*CrowdSec Local API listening on *:8080*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = 
cs.cont.exec_run('cscli lapi status') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout @@ -24,23 +24,25 @@ def test_no_agent(crowdsec, flavor): def test_machine_register(crowdsec, flavor, tmp_path_factory): """A local agent is always registered for use by cscli""" - data_dir = tmp_path_factory.mktemp('data') + data_dir = tmp_path_factory.mktemp("data") env = { - 'DISABLE_AGENT': 'true', + "DISABLE_AGENT": "true", } volumes = { - data_dir: {'bind': '/var/lib/crowdsec/data', 'mode': 'rw'}, + data_dir: {"bind": "/var/lib/crowdsec/data", "mode": "rw"}, } with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: - cs.wait_for_log([ + cs.wait_for_log( + [ "*Generate local agent credentials*", "*CrowdSec Local API listening on *:8080*", - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli lapi status') + ] + ) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout @@ -48,27 +50,31 @@ def test_machine_register(crowdsec, flavor, tmp_path_factory): # The local agent is not registered, because we didn't persist local_api_credentials.yaml with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: - cs.wait_for_log([ + cs.wait_for_log( + [ "*Generate local agent credentials*", "*CrowdSec Local API listening on *:8080*", - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli lapi status') + ] + ) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local 
API (LAPI)" in stdout - config_dir = tmp_path_factory.mktemp('config') + config_dir = tmp_path_factory.mktemp("config") - volumes[config_dir] = {'bind': '/etc/crowdsec', 'mode': 'rw'} + volumes[config_dir] = {"bind": "/etc/crowdsec", "mode": "rw"} with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: - cs.wait_for_log([ + cs.wait_for_log( + [ "*Generate local agent credentials*", "*CrowdSec Local API listening on *:8080*", - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli lapi status') + ] + ) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout @@ -76,12 +82,14 @@ def test_machine_register(crowdsec, flavor, tmp_path_factory): # The local agent is now already registered with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: - cs.wait_for_log([ + cs.wait_for_log( + [ "*Local agent already registered*", "*CrowdSec Local API listening on *:8080*", - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli lapi status') + ] + ) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout diff --git a/docker/test/tests/test_agent_only.py b/docker/test/tests/test_agent_only.py index 038b726e324..4e1689e0b9b 100644 --- a/docker/test/tests/test_agent_only.py +++ b/docker/test/tests/test_agent_only.py @@ -1,7 +1,7 @@ #!/usr/bin/env python -from http import HTTPStatus import random +from http import HTTPStatus import pytest @@ -10,19 +10,19 @@ def test_split_lapi_agent(crowdsec, flavor): rand = str(random.randint(0, 10000)) - lapiname = f'lapi-{rand}' - agentname = f'agent-{rand}' + lapiname = 
f"lapi-{rand}" + agentname = f"agent-{rand}" lapi_env = { - 'AGENT_USERNAME': 'testagent', - 'AGENT_PASSWORD': 'testpassword', + "AGENT_USERNAME": "testagent", + "AGENT_PASSWORD": "testpassword", } agent_env = { - 'AGENT_USERNAME': 'testagent', - 'AGENT_PASSWORD': 'testpassword', - 'DISABLE_LOCAL_API': 'true', - 'LOCAL_API_URL': f'http://{lapiname}:8080', + "AGENT_USERNAME": "testagent", + "AGENT_PASSWORD": "testpassword", + "DISABLE_LOCAL_API": "true", + "LOCAL_API_URL": f"http://{lapiname}:8080", } cs_lapi = crowdsec(name=lapiname, environment=lapi_env, flavor=flavor) @@ -30,10 +30,10 @@ def test_split_lapi_agent(crowdsec, flavor): with cs_lapi as lapi: lapi.wait_for_log("*CrowdSec Local API listening on *:8080*") - lapi.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) + lapi.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) with cs_agent as agent: agent.wait_for_log("*Starting processing data*") - res = agent.cont.exec_run('cscli lapi status') + res = agent.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout diff --git a/docker/test/tests/test_bouncer.py b/docker/test/tests/test_bouncer.py index 98b86de858c..d87aff734c5 100644 --- a/docker/test/tests/test_bouncer.py +++ b/docker/test/tests/test_bouncer.py @@ -5,8 +5,8 @@ """ import hashlib -from http import HTTPStatus import json +from http import HTTPStatus import pytest @@ -21,36 +21,33 @@ def hex512(s): def test_register_bouncer_env(crowdsec, flavor): """Test installing bouncers at startup, from envvar""" - env = { - 'BOUNCER_KEY_bouncer1name': 'bouncer1key', - 'BOUNCER_KEY_bouncer2name': 'bouncer2key' - } + env = {"BOUNCER_KEY_bouncer1name": "bouncer1key", "BOUNCER_KEY_bouncer2name": "bouncer2key"} with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = 
cs.cont.exec_run('cscli bouncers list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli bouncers list -o json") assert res.exit_code == 0 j = json.loads(res.output) assert len(j) == 2 bouncer1, bouncer2 = j - assert bouncer1['name'] == 'bouncer1name' - assert bouncer2['name'] == 'bouncer2name' + assert bouncer1["name"] == "bouncer1name" + assert bouncer2["name"] == "bouncer2name" # add a second bouncer at runtime - res = cs.cont.exec_run('cscli bouncers add bouncer3name -k bouncer3key') + res = cs.cont.exec_run("cscli bouncers add bouncer3name -k bouncer3key") assert res.exit_code == 0 - res = cs.cont.exec_run('cscli bouncers list -o json') + res = cs.cont.exec_run("cscli bouncers list -o json") assert res.exit_code == 0 j = json.loads(res.output) assert len(j) == 3 bouncer3 = j[2] - assert bouncer3['name'] == 'bouncer3name' + assert bouncer3["name"] == "bouncer3name" # remove all bouncers - res = cs.cont.exec_run('cscli bouncers delete bouncer1name bouncer2name bouncer3name') + res = cs.cont.exec_run("cscli bouncers delete bouncer1name bouncer2name bouncer3name") assert res.exit_code == 0 - res = cs.cont.exec_run('cscli bouncers list -o json') + res = cs.cont.exec_run("cscli bouncers list -o json") assert res.exit_code == 0 j = json.loads(res.output) assert len(j) == 0 diff --git a/docker/test/tests/test_capi.py b/docker/test/tests/test_capi.py index 08b3a70471e..ad25f7a766f 100644 --- a/docker/test/tests/test_capi.py +++ b/docker/test/tests/test_capi.py @@ -3,6 +3,7 @@ from http import HTTPStatus import pytest + pytestmark = pytest.mark.docker @@ -10,13 +11,13 @@ def test_no_capi(crowdsec, flavor): """Test no CAPI (disabled by default in tests)""" env = { - 'DISABLE_ONLINE_API': 'true', + "DISABLE_ONLINE_API": "true", } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = 
cs.cont.exec_run('cscli capi status') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli capi status") assert res.exit_code == 1 assert "You can successfully interact with Central API (CAPI)" not in res.output.decode() @@ -29,17 +30,19 @@ def test_capi(crowdsec, flavor): """Test CAPI""" env = { - 'DISABLE_ONLINE_API': 'false', + "DISABLE_ONLINE_API": "false", } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli capi status') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli capi status") assert res.exit_code == 0 assert "You can successfully interact with Central API (CAPI)" in res.output.decode() - cs.wait_for_log([ - "*Successfully registered to Central API (CAPI)*", - "*Registration to online API done*", - ]) + cs.wait_for_log( + [ + "*Successfully registered to Central API (CAPI)*", + "*Registration to online API done*", + ] + ) diff --git a/docker/test/tests/test_capi_whitelists.py b/docker/test/tests/test_capi_whitelists.py index 19378ba86f0..6cdd5f401f5 100644 --- a/docker/test/tests/test_capi_whitelists.py +++ b/docker/test/tests/test_capi_whitelists.py @@ -1,32 +1,32 @@ #!/usr/bin/env python from http import HTTPStatus -import yaml import pytest +import yaml pytestmark = pytest.mark.docker -def test_capi_whitelists(crowdsec, tmp_path_factory, flavor,): +def test_capi_whitelists( + crowdsec, + tmp_path_factory, + flavor, +): """Test CAPI_WHITELISTS_PATH""" - env = { - "CAPI_WHITELISTS_PATH": "/path/to/whitelists.yaml" - } + env = {"CAPI_WHITELISTS_PATH": "/path/to/whitelists.yaml"} whitelists = tmp_path_factory.mktemp("whitelists") with open(whitelists / "whitelists.yaml", "w") as f: yaml.dump({"ips": ["1.2.3.4", "2.3.4.5"], "cidrs": ["1.2.3.0/24"]}, f) - volumes = { - whitelists / "whitelists.yaml": {"bind": 
"/path/to/whitelists.yaml", "mode": "ro"} - } + volumes = {whitelists / "whitelists.yaml": {"bind": "/path/to/whitelists.yaml", "mode": "ro"}} with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli config show-yaml') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli config show-yaml") assert res.exit_code == 0 stdout = res.output.decode() y = yaml.safe_load(stdout) - assert y['api']['server']['capi_whitelists_path'] == '/path/to/whitelists.yaml' + assert y["api"]["server"]["capi_whitelists_path"] == "/path/to/whitelists.yaml" diff --git a/docker/test/tests/test_cold_logs.py b/docker/test/tests/test_cold_logs.py index 6f6c578ebe0..2eb3248ffd7 100644 --- a/docker/test/tests/test_cold_logs.py +++ b/docker/test/tests/test_cold_logs.py @@ -2,16 +2,15 @@ import datetime -from pytest_cs import Status - import pytest +from pytest_cs import Status pytestmark = pytest.mark.docker def test_cold_logs(crowdsec, tmp_path_factory, flavor): env = { - 'DSN': 'file:///var/log/toto.log', + "DSN": "file:///var/log/toto.log", } logs = tmp_path_factory.mktemp("logs") @@ -20,11 +19,11 @@ def test_cold_logs(crowdsec, tmp_path_factory, flavor): with open(logs / "toto.log", "w") as f: # like date '+%b %d %H:%M:%S' but in python for i in range(10): - ts = (now + datetime.timedelta(seconds=i)).strftime('%b %d %H:%M:%S') - f.write(ts + ' sd-126005 sshd[12422]: Invalid user netflix from 1.1.1.172 port 35424\n') + ts = (now + datetime.timedelta(seconds=i)).strftime("%b %d %H:%M:%S") + f.write(ts + " sd-126005 sshd[12422]: Invalid user netflix from 1.1.1.172 port 35424\n") volumes = { - logs / "toto.log": {'bind': '/var/log/toto.log', 'mode': 'ro'}, + logs / "toto.log": {"bind": "/var/log/toto.log", "mode": "ro"}, } # missing type @@ -32,20 +31,22 @@ def test_cold_logs(crowdsec, tmp_path_factory, 
flavor): with crowdsec(flavor=flavor, environment=env, volumes=volumes, wait_status=Status.EXITED) as cs: cs.wait_for_log("*-dsn requires a -type argument*") - env['TYPE'] = 'syslog' + env["TYPE"] = "syslog" with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: - cs.wait_for_log([ - "*Adding file /var/log/toto.log to filelist*", - "*reading /var/log/toto.log at once*", - "*Ip 1.1.1.172 performed 'crowdsecurity/ssh-bf' (6 events over 5s)*", - "*crowdsec shutdown*" - ]) + cs.wait_for_log( + [ + "*Adding file /var/log/toto.log to filelist*", + "*reading /var/log/toto.log at once*", + "*Ip 1.1.1.172 performed 'crowdsecurity/ssh-bf' (6 events over 5s)*", + "*crowdsec shutdown*", + ] + ) def test_cold_logs_missing_dsn(crowdsec, flavor): env = { - 'TYPE': 'syslog', + "TYPE": "syslog", } with crowdsec(flavor=flavor, environment=env, wait_status=Status.EXITED) as cs: diff --git a/docker/test/tests/test_flavors.py b/docker/test/tests/test_flavors.py index 7e78b8d681b..a48fe428c7b 100644 --- a/docker/test/tests/test_flavors.py +++ b/docker/test/tests/test_flavors.py @@ -15,8 +15,8 @@ def test_cscli_lapi(crowdsec, flavor): """Test if cscli can talk to lapi""" with crowdsec(flavor=flavor) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - x = cs.cont.exec_run('cscli lapi status') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + x = cs.cont.exec_run("cscli lapi status") assert x.exit_code == 0 stdout = x.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout @@ -27,35 +27,34 @@ def test_flavor_content(crowdsec, flavor): """Test flavor contents""" with crowdsec(flavor=flavor) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - x = cs.cont.exec_run('ls -1 /var/lib/crowdsec/data/') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + x = cs.cont.exec_run("ls -1 
/var/lib/crowdsec/data/") assert x.exit_code == 0 stdout = x.output.decode() - if 'slim' in flavor or 'plugins' in flavor: - assert 'GeoLite2-City.mmdb' not in stdout - assert 'GeoLite2-ASN.mmdb' not in stdout + if "slim" in flavor or "plugins" in flavor: + assert "GeoLite2-City.mmdb" not in stdout + assert "GeoLite2-ASN.mmdb" not in stdout else: - assert 'GeoLite2-City.mmdb' in stdout - assert 'GeoLite2-ASN.mmdb' in stdout - assert 'crowdsec.db' in stdout + assert "GeoLite2-City.mmdb" in stdout + assert "GeoLite2-ASN.mmdb" in stdout + assert "crowdsec.db" in stdout - x = cs.cont.exec_run( - 'ls -1 /usr/local/lib/crowdsec/plugins/') + x = cs.cont.exec_run("ls -1 /usr/local/lib/crowdsec/plugins/") stdout = x.output.decode() - if 'slim' in flavor: + if "slim" in flavor: # the exact return code and full message depend # on the 'ls' implementation (busybox vs coreutils) assert x.exit_code != 0 - assert 'No such file or directory' in stdout - assert 'notification-email' not in stdout - assert 'notification-http' not in stdout - assert 'notification-slack' not in stdout - assert 'notification-splunk' not in stdout - assert 'notification-sentinel' not in stdout + assert "No such file or directory" in stdout + assert "notification-email" not in stdout + assert "notification-http" not in stdout + assert "notification-slack" not in stdout + assert "notification-splunk" not in stdout + assert "notification-sentinel" not in stdout else: assert x.exit_code == 0 - assert 'notification-email' in stdout - assert 'notification-http' in stdout - assert 'notification-slack' in stdout - assert 'notification-splunk' in stdout - assert 'notification-sentinel' in stdout + assert "notification-email" in stdout + assert "notification-http" in stdout + assert "notification-slack" in stdout + assert "notification-splunk" in stdout + assert "notification-sentinel" in stdout diff --git a/docker/test/tests/test_hello.py b/docker/test/tests/test_hello.py index a21fde85044..a3ff4f07a93 100644 --- 
a/docker/test/tests/test_hello.py +++ b/docker/test/tests/test_hello.py @@ -13,24 +13,23 @@ def test_docker_cli_run(): """Test if docker run works from the command line. Capture stdout too""" - res = subprocess.run(['docker', 'run', '--rm', 'hello-world'], - capture_output=True, text=True) + res = subprocess.run(["docker", "run", "--rm", "hello-world"], capture_output=True, text=True) assert 0 == res.returncode - assert 'Hello from Docker!' in res.stdout + assert "Hello from Docker!" in res.stdout def test_docker_run(docker_client): """Test if docker run works from the python SDK.""" - output = docker_client.containers.run('hello-world', remove=True) + output = docker_client.containers.run("hello-world", remove=True) lines = output.decode().splitlines() assert "Hello from Docker!" in lines def test_docker_run_detach(docker_client): """Test with python SDK (async).""" - cont = docker_client.containers.run('hello-world', detach=True) - assert cont.status == 'created' - assert cont.attrs['State']['ExitCode'] == 0 + cont = docker_client.containers.run("hello-world", detach=True) + assert cont.status == "created" + assert cont.attrs["State"]["ExitCode"] == 0 lines = cont.logs().decode().splitlines() assert "Hello from Docker!" in lines cont.remove(force=True) diff --git a/docker/test/tests/test_hub.py b/docker/test/tests/test_hub.py index 2365e3a9cef..a7134fcb5c8 100644 --- a/docker/test/tests/test_hub.py +++ b/docker/test/tests/test_hub.py @@ -4,8 +4,8 @@ Test pre-installed hub items. 
""" -from http import HTTPStatus import json +from http import HTTPStatus import pytest @@ -16,12 +16,12 @@ def test_preinstalled_hub(crowdsec, flavor): """Test hub objects installed in the entrypoint""" with crowdsec(flavor=flavor) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli hub list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli hub list -o json", stderr=False) assert res.exit_code == 0 j = json.loads(res.output) - collections = {c['name']: c for c in j['collections']} - assert collections['crowdsecurity/linux']['status'] == 'enabled' - parsers = {c['name']: c for c in j['parsers']} - assert parsers['crowdsecurity/whitelists']['status'] == 'enabled' - assert parsers['crowdsecurity/docker-logs']['status'] == 'enabled' + collections = {c["name"]: c for c in j["collections"]} + assert collections["crowdsecurity/linux"]["status"] == "enabled" + parsers = {c["name"]: c for c in j["parsers"]} + assert parsers["crowdsecurity/whitelists"]["status"] == "enabled" + assert parsers["crowdsecurity/docker-logs"]["status"] == "enabled" diff --git a/docker/test/tests/test_hub_collections.py b/docker/test/tests/test_hub_collections.py index 962f8ff8df4..71fa698af06 100644 --- a/docker/test/tests/test_hub_collections.py +++ b/docker/test/tests/test_hub_collections.py @@ -4,8 +4,8 @@ Test collection management """ -from http import HTTPStatus import json +from http import HTTPStatus import pytest @@ -14,101 +14,98 @@ def test_install_two_collections(crowdsec, flavor): """Test installing collections at startup""" - it1 = 'crowdsecurity/apache2' - it2 = 'crowdsecurity/asterisk' - env = { - 'COLLECTIONS': f'{it1} {it2}' - } + it1 = "crowdsecurity/apache2" + it2 = "crowdsecurity/asterisk" + env = {"COLLECTIONS": f"{it1} {it2}"} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_http(8080, '/health', 
want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli collections list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli collections list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name']: c for c in j['collections']} - assert items[it1]['status'] == 'enabled' - assert items[it2]['status'] == 'enabled' - cs.wait_for_log([ - # f'*collections install "{it1}"*' - # f'*collections install "{it2}"*' - f'*Enabled collections: {it1}*', - f'*Enabled collections: {it2}*', - ]) + items = {c["name"]: c for c in j["collections"]} + assert items[it1]["status"] == "enabled" + assert items[it2]["status"] == "enabled" + cs.wait_for_log( + [ + f"*enabling collections:{it1}*", + f"*enabling collections:{it2}*", + ] + ) def test_disable_collection(crowdsec, flavor): """Test removing a pre-installed collection at startup""" - it = 'crowdsecurity/linux' - env = { - 'DISABLE_COLLECTIONS': it - } + it = "crowdsecurity/linux" + env = {"DISABLE_COLLECTIONS": it} with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli collections list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli collections list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name'] for c in j['collections']} + items = {c["name"] for c in j["collections"]} assert it not in items - cs.wait_for_log([ - # f'*collections remove "{it}*", - f'*Removed symlink [[]{it}[]]*', - ]) + cs.wait_for_log( + [ + f"*disabling collections:{it}*", + ] + ) def test_install_and_disable_collection(crowdsec, flavor): """Declare a collection to install AND disable: disable wins""" - it = 'crowdsecurity/apache2' + it = "crowdsecurity/apache2" env = { - 'COLLECTIONS': it, - 'DISABLE_COLLECTIONS': it, + "COLLECTIONS": it, + 
"DISABLE_COLLECTIONS": it, } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli collections list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli collections list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name'] for c in j['collections']} + items = {c["name"] for c in j["collections"]} assert it not in items logs = cs.log_lines() # check that there was no attempt to install - assert not any(f'Enabled collections: {it}' in line for line in logs) + assert not any(f"enabling collections:{it}" in line for line in logs) # already done in bats, prividing here as example of a somewhat complex test def test_taint_bubble_up(crowdsec, tmp_path_factory, flavor): - coll = 'crowdsecurity/nginx' - env = { - 'COLLECTIONS': f'{coll}' - } + coll = "crowdsecurity/nginx" + env = {"COLLECTIONS": f"{coll}"} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli collections list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli collections list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name']: c for c in j['collections']} + items = {c["name"]: c for c in j["collections"]} # implicit check for tainted=False - assert items[coll]['status'] == 'enabled' - cs.wait_for_log([ - f'*Enabled collections: {coll}*', - ]) + assert items[coll]["status"] == "enabled" + cs.wait_for_log( + [ + f"*enabling collections:{coll}*", + ] + ) - scenario = 'crowdsecurity/http-crawl-non_statics' + scenario = "crowdsecurity/http-crawl-non_statics" # the description won't be read back, it's from the index yq_command = f"yq -e -i '.description=\"tainted\"' /etc/crowdsec/hub/scenarios/{scenario}.yaml" res = 
cs.cont.exec_run(yq_command) assert res.exit_code == 0 - res = cs.cont.exec_run(f'cscli scenarios inspect {scenario} -o json') + res = cs.cont.exec_run(f"cscli scenarios inspect {scenario} -o json") assert res.exit_code == 0 j = json.loads(res.output) - assert j['tainted'] is True + assert j["tainted"] is True - res = cs.cont.exec_run('cscli collections list -o json') + res = cs.cont.exec_run("cscli collections list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name']: c for c in j['collections']} - assert items['crowdsecurity/nginx']['status'] == 'enabled,tainted' - assert items['crowdsecurity/base-http-scenarios']['status'] == 'enabled,tainted' + items = {c["name"]: c for c in j["collections"]} + assert items["crowdsecurity/nginx"]["status"] == "enabled,tainted" + assert items["crowdsecurity/base-http-scenarios"]["status"] == "enabled,tainted" diff --git a/docker/test/tests/test_hub_parsers.py b/docker/test/tests/test_hub_parsers.py index 8cfaeecf94c..42794d20b42 100644 --- a/docker/test/tests/test_hub_parsers.py +++ b/docker/test/tests/test_hub_parsers.py @@ -4,8 +4,8 @@ Test parser management """ -from http import HTTPStatus import json +from http import HTTPStatus import pytest @@ -14,60 +14,54 @@ def test_install_two_parsers(crowdsec, flavor): """Test installing parsers at startup""" - it1 = 'crowdsecurity/cpanel-logs' - it2 = 'crowdsecurity/cowrie-logs' - env = { - 'PARSERS': f'{it1} {it2}' - } + it1 = "crowdsecurity/cpanel-logs" + it2 = "crowdsecurity/cowrie-logs" + env = {"PARSERS": f"{it1} {it2}"} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_log([ - f'*parsers install "{it1}"*', - f'*parsers install "{it2}"*', - "*Starting processing data*" - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli parsers list -o json') + cs.wait_for_log([f'*parsers install "{it1}"*', f'*parsers install "{it2}"*', "*Starting processing data*"]) + cs.wait_for_http(8080, 
"/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli parsers list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name']: c for c in j['parsers']} - assert items[it1]['status'] == 'enabled' - assert items[it2]['status'] == 'enabled' + items = {c["name"]: c for c in j["parsers"]} + assert items[it1]["status"] == "enabled" + assert items[it2]["status"] == "enabled" # XXX check that the parser is preinstalled by default def test_disable_parser(crowdsec, flavor): """Test removing a pre-installed parser at startup""" - it = 'crowdsecurity/whitelists' - env = { - 'DISABLE_PARSERS': it - } + it = "crowdsecurity/whitelists" + env = {"DISABLE_PARSERS": it} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_log([ - f'*parsers remove "{it}"*', - "*Starting processing data*", - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli parsers list -o json') + cs.wait_for_log( + [ + f'*parsers remove "{it}"*', + "*Starting processing data*", + ] + ) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli parsers list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name'] for c in j['parsers']} + items = {c["name"] for c in j["parsers"]} assert it not in items def test_install_and_disable_parser(crowdsec, flavor): """Declare a parser to install AND disable: disable wins""" - it = 'crowdsecurity/cpanel-logs' + it = "crowdsecurity/cpanel-logs" env = { - 'PARSERS': it, - 'DISABLE_PARSERS': it, + "PARSERS": it, + "DISABLE_PARSERS": it, } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli parsers list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli parsers list -o json") assert res.exit_code == 0 j = 
json.loads(res.output) - items = {c['name'] for c in j['parsers']} + items = {c["name"] for c in j["parsers"]} assert it not in items logs = cs.log_lines() # check that there was no attempt to install diff --git a/docker/test/tests/test_hub_postoverflows.py b/docker/test/tests/test_hub_postoverflows.py index 80fdbc2b7bd..69f383cda24 100644 --- a/docker/test/tests/test_hub_postoverflows.py +++ b/docker/test/tests/test_hub_postoverflows.py @@ -4,8 +4,9 @@ Test postoverflow management """ -from http import HTTPStatus import json +from http import HTTPStatus + import pytest pytestmark = pytest.mark.docker @@ -13,24 +14,20 @@ def test_install_two_postoverflows(crowdsec, flavor): """Test installing postoverflows at startup""" - it1 = 'crowdsecurity/cdn-whitelist' - it2 = 'crowdsecurity/ipv6_to_range' - env = { - 'POSTOVERFLOWS': f'{it1} {it2}' - } + it1 = "crowdsecurity/cdn-whitelist" + it2 = "crowdsecurity/ipv6_to_range" + env = {"POSTOVERFLOWS": f"{it1} {it2}"} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_log([ - f'*postoverflows install "{it1}"*', - f'*postoverflows install "{it2}"*', - "*Starting processing data*" - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli postoverflows list -o json') + cs.wait_for_log( + [f'*postoverflows install "{it1}"*', f'*postoverflows install "{it2}"*', "*Starting processing data*"] + ) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli postoverflows list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name']: c for c in j['postoverflows']} - assert items[it1]['status'] == 'enabled' - assert items[it2]['status'] == 'enabled' + items = {c["name"]: c for c in j["postoverflows"]} + assert items[it1]["status"] == "enabled" + assert items[it2]["status"] == "enabled" def test_disable_postoverflow(): @@ -40,18 +37,18 @@ def test_disable_postoverflow(): def 
test_install_and_disable_postoverflow(crowdsec, flavor): """Declare a postoverflow to install AND disable: disable wins""" - it = 'crowdsecurity/cdn-whitelist' + it = "crowdsecurity/cdn-whitelist" env = { - 'POSTOVERFLOWS': it, - 'DISABLE_POSTOVERFLOWS': it, + "POSTOVERFLOWS": it, + "DISABLE_POSTOVERFLOWS": it, } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli postoverflows list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli postoverflows list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name'] for c in j['postoverflows']} + items = {c["name"] for c in j["postoverflows"]} assert it not in items logs = cs.log_lines() # check that there was no attempt to install diff --git a/docker/test/tests/test_hub_scenarios.py b/docker/test/tests/test_hub_scenarios.py index 2a8c3a275f2..4376a3ce64a 100644 --- a/docker/test/tests/test_hub_scenarios.py +++ b/docker/test/tests/test_hub_scenarios.py @@ -4,8 +4,8 @@ Test scenario management """ -from http import HTTPStatus import json +from http import HTTPStatus import pytest @@ -14,59 +14,48 @@ def test_install_two_scenarios(crowdsec, flavor): """Test installing scenarios at startup""" - it1 = 'crowdsecurity/cpanel-bf-attempt' - it2 = 'crowdsecurity/asterisk_bf' - env = { - 'SCENARIOS': f'{it1} {it2}' - } + it1 = "crowdsecurity/cpanel-bf-attempt" + it2 = "crowdsecurity/asterisk_bf" + env = {"SCENARIOS": f"{it1} {it2}"} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_log([ - f'*scenarios install "{it1}"*', - f'*scenarios install "{it2}"*', - "*Starting processing data*" - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli scenarios list -o json') + cs.wait_for_log([f'*scenarios install "{it1}"*', f'*scenarios install "{it2}"*', 
"*Starting processing data*"]) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli scenarios list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name']: c for c in j['scenarios']} - assert items[it1]['status'] == 'enabled' - assert items[it2]['status'] == 'enabled' + items = {c["name"]: c for c in j["scenarios"]} + assert items[it1]["status"] == "enabled" + assert items[it2]["status"] == "enabled" def test_disable_scenario(crowdsec, flavor): """Test removing a pre-installed scenario at startup""" - it = 'crowdsecurity/ssh-bf' - env = { - 'DISABLE_SCENARIOS': it - } + it = "crowdsecurity/ssh-bf" + env = {"DISABLE_SCENARIOS": it} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_log([ - f'*scenarios remove "{it}"*', - "*Starting processing data*" - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli scenarios list -o json') + cs.wait_for_log([f'*scenarios remove "{it}"*', "*Starting processing data*"]) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli scenarios list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name'] for c in j['scenarios']} + items = {c["name"] for c in j["scenarios"]} assert it not in items def test_install_and_disable_scenario(crowdsec, flavor): """Declare a scenario to install AND disable: disable wins""" - it = 'crowdsecurity/asterisk_bf' + it = "crowdsecurity/asterisk_bf" env = { - 'SCENARIOS': it, - 'DISABLE_SCENARIOS': it, + "SCENARIOS": it, + "DISABLE_SCENARIOS": it, } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli scenarios list -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli scenarios list -o json") assert res.exit_code == 0 j = 
json.loads(res.output) - items = {c['name'] for c in j['scenarios']} + items = {c["name"] for c in j["scenarios"]} assert it not in items logs = cs.cont.logs().decode().splitlines() # check that there was no attempt to install diff --git a/docker/test/tests/test_local_api_url.py b/docker/test/tests/test_local_api_url.py index aa90c9fb798..e38af3fedbe 100644 --- a/docker/test/tests/test_local_api_url.py +++ b/docker/test/tests/test_local_api_url.py @@ -10,12 +10,9 @@ def test_local_api_url_default(crowdsec, flavor): """Test LOCAL_API_URL (default)""" with crowdsec(flavor=flavor) as cs: - cs.wait_for_log([ - "*CrowdSec Local API listening on *:8080*", - "*Starting processing data*" - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli lapi status') + cs.wait_for_log(["*CrowdSec Local API listening on *:8080*", "*Starting processing data*"]) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "on http://0.0.0.0:8080/" in stdout @@ -24,16 +21,11 @@ def test_local_api_url_default(crowdsec, flavor): def test_local_api_url(crowdsec, flavor): """Test LOCAL_API_URL (custom)""" - env = { - "LOCAL_API_URL": "http://127.0.0.1:8080" - } + env = {"LOCAL_API_URL": "http://127.0.0.1:8080"} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_log([ - "*CrowdSec Local API listening on *:8080*", - "*Starting processing data*" - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli lapi status') + cs.wait_for_log(["*CrowdSec Local API listening on *:8080*", "*Starting processing data*"]) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "on http://127.0.0.1:8080/" in stdout @@ -48,16 +40,16 @@ def test_local_api_url_ipv6(crowdsec, flavor): # 
FIXME: https://forums.docker.com/t/assigning-default-ipv6-addresses/128665/3 # FIXME: https://github.com/moby/moby/issues/41438 - env = { - "LOCAL_API_URL": "http://[::1]:8080" - } + env = {"LOCAL_API_URL": "http://[::1]:8080"} with crowdsec(flavor=flavor, environment=env) as cs: - cs.wait_for_log([ - "*Starting processing data*", - "*CrowdSec Local API listening on [::1]:8080*", - ]) - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli lapi status') + cs.wait_for_log( + [ + "*Starting processing data*", + "*CrowdSec Local API listening on [::1]:8080*", + ] + ) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "on http://[::1]:8080/" in stdout diff --git a/docker/test/tests/test_local_item.py b/docker/test/tests/test_local_item.py index 3d6ac2fc954..e4c8e3c165a 100644 --- a/docker/test/tests/test_local_item.py +++ b/docker/test/tests/test_local_item.py @@ -4,8 +4,8 @@ Test bind-mounting local items """ -from http import HTTPStatus import json +from http import HTTPStatus import pytest @@ -15,33 +15,29 @@ def test_inject_local_item(crowdsec, tmp_path_factory, flavor): """Test mounting a custom whitelist at startup""" - localitems = tmp_path_factory.mktemp('localitems') - custom_whitelists = localitems / 'custom_whitelists.yaml' + localitems = tmp_path_factory.mktemp("localitems") + custom_whitelists = localitems / "custom_whitelists.yaml" - with open(custom_whitelists, 'w') as f: + with open(custom_whitelists, "w") as f: f.write('{"whitelist":{"reason":"Good IPs","ip":["1.2.3.4"]}}') - volumes = { - custom_whitelists: {'bind': '/etc/crowdsec/parsers/s02-enrich/custom_whitelists.yaml'} - } + volumes = {custom_whitelists: {"bind": "/etc/crowdsec/parsers/s02-enrich/custom_whitelists.yaml"}} with crowdsec(flavor=flavor, volumes=volumes) as cs: - cs.wait_for_log([ - "*Starting processing data*" - ]) - 
cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) + cs.wait_for_log(["*Starting processing data*"]) + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) # the parser should be enabled - res = cs.cont.exec_run('cscli parsers list -o json') + res = cs.cont.exec_run("cscli parsers list -o json") assert res.exit_code == 0 j = json.loads(res.output) - items = {c['name']: c for c in j['parsers']} - assert items['custom_whitelists.yaml']['status'] == 'enabled,local' + items = {c["name"]: c for c in j["parsers"]} + assert items["custom_whitelists.yaml"]["status"] == "enabled,local" # regression test: the linux collection should not be tainted # (the parsers were not copied from /staging when using "cp -an" with local parsers) - res = cs.cont.exec_run('cscli collections inspect crowdsecurity/linux -o json') + res = cs.cont.exec_run("cscli collections inspect crowdsecurity/linux -o json") assert res.exit_code == 0 j = json.loads(res.output) # crowdsec <= 1.5.5 omits a "tainted" when it's false - assert j.get('tainted', False) is False + assert j.get("tainted", False) is False diff --git a/docker/test/tests/test_metrics.py b/docker/test/tests/test_metrics.py index 8a6d5318156..bd41bdcea41 100644 --- a/docker/test/tests/test_metrics.py +++ b/docker/test/tests/test_metrics.py @@ -12,12 +12,12 @@ def test_metrics_port_default(crowdsec, flavor): metrics_port = 6060 with crowdsec(flavor=flavor) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - cs.wait_for_http(metrics_port, '/metrics', want_status=HTTPStatus.OK) - res = cs.cont.exec_run(f'wget -O - http://127.0.0.1:{metrics_port}/metrics') - if 'executable file not found' in res.output.decode(): + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + cs.wait_for_http(metrics_port, "/metrics", want_status=HTTPStatus.OK) + res = cs.cont.exec_run(f"wget -O - http://127.0.0.1:{metrics_port}/metrics") + if "executable file not found" in 
res.output.decode(): # TODO: find an alternative to wget - pytest.skip('wget not found') + pytest.skip("wget not found") assert res.exit_code == 0 stdout = res.output.decode() assert "# HELP cs_info Information about Crowdsec." in stdout @@ -25,15 +25,15 @@ def test_metrics_port_default(crowdsec, flavor): def test_metrics_port_default_ipv6(crowdsec, flavor): """Test metrics (ipv6)""" - pytest.skip('ipv6 not supported yet') + pytest.skip("ipv6 not supported yet") port = 6060 with crowdsec(flavor=flavor) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run(f'wget -O - http://[::1]:{port}/metrics') - if 'executable file not found' in res.output.decode(): + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run(f"wget -O - http://[::1]:{port}/metrics") + if "executable file not found" in res.output.decode(): # TODO: find an alternative to wget - pytest.skip('wget not found') + pytest.skip("wget not found") assert res.exit_code == 0 stdout = res.output.decode() assert "# HELP cs_info Information about Crowdsec." 
in stdout @@ -42,16 +42,14 @@ def test_metrics_port_default_ipv6(crowdsec, flavor): def test_metrics_port(crowdsec, flavor): """Test metrics (custom METRICS_PORT)""" port = 7070 - env = { - "METRICS_PORT": port - } + env = {"METRICS_PORT": port} with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run(f'wget -O - http://127.0.0.1:{port}/metrics') - if 'executable file not found' in res.output.decode(): + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run(f"wget -O - http://127.0.0.1:{port}/metrics") + if "executable file not found" in res.output.decode(): # TODO: find an alternative to wget - pytest.skip('wget not found') + pytest.skip("wget not found") assert res.exit_code == 0 stdout = res.output.decode() assert "# HELP cs_info Information about Crowdsec." in stdout @@ -59,18 +57,16 @@ def test_metrics_port(crowdsec, flavor): def test_metrics_port_ipv6(crowdsec, flavor): """Test metrics (custom METRICS_PORT, ipv6)""" - pytest.skip('ipv6 not supported yet') + pytest.skip("ipv6 not supported yet") port = 7070 - env = { - "METRICS_PORT": port - } + env = {"METRICS_PORT": port} with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run(f'wget -O - http://[::1]:{port}/metrics') - if 'executable file not found' in res.output.decode(): + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run(f"wget -O - http://[::1]:{port}/metrics") + if "executable file not found" in res.output.decode(): # TODO: find an alternative to wget - pytest.skip('wget not found') + pytest.skip("wget not found") assert res.exit_code == 0 stdout = res.output.decode() assert "# HELP cs_info Information about Crowdsec." 
in stdout diff --git a/docker/test/tests/test_nolapi.py b/docker/test/tests/test_nolapi.py index 6edb354fe75..e5dbc3c2624 100644 --- a/docker/test/tests/test_nolapi.py +++ b/docker/test/tests/test_nolapi.py @@ -1,8 +1,7 @@ #!/usr/bin/env python -from pytest_cs import Status - import pytest +from pytest_cs import Status pytestmark = pytest.mark.docker @@ -10,7 +9,7 @@ def test_no_agent(crowdsec, flavor): """Test DISABLE_LOCAL_API=true (failing stand-alone container)""" env = { - 'DISABLE_LOCAL_API': 'true', + "DISABLE_LOCAL_API": "true", } # if an alternative lapi url is not defined, the container should exit diff --git a/docker/test/tests/test_simple.py b/docker/test/tests/test_simple.py index 951d8be4b24..b5c8425b371 100644 --- a/docker/test/tests/test_simple.py +++ b/docker/test/tests/test_simple.py @@ -13,4 +13,4 @@ def test_crowdsec(crowdsec, flavor): matcher.fnmatch_lines(["*Starting processing data*"]) res = cs.cont.exec_run('sh -c "echo $CI_TESTING"') assert res.exit_code == 0 - assert 'true' == res.output.decode().strip() + assert "true" == res.output.decode().strip() diff --git a/docker/test/tests/test_tls.py b/docker/test/tests/test_tls.py index d2f512fcbc1..220738a9f07 100644 --- a/docker/test/tests/test_tls.py +++ b/docker/test/tests/test_tls.py @@ -6,9 +6,8 @@ import uuid -from pytest_cs import Status - import pytest +from pytest_cs import Status pytestmark = pytest.mark.docker @@ -17,8 +16,8 @@ def test_missing_key_file(crowdsec, flavor): """Test that cscli and agent can communicate to LAPI with TLS""" env = { - 'CERT_FILE': '/etc/ssl/crowdsec/cert.pem', - 'USE_TLS': 'true', + "CERT_FILE": "/etc/ssl/crowdsec/cert.pem", + "USE_TLS": "true", } with crowdsec(flavor=flavor, environment=env, wait_status=Status.EXITED) as cs: @@ -29,8 +28,8 @@ def test_missing_cert_file(crowdsec, flavor): """Test that cscli and agent can communicate to LAPI with TLS""" env = { - 'KEY_FILE': '/etc/ssl/crowdsec/cert.key', - 'USE_TLS': 'true', + "KEY_FILE": 
"/etc/ssl/crowdsec/cert.key", + "USE_TLS": "true", } with crowdsec(flavor=flavor, environment=env, wait_status=Status.EXITED) as cs: @@ -41,14 +40,14 @@ def test_tls_missing_ca(crowdsec, flavor, certs_dir): """Missing CA cert, unknown authority""" env = { - 'CERT_FILE': '/etc/ssl/crowdsec/lapi.crt', - 'KEY_FILE': '/etc/ssl/crowdsec/lapi.key', - 'USE_TLS': 'true', - 'LOCAL_API_URL': 'https://localhost:8080', + "CERT_FILE": "/etc/ssl/crowdsec/lapi.crt", + "KEY_FILE": "/etc/ssl/crowdsec/lapi.key", + "USE_TLS": "true", + "LOCAL_API_URL": "https://localhost:8080", } volumes = { - certs_dir(lapi_hostname='lapi'): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname="lapi"): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } with crowdsec(flavor=flavor, environment=env, volumes=volumes, wait_status=Status.EXITED) as cs: @@ -59,22 +58,22 @@ def test_tls_legacy_var(crowdsec, flavor, certs_dir): """Test server-only certificate, legacy variables""" env = { - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'CERT_FILE': '/etc/ssl/crowdsec/lapi.crt', - 'KEY_FILE': '/etc/ssl/crowdsec/lapi.key', - 'USE_TLS': 'true', - 'LOCAL_API_URL': 'https://localhost:8080', + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "CERT_FILE": "/etc/ssl/crowdsec/lapi.crt", + "KEY_FILE": "/etc/ssl/crowdsec/lapi.key", + "USE_TLS": "true", + "LOCAL_API_URL": "https://localhost:8080", } volumes = { - certs_dir(lapi_hostname='lapi'): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname="lapi"): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: cs.wait_for_log("*Starting processing data*") # TODO: wait_for_https - cs.wait_for_http(8080, '/health', want_status=None) - x = cs.cont.exec_run('cscli lapi status') + cs.wait_for_http(8080, "/health", want_status=None) + x = cs.cont.exec_run("cscli lapi status") assert x.exit_code == 0 stdout = x.output.decode() assert "You can successfully interact with Local API (LAPI)" in 
stdout @@ -84,24 +83,24 @@ def test_tls_mutual_monolith(crowdsec, flavor, certs_dir): """Server and client certificates, on the same container""" env = { - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'LAPI_CERT_FILE': '/etc/ssl/crowdsec/lapi.crt', - 'LAPI_KEY_FILE': '/etc/ssl/crowdsec/lapi.key', - 'CLIENT_CERT_FILE': '/etc/ssl/crowdsec/agent.crt', - 'CLIENT_KEY_FILE': '/etc/ssl/crowdsec/agent.key', - 'USE_TLS': 'true', - 'LOCAL_API_URL': 'https://localhost:8080', + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "LAPI_CERT_FILE": "/etc/ssl/crowdsec/lapi.crt", + "LAPI_KEY_FILE": "/etc/ssl/crowdsec/lapi.key", + "CLIENT_CERT_FILE": "/etc/ssl/crowdsec/agent.crt", + "CLIENT_KEY_FILE": "/etc/ssl/crowdsec/agent.key", + "USE_TLS": "true", + "LOCAL_API_URL": "https://localhost:8080", } volumes = { - certs_dir(lapi_hostname='lapi'): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname="lapi"): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: cs.wait_for_log("*Starting processing data*") # TODO: wait_for_https - cs.wait_for_http(8080, '/health', want_status=None) - x = cs.cont.exec_run('cscli lapi status') + cs.wait_for_http(8080, "/health", want_status=None) + x = cs.cont.exec_run("cscli lapi status") assert x.exit_code == 0 stdout = x.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout @@ -111,26 +110,27 @@ def test_tls_lapi_var(crowdsec, flavor, certs_dir): """Test server-only certificate, lapi variables""" env = { - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'LAPI_CERT_FILE': '/etc/ssl/crowdsec/lapi.crt', - 'LAPI_KEY_FILE': '/etc/ssl/crowdsec/lapi.key', - 'USE_TLS': 'true', - 'LOCAL_API_URL': 'https://localhost:8080', + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "LAPI_CERT_FILE": "/etc/ssl/crowdsec/lapi.crt", + "LAPI_KEY_FILE": "/etc/ssl/crowdsec/lapi.key", + "USE_TLS": "true", + "LOCAL_API_URL": "https://localhost:8080", } volumes = { - 
certs_dir(lapi_hostname='lapi'): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname="lapi"): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } with crowdsec(flavor=flavor, environment=env, volumes=volumes) as cs: cs.wait_for_log("*Starting processing data*") # TODO: wait_for_https - cs.wait_for_http(8080, '/health', want_status=None) - x = cs.cont.exec_run('cscli lapi status') + cs.wait_for_http(8080, "/health", want_status=None) + x = cs.cont.exec_run("cscli lapi status") assert x.exit_code == 0 stdout = x.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout + # TODO: bad lapi hostname # the cert is valid, but has a CN that doesn't match the hostname # we must set insecure_skip_verify to true to use it @@ -140,50 +140,49 @@ def test_tls_split_lapi_agent(crowdsec, flavor, certs_dir): """Server-only certificate, split containers""" rand = uuid.uuid1() - lapiname = 'lapi-' + str(rand) - agentname = 'agent-' + str(rand) + lapiname = "lapi-" + str(rand) + agentname = "agent-" + str(rand) lapi_env = { - 'USE_TLS': 'true', - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'LAPI_CERT_FILE': '/etc/ssl/crowdsec/lapi.crt', - 'LAPI_KEY_FILE': '/etc/ssl/crowdsec/lapi.key', - 'AGENT_USERNAME': 'testagent', - 'AGENT_PASSWORD': 'testpassword', - 'LOCAL_API_URL': 'https://localhost:8080', + "USE_TLS": "true", + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "LAPI_CERT_FILE": "/etc/ssl/crowdsec/lapi.crt", + "LAPI_KEY_FILE": "/etc/ssl/crowdsec/lapi.key", + "AGENT_USERNAME": "testagent", + "AGENT_PASSWORD": "testpassword", + "LOCAL_API_URL": "https://localhost:8080", } agent_env = { - 'USE_TLS': 'true', - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'AGENT_USERNAME': 'testagent', - 'AGENT_PASSWORD': 'testpassword', - 'LOCAL_API_URL': f'https://{lapiname}:8080', - 'DISABLE_LOCAL_API': 'true', - 'CROWDSEC_FEATURE_DISABLE_HTTP_RETRY_BACKOFF': 'false', + "USE_TLS": "true", + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "AGENT_USERNAME": 
"testagent", + "AGENT_PASSWORD": "testpassword", + "LOCAL_API_URL": f"https://{lapiname}:8080", + "DISABLE_LOCAL_API": "true", + "CROWDSEC_FEATURE_DISABLE_HTTP_RETRY_BACKOFF": "false", } volumes = { - certs_dir(lapi_hostname=lapiname): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname=lapiname): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } cs_lapi = crowdsec(flavor=flavor, name=lapiname, environment=lapi_env, volumes=volumes) cs_agent = crowdsec(flavor=flavor, name=agentname, environment=agent_env, volumes=volumes) with cs_lapi as lapi: - lapi.wait_for_log([ - "*(tls) Client Auth Type set to VerifyClientCertIfGiven*", - "*CrowdSec Local API listening on *:8080*" - ]) + lapi.wait_for_log( + ["*(tls) Client Auth Type set to VerifyClientCertIfGiven*", "*CrowdSec Local API listening on *:8080*"] + ) # TODO: wait_for_https - lapi.wait_for_http(8080, '/health', want_status=None) + lapi.wait_for_http(8080, "/health", want_status=None) with cs_agent as agent: agent.wait_for_log("*Starting processing data*") - res = agent.cont.exec_run('cscli lapi status') + res = agent.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout - res = lapi.cont.exec_run('cscli lapi status') + res = lapi.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout @@ -193,48 +192,47 @@ def test_tls_mutual_split_lapi_agent(crowdsec, flavor, certs_dir): """Server and client certificates, split containers""" rand = uuid.uuid1() - lapiname = 'lapi-' + str(rand) - agentname = 'agent-' + str(rand) + lapiname = "lapi-" + str(rand) + agentname = "agent-" + str(rand) lapi_env = { - 'USE_TLS': 'true', - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'LAPI_CERT_FILE': '/etc/ssl/crowdsec/lapi.crt', - 'LAPI_KEY_FILE': '/etc/ssl/crowdsec/lapi.key', - 'LOCAL_API_URL': 
'https://localhost:8080', + "USE_TLS": "true", + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "LAPI_CERT_FILE": "/etc/ssl/crowdsec/lapi.crt", + "LAPI_KEY_FILE": "/etc/ssl/crowdsec/lapi.key", + "LOCAL_API_URL": "https://localhost:8080", } agent_env = { - 'USE_TLS': 'true', - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'CLIENT_CERT_FILE': '/etc/ssl/crowdsec/agent.crt', - 'CLIENT_KEY_FILE': '/etc/ssl/crowdsec/agent.key', - 'LOCAL_API_URL': f'https://{lapiname}:8080', - 'DISABLE_LOCAL_API': 'true', - 'CROWDSEC_FEATURE_DISABLE_HTTP_RETRY_BACKOFF': 'false', + "USE_TLS": "true", + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "CLIENT_CERT_FILE": "/etc/ssl/crowdsec/agent.crt", + "CLIENT_KEY_FILE": "/etc/ssl/crowdsec/agent.key", + "LOCAL_API_URL": f"https://{lapiname}:8080", + "DISABLE_LOCAL_API": "true", + "CROWDSEC_FEATURE_DISABLE_HTTP_RETRY_BACKOFF": "false", } volumes = { - certs_dir(lapi_hostname=lapiname): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname=lapiname): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } cs_lapi = crowdsec(flavor=flavor, name=lapiname, environment=lapi_env, volumes=volumes) cs_agent = crowdsec(flavor=flavor, name=agentname, environment=agent_env, volumes=volumes) with cs_lapi as lapi: - lapi.wait_for_log([ - "*(tls) Client Auth Type set to VerifyClientCertIfGiven*", - "*CrowdSec Local API listening on *:8080*" - ]) + lapi.wait_for_log( + ["*(tls) Client Auth Type set to VerifyClientCertIfGiven*", "*CrowdSec Local API listening on *:8080*"] + ) # TODO: wait_for_https - lapi.wait_for_http(8080, '/health', want_status=None) + lapi.wait_for_http(8080, "/health", want_status=None) with cs_agent as agent: agent.wait_for_log("*Starting processing data*") - res = agent.cont.exec_run('cscli lapi status') + res = agent.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout - res = lapi.cont.exec_run('cscli lapi status') + res = 
lapi.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout @@ -244,78 +242,78 @@ def test_tls_client_ou(crowdsec, flavor, certs_dir): """Check behavior of client certificate vs AGENTS_ALLOWED_OU""" rand = uuid.uuid1() - lapiname = 'lapi-' + str(rand) - agentname = 'agent-' + str(rand) + lapiname = "lapi-" + str(rand) + agentname = "agent-" + str(rand) lapi_env = { - 'USE_TLS': 'true', - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'LAPI_CERT_FILE': '/etc/ssl/crowdsec/lapi.crt', - 'LAPI_KEY_FILE': '/etc/ssl/crowdsec/lapi.key', - 'LOCAL_API_URL': 'https://localhost:8080', + "USE_TLS": "true", + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "LAPI_CERT_FILE": "/etc/ssl/crowdsec/lapi.crt", + "LAPI_KEY_FILE": "/etc/ssl/crowdsec/lapi.key", + "LOCAL_API_URL": "https://localhost:8080", } agent_env = { - 'USE_TLS': 'true', - 'CACERT_FILE': '/etc/ssl/crowdsec/ca.crt', - 'CLIENT_CERT_FILE': '/etc/ssl/crowdsec/agent.crt', - 'CLIENT_KEY_FILE': '/etc/ssl/crowdsec/agent.key', - 'LOCAL_API_URL': f'https://{lapiname}:8080', - 'DISABLE_LOCAL_API': 'true', - 'CROWDSEC_FEATURE_DISABLE_HTTP_RETRY_BACKOFF': 'false', + "USE_TLS": "true", + "CACERT_FILE": "/etc/ssl/crowdsec/ca.crt", + "CLIENT_CERT_FILE": "/etc/ssl/crowdsec/agent.crt", + "CLIENT_KEY_FILE": "/etc/ssl/crowdsec/agent.key", + "LOCAL_API_URL": f"https://{lapiname}:8080", + "DISABLE_LOCAL_API": "true", + "CROWDSEC_FEATURE_DISABLE_HTTP_RETRY_BACKOFF": "false", } volumes = { - certs_dir(lapi_hostname=lapiname, agent_ou='custom-client-ou'): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname=lapiname, agent_ou="custom-client-ou"): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } cs_lapi = crowdsec(flavor=flavor, name=lapiname, environment=lapi_env, volumes=volumes) cs_agent = crowdsec(flavor=flavor, name=agentname, environment=agent_env, volumes=volumes) with cs_lapi as lapi: - lapi.wait_for_log([ - "*(tls) 
Client Auth Type set to VerifyClientCertIfGiven*", - "*CrowdSec Local API listening on *:8080*" - ]) + lapi.wait_for_log( + ["*(tls) Client Auth Type set to VerifyClientCertIfGiven*", "*CrowdSec Local API listening on *:8080*"] + ) # TODO: wait_for_https - lapi.wait_for_http(8080, '/health', want_status=None) + lapi.wait_for_http(8080, "/health", want_status=None) with cs_agent as agent: - lapi.wait_for_log([ - "*client certificate OU ?custom-client-ou? doesn't match expected OU ?agent-ou?*", - ]) + lapi.wait_for_log( + [ + "*client certificate OU ?custom-client-ou? doesn't match expected OU ?agent-ou?*", + ] + ) - lapi_env['AGENTS_ALLOWED_OU'] = 'custom-client-ou' + lapi_env["AGENTS_ALLOWED_OU"] = "custom-client-ou" # change container names to avoid conflict # recreate certificates because they need the new hostname rand = uuid.uuid1() - lapiname = 'lapi-' + str(rand) - agentname = 'agent-' + str(rand) + lapiname = "lapi-" + str(rand) + agentname = "agent-" + str(rand) - agent_env['LOCAL_API_URL'] = f'https://{lapiname}:8080' + agent_env["LOCAL_API_URL"] = f"https://{lapiname}:8080" volumes = { - certs_dir(lapi_hostname=lapiname, agent_ou='custom-client-ou'): {'bind': '/etc/ssl/crowdsec', 'mode': 'ro'}, + certs_dir(lapi_hostname=lapiname, agent_ou="custom-client-ou"): {"bind": "/etc/ssl/crowdsec", "mode": "ro"}, } cs_lapi = crowdsec(flavor=flavor, name=lapiname, environment=lapi_env, volumes=volumes) cs_agent = crowdsec(flavor=flavor, name=agentname, environment=agent_env, volumes=volumes) with cs_lapi as lapi: - lapi.wait_for_log([ - "*(tls) Client Auth Type set to VerifyClientCertIfGiven*", - "*CrowdSec Local API listening on *:8080*" - ]) + lapi.wait_for_log( + ["*(tls) Client Auth Type set to VerifyClientCertIfGiven*", "*CrowdSec Local API listening on *:8080*"] + ) # TODO: wait_for_https - lapi.wait_for_http(8080, '/health', want_status=None) + lapi.wait_for_http(8080, "/health", want_status=None) with cs_agent as agent: agent.wait_for_log("*Starting 
processing data*") - res = agent.cont.exec_run('cscli lapi status') + res = agent.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout - res = lapi.cont.exec_run('cscli lapi status') + res = lapi.cont.exec_run("cscli lapi status") assert res.exit_code == 0 stdout = res.output.decode() assert "You can successfully interact with Local API (LAPI)" in stdout diff --git a/docker/test/tests/test_version.py b/docker/test/tests/test_version.py index c152d2e4e6c..baac61c36ab 100644 --- a/docker/test/tests/test_version.py +++ b/docker/test/tests/test_version.py @@ -10,9 +10,9 @@ def test_version_docker_platform(crowdsec, flavor): for waiter in cs.log_waiters(): with waiter as matcher: matcher.fnmatch_lines(["*Starting processing data*"]) - res = cs.cont.exec_run('cscli version') + res = cs.cont.exec_run("cscli version") assert res.exit_code == 0 - assert 'Platform: docker' in res.output.decode() - res = cs.cont.exec_run('crowdsec -version') + assert "Platform: docker" in res.output.decode() + res = cs.cont.exec_run("crowdsec -version") assert res.exit_code == 0 - assert 'Platform: docker' in res.output.decode() + assert "Platform: docker" in res.output.decode() diff --git a/docker/test/tests/test_wal.py b/docker/test/tests/test_wal.py index e3edbcaf385..e1fe3d260be 100644 --- a/docker/test/tests/test_wal.py +++ b/docker/test/tests/test_wal.py @@ -11,8 +11,8 @@ def test_use_wal_default(crowdsec, flavor): """Test USE_WAL default""" with crowdsec(flavor=flavor) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli config show --key Config.DbConfig.UseWal -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli config show --key Config.DbConfig.UseWal -o json") assert res.exit_code == 0 stdout = res.output.decode() assert "false" in 
stdout @@ -21,12 +21,12 @@ def test_use_wal_default(crowdsec, flavor): def test_use_wal_true(crowdsec, flavor): """Test USE_WAL=true""" env = { - 'USE_WAL': 'true', + "USE_WAL": "true", } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli config show --key Config.DbConfig.UseWal -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli config show --key Config.DbConfig.UseWal -o json") assert res.exit_code == 0 stdout = res.output.decode() assert "true" in stdout @@ -35,12 +35,12 @@ def test_use_wal_true(crowdsec, flavor): def test_use_wal_false(crowdsec, flavor): """Test USE_WAL=false""" env = { - 'USE_WAL': 'false', + "USE_WAL": "false", } with crowdsec(flavor=flavor, environment=env) as cs: cs.wait_for_log("*Starting processing data*") - cs.wait_for_http(8080, '/health', want_status=HTTPStatus.OK) - res = cs.cont.exec_run('cscli config show --key Config.DbConfig.UseWal -o json') + cs.wait_for_http(8080, "/health", want_status=HTTPStatus.OK) + res = cs.cont.exec_run("cscli config show --key Config.DbConfig.UseWal -o json") assert res.exit_code == 0 stdout = res.output.decode() assert "false" in stdout diff --git a/docker/test/uv.lock b/docker/test/uv.lock new file mode 100644 index 00000000000..d8cc42c89ab --- /dev/null +++ b/docker/test/uv.lock @@ -0,0 +1,587 @@ +version = 1 +requires-python = ">=3.12" + +[[package]] +name = "asttokens" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4a/e7/82da0a03e7ba5141f05cce0d302e6eed121ae055e0456ca228bf693984bc/asttokens-3.0.0.tar.gz", hash = "sha256:0dcd8baa8d62b0c1d118b399b2ddba3c4aff271d0d7a9e0d4c1681c79035bbc7", size = 61978 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/25/8a/c46dcc25341b5bce5472c718902eb3d38600a903b14fa6aeecef3f21a46f/asttokens-3.0.0-py3-none-any.whl", hash = "sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2", size = 26918 }, +] + +[[package]] +name = "certifi" +version = "2024.12.14" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0f/bd/1d41ee578ce09523c81a15426705dd20969f5abf006d1afe8aeff0dd776a/certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db", size = 166010 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a5/32/8f6669fc4798494966bf446c8c4a162e0b5d893dff088afddf76414f70e1/certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56", size = 164927 }, +] + +[[package]] +name = "cffi" +version = "1.17.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/84/e94227139ee5fb4d600a7a4927f322e1d4aea6fdc50bd3fca8493caba23f/cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", size = 183178 }, + { url = "https://files.pythonhosted.org/packages/da/ee/fb72c2b48656111c4ef27f0f91da355e130a923473bf5ee75c5643d00cca/cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", size = 178840 }, + { url = 
"https://files.pythonhosted.org/packages/cc/b6/db007700f67d151abadf508cbfd6a1884f57eab90b1bb985c4c8c02b0f28/cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", size = 454803 }, + { url = "https://files.pythonhosted.org/packages/1a/df/f8d151540d8c200eb1c6fba8cd0dfd40904f1b0682ea705c36e6c2e97ab3/cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", size = 478850 }, + { url = "https://files.pythonhosted.org/packages/28/c0/b31116332a547fd2677ae5b78a2ef662dfc8023d67f41b2a83f7c2aa78b1/cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", size = 485729 }, + { url = "https://files.pythonhosted.org/packages/91/2b/9a1ddfa5c7f13cab007a2c9cc295b70fbbda7cb10a286aa6810338e60ea1/cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", size = 471256 }, + { url = "https://files.pythonhosted.org/packages/b2/d5/da47df7004cb17e4955df6a43d14b3b4ae77737dff8bf7f8f333196717bf/cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", size = 479424 }, + { url = "https://files.pythonhosted.org/packages/0b/ac/2a28bcf513e93a219c8a4e8e125534f4f6db03e3179ba1c45e949b76212c/cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", size = 484568 }, + { url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", 
size = 488736 }, + { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448 }, + { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976 }, + { url = "https://files.pythonhosted.org/packages/8d/f8/dd6c246b148639254dad4d6803eb6a54e8c85c6e11ec9df2cffa87571dbe/cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", size = 182989 }, + { url = "https://files.pythonhosted.org/packages/8b/f1/672d303ddf17c24fc83afd712316fda78dc6fce1cd53011b839483e1ecc8/cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", size = 178802 }, + { url = "https://files.pythonhosted.org/packages/0e/2d/eab2e858a91fdff70533cab61dcff4a1f55ec60425832ddfdc9cd36bc8af/cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", size = 454792 }, + { url = "https://files.pythonhosted.org/packages/75/b2/fbaec7c4455c604e29388d55599b99ebcc250a60050610fadde58932b7ee/cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", size = 478893 }, + { url = "https://files.pythonhosted.org/packages/4f/b7/6e4a2162178bf1935c336d4da8a9352cccab4d3a5d7914065490f08c0690/cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", size = 485810 }, + { url = 
"https://files.pythonhosted.org/packages/c7/8a/1d0e4a9c26e54746dc08c2c6c037889124d4f59dffd853a659fa545f1b40/cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", size = 471200 }, + { url = "https://files.pythonhosted.org/packages/26/9f/1aab65a6c0db35f43c4d1b4f580e8df53914310afc10ae0397d29d697af4/cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", size = 479447 }, + { url = "https://files.pythonhosted.org/packages/5f/e4/fb8b3dd8dc0e98edf1135ff067ae070bb32ef9d509d6cb0f538cd6f7483f/cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", size = 484358 }, + { url = "https://files.pythonhosted.org/packages/f1/47/d7145bf2dc04684935d57d67dff9d6d795b2ba2796806bb109864be3a151/cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", size = 488469 }, + { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475 }, + { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009 }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/16/b0/572805e227f01586461c80e0fd25d65a2115599cc9dad142fee4b747c357/charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3", size = 123188 } +wheels = 
[ + { url = "https://files.pythonhosted.org/packages/0a/9a/dd1e1cdceb841925b7798369a09279bd1cf183cef0f9ddf15a3a6502ee45/charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545", size = 196105 }, + { url = "https://files.pythonhosted.org/packages/d3/8c/90bfabf8c4809ecb648f39794cf2a84ff2e7d2a6cf159fe68d9a26160467/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7", size = 140404 }, + { url = "https://files.pythonhosted.org/packages/ad/8f/e410d57c721945ea3b4f1a04b74f70ce8fa800d393d72899f0a40526401f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757", size = 150423 }, + { url = "https://files.pythonhosted.org/packages/f0/b8/e6825e25deb691ff98cf5c9072ee0605dc2acfca98af70c2d1b1bc75190d/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa", size = 143184 }, + { url = "https://files.pythonhosted.org/packages/3e/a2/513f6cbe752421f16d969e32f3583762bfd583848b763913ddab8d9bfd4f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d", size = 145268 }, + { url = "https://files.pythonhosted.org/packages/74/94/8a5277664f27c3c438546f3eb53b33f5b19568eb7424736bdc440a88a31f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616", size = 147601 }, + { url = 
"https://files.pythonhosted.org/packages/7c/5f/6d352c51ee763623a98e31194823518e09bfa48be2a7e8383cf691bbb3d0/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b", size = 141098 }, + { url = "https://files.pythonhosted.org/packages/78/d4/f5704cb629ba5ab16d1d3d741396aec6dc3ca2b67757c45b0599bb010478/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d", size = 149520 }, + { url = "https://files.pythonhosted.org/packages/c5/96/64120b1d02b81785f222b976c0fb79a35875457fa9bb40827678e54d1bc8/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a", size = 152852 }, + { url = "https://files.pythonhosted.org/packages/84/c9/98e3732278a99f47d487fd3468bc60b882920cef29d1fa6ca460a1fdf4e6/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9", size = 150488 }, + { url = "https://files.pythonhosted.org/packages/13/0e/9c8d4cb99c98c1007cc11eda969ebfe837bbbd0acdb4736d228ccaabcd22/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1", size = 146192 }, + { url = "https://files.pythonhosted.org/packages/b2/21/2b6b5b860781a0b49427309cb8670785aa543fb2178de875b87b9cc97746/charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35", size = 95550 }, + { url = "https://files.pythonhosted.org/packages/21/5b/1b390b03b1d16c7e382b561c5329f83cc06623916aab983e8ab9239c7d5c/charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f", size = 102785 }, + { url = 
"https://files.pythonhosted.org/packages/38/94/ce8e6f63d18049672c76d07d119304e1e2d7c6098f0841b51c666e9f44a0/charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda", size = 195698 }, + { url = "https://files.pythonhosted.org/packages/24/2e/dfdd9770664aae179a96561cc6952ff08f9a8cd09a908f259a9dfa063568/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313", size = 140162 }, + { url = "https://files.pythonhosted.org/packages/24/4e/f646b9093cff8fc86f2d60af2de4dc17c759de9d554f130b140ea4738ca6/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9", size = 150263 }, + { url = "https://files.pythonhosted.org/packages/5e/67/2937f8d548c3ef6e2f9aab0f6e21001056f692d43282b165e7c56023e6dd/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b", size = 142966 }, + { url = "https://files.pythonhosted.org/packages/52/ed/b7f4f07de100bdb95c1756d3a4d17b90c1a3c53715c1a476f8738058e0fa/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11", size = 144992 }, + { url = "https://files.pythonhosted.org/packages/96/2c/d49710a6dbcd3776265f4c923bb73ebe83933dfbaa841c5da850fe0fd20b/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f", size = 147162 }, + { url = 
"https://files.pythonhosted.org/packages/b4/41/35ff1f9a6bd380303dea55e44c4933b4cc3c4850988927d4082ada230273/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd", size = 140972 }, + { url = "https://files.pythonhosted.org/packages/fb/43/c6a0b685fe6910d08ba971f62cd9c3e862a85770395ba5d9cad4fede33ab/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2", size = 149095 }, + { url = "https://files.pythonhosted.org/packages/4c/ff/a9a504662452e2d2878512115638966e75633519ec11f25fca3d2049a94a/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886", size = 152668 }, + { url = "https://files.pythonhosted.org/packages/6c/71/189996b6d9a4b932564701628af5cee6716733e9165af1d5e1b285c530ed/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601", size = 150073 }, + { url = "https://files.pythonhosted.org/packages/e4/93/946a86ce20790e11312c87c75ba68d5f6ad2208cfb52b2d6a2c32840d922/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd", size = 145732 }, + { url = "https://files.pythonhosted.org/packages/cd/e5/131d2fb1b0dddafc37be4f3a2fa79aa4c037368be9423061dccadfd90091/charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407", size = 95391 }, + { url = "https://files.pythonhosted.org/packages/27/f2/4f9a69cc7712b9b5ad8fdb87039fd89abba997ad5cbe690d1835d40405b0/charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971", size = 102702 }, + { url = 
"https://files.pythonhosted.org/packages/0e/f6/65ecc6878a89bb1c23a086ea335ad4bf21a588990c3f535a227b9eea9108/charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85", size = 49767 }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, +] + +[[package]] +name = "crowdsec-docker-tests" +version = "0.1.0" +source = { virtual = "." } +dependencies = [ + { name = "pytest" }, + { name = "pytest-cs" }, + { name = "pytest-dotenv" }, + { name = "pytest-xdist" }, +] + +[package.dev-dependencies] +dev = [ + { name = "ipdb" }, + { name = "ruff" }, +] + +[package.metadata] +requires-dist = [ + { name = "pytest", specifier = ">=8.3.4" }, + { name = "pytest-cs", git = "https://github.com/crowdsecurity/pytest-cs" }, + { name = "pytest-dotenv", specifier = ">=0.5.2" }, + { name = "pytest-xdist", specifier = ">=3.6.1" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "ipdb", specifier = ">=0.13.13" }, + { name = "ruff", specifier = ">=0.9.3" }, +] + +[[package]] +name = "cryptography" +version = "44.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/91/4c/45dfa6829acffa344e3967d6006ee4ae8be57af746ae2eba1c431949b32c/cryptography-44.0.0.tar.gz", hash = 
"sha256:cd4e834f340b4293430701e772ec543b0fbe6c2dea510a5286fe0acabe153a02", size = 710657 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/55/09/8cc67f9b84730ad330b3b72cf867150744bf07ff113cda21a15a1c6d2c7c/cryptography-44.0.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:84111ad4ff3f6253820e6d3e58be2cc2a00adb29335d4cacb5ab4d4d34f2a123", size = 6541833 }, + { url = "https://files.pythonhosted.org/packages/7e/5b/3759e30a103144e29632e7cb72aec28cedc79e514b2ea8896bb17163c19b/cryptography-44.0.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b15492a11f9e1b62ba9d73c210e2416724633167de94607ec6069ef724fad092", size = 3922710 }, + { url = "https://files.pythonhosted.org/packages/5f/58/3b14bf39f1a0cfd679e753e8647ada56cddbf5acebffe7db90e184c76168/cryptography-44.0.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:831c3c4d0774e488fdc83a1923b49b9957d33287de923d58ebd3cec47a0ae43f", size = 4137546 }, + { url = "https://files.pythonhosted.org/packages/98/65/13d9e76ca19b0ba5603d71ac8424b5694415b348e719db277b5edc985ff5/cryptography-44.0.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:761817a3377ef15ac23cd7834715081791d4ec77f9297ee694ca1ee9c2c7e5eb", size = 3915420 }, + { url = "https://files.pythonhosted.org/packages/b1/07/40fe09ce96b91fc9276a9ad272832ead0fddedcba87f1190372af8e3039c/cryptography-44.0.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3c672a53c0fb4725a29c303be906d3c1fa99c32f58abe008a82705f9ee96f40b", size = 4154498 }, + { url = "https://files.pythonhosted.org/packages/75/ea/af65619c800ec0a7e4034207aec543acdf248d9bffba0533342d1bd435e1/cryptography-44.0.0-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:4ac4c9f37eba52cb6fbeaf5b59c152ea976726b865bd4cf87883a7e7006cc543", size = 3932569 }, + { url = "https://files.pythonhosted.org/packages/c7/af/d1deb0c04d59612e3d5e54203159e284d3e7a6921e565bb0eeb6269bdd8a/cryptography-44.0.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = 
"sha256:ed3534eb1090483c96178fcb0f8893719d96d5274dfde98aa6add34614e97c8e", size = 4016721 }, + { url = "https://files.pythonhosted.org/packages/bd/69/7ca326c55698d0688db867795134bdfac87136b80ef373aaa42b225d6dd5/cryptography-44.0.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:f3f6fdfa89ee2d9d496e2c087cebef9d4fcbb0ad63c40e821b39f74bf48d9c5e", size = 4240915 }, + { url = "https://files.pythonhosted.org/packages/ef/d4/cae11bf68c0f981e0413906c6dd03ae7fa864347ed5fac40021df1ef467c/cryptography-44.0.0-cp37-abi3-win32.whl", hash = "sha256:eb33480f1bad5b78233b0ad3e1b0be21e8ef1da745d8d2aecbb20671658b9053", size = 2757925 }, + { url = "https://files.pythonhosted.org/packages/64/b1/50d7739254d2002acae64eed4fc43b24ac0cc44bf0a0d388d1ca06ec5bb1/cryptography-44.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:abc998e0c0eee3c8a1904221d3f67dcfa76422b23620173e28c11d3e626c21bd", size = 3202055 }, + { url = "https://files.pythonhosted.org/packages/11/18/61e52a3d28fc1514a43b0ac291177acd1b4de00e9301aaf7ef867076ff8a/cryptography-44.0.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:660cb7312a08bc38be15b696462fa7cc7cd85c3ed9c576e81f4dc4d8b2b31591", size = 6542801 }, + { url = "https://files.pythonhosted.org/packages/1a/07/5f165b6c65696ef75601b781a280fc3b33f1e0cd6aa5a92d9fb96c410e97/cryptography-44.0.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1923cb251c04be85eec9fda837661c67c1049063305d6be5721643c22dd4e2b7", size = 3922613 }, + { url = "https://files.pythonhosted.org/packages/28/34/6b3ac1d80fc174812486561cf25194338151780f27e438526f9c64e16869/cryptography-44.0.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:404fdc66ee5f83a1388be54300ae978b2efd538018de18556dde92575e05defc", size = 4137925 }, + { url = "https://files.pythonhosted.org/packages/d0/c7/c656eb08fd22255d21bc3129625ed9cd5ee305f33752ef2278711b3fa98b/cryptography-44.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = 
"sha256:c5eb858beed7835e5ad1faba59e865109f3e52b3783b9ac21e7e47dc5554e289", size = 3915417 }, + { url = "https://files.pythonhosted.org/packages/ef/82/72403624f197af0db6bac4e58153bc9ac0e6020e57234115db9596eee85d/cryptography-44.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f53c2c87e0fb4b0c00fa9571082a057e37690a8f12233306161c8f4b819960b7", size = 4155160 }, + { url = "https://files.pythonhosted.org/packages/a2/cd/2f3c440913d4329ade49b146d74f2e9766422e1732613f57097fea61f344/cryptography-44.0.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:9e6fc8a08e116fb7c7dd1f040074c9d7b51d74a8ea40d4df2fc7aa08b76b9e6c", size = 3932331 }, + { url = "https://files.pythonhosted.org/packages/7f/df/8be88797f0a1cca6e255189a57bb49237402b1880d6e8721690c5603ac23/cryptography-44.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:d2436114e46b36d00f8b72ff57e598978b37399d2786fd39793c36c6d5cb1c64", size = 4017372 }, + { url = "https://files.pythonhosted.org/packages/af/36/5ccc376f025a834e72b8e52e18746b927f34e4520487098e283a719c205e/cryptography-44.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a01956ddfa0a6790d594f5b34fc1bfa6098aca434696a03cfdbe469b8ed79285", size = 4239657 }, + { url = "https://files.pythonhosted.org/packages/46/b0/f4f7d0d0bcfbc8dd6296c1449be326d04217c57afb8b2594f017eed95533/cryptography-44.0.0-cp39-abi3-win32.whl", hash = "sha256:eca27345e1214d1b9f9490d200f9db5a874479be914199194e746c893788d417", size = 2758672 }, + { url = "https://files.pythonhosted.org/packages/97/9b/443270b9210f13f6ef240eff73fd32e02d381e7103969dc66ce8e89ee901/cryptography-44.0.0-cp39-abi3-win_amd64.whl", hash = "sha256:708ee5f1bafe76d041b53a4f95eb28cdeb8d18da17e597d46d7833ee59b97ede", size = 3202071 }, +] + +[[package]] +name = "decorator" +version = "5.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/66/0c/8d907af351aa16b42caae42f9d6aa37b900c67308052d10fdce809f8d952/decorator-5.1.1.tar.gz", hash = 
"sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330", size = 35016 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d5/50/83c593b07763e1161326b3b8c6686f0f4b0f24d5526546bee538c89837d6/decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186", size = 9073 }, +] + +[[package]] +name = "docker" +version = "7.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pywin32", marker = "sys_platform == 'win32'" }, + { name = "requests" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/91/9b/4a2ea29aeba62471211598dac5d96825bb49348fa07e906ea930394a83ce/docker-7.1.0.tar.gz", hash = "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c", size = 117834 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e3/26/57c6fb270950d476074c087527a558ccb6f4436657314bfb6cdf484114c4/docker-7.1.0-py3-none-any.whl", hash = "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0", size = 147774 }, +] + +[[package]] +name = "execnet" +version = "2.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bb/ff/b4c0dc78fbe20c3e59c0c7334de0c27eb4001a2b2017999af398bf730817/execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3", size = 166524 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/09/2aea36ff60d16dd8879bdb2f5b3ee0ba8d08cbbdcdfe870e695ce3784385/execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc", size = 40612 }, +] + +[[package]] +name = "executing" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/91/50/a9d80c47ff289c611ff12e63f7c5d13942c65d68125160cefd768c73e6e4/executing-2.2.0.tar.gz", hash = 
"sha256:5d108c028108fe2551d1a7b2e8b713341e2cb4fc0aa7dcf966fa4327a5226755", size = 978693 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/8f/c4d9bafc34ad7ad5d8dc16dd1347ee0e507a52c3adb6bfa8887e1c6a26ba/executing-2.2.0-py2.py3-none-any.whl", hash = "sha256:11387150cad388d62750327a53d3339fad4888b39a6fe233c3afbb54ecffd3aa", size = 26702 }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 }, +] + +[[package]] +name = "iniconfig" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892 }, +] + +[[package]] +name = "ipdb" +version = "0.13.13" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "decorator" }, + { name = "ipython" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3d/1b/7e07e7b752017f7693a0f4d41c13e5ca29ce8cbcfdcc1fd6c4ad8c0a27a0/ipdb-0.13.13.tar.gz", hash = "sha256:e3ac6018ef05126d442af680aad863006ec19d02290561ac88b8b1c0b0cfc726", size = 17042 } 
+wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/4c/b075da0092003d9a55cf2ecc1cae9384a1ca4f650d51b00fc59875fe76f6/ipdb-0.13.13-py3-none-any.whl", hash = "sha256:45529994741c4ab6d2388bfa5d7b725c2cf7fe9deffabdb8a6113aa5ed449ed4", size = 12130 }, +] + +[[package]] +name = "ipython" +version = "8.31.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "decorator" }, + { name = "jedi" }, + { name = "matplotlib-inline" }, + { name = "pexpect", marker = "sys_platform != 'emscripten' and sys_platform != 'win32'" }, + { name = "prompt-toolkit" }, + { name = "pygments" }, + { name = "stack-data" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/01/35/6f90fdddff7a08b7b715fccbd2427b5212c9525cd043d26fdc45bee0708d/ipython-8.31.0.tar.gz", hash = "sha256:b6a2274606bec6166405ff05e54932ed6e5cfecaca1fc05f2cacde7bb074d70b", size = 5501011 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/60/d0feb6b6d9fe4ab89fe8fe5b47cbf6cd936bfd9f1e7ffa9d0015425aeed6/ipython-8.31.0-py3-none-any.whl", hash = "sha256:46ec58f8d3d076a61d128fe517a51eb730e3aaf0c184ea8c17d16e366660c6a6", size = 821583 }, +] + +[[package]] +name = "jedi" +version = "0.19.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "parso" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/3a/79a912fbd4d8dd6fbb02bf69afd3bb72cf0c729bb3063c6f4498603db17a/jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0", size = 1231287 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9", size = 1572278 }, +] + +[[package]] +name = "matplotlib-inline" +version = "0.1.7" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/99/5b/a36a337438a14116b16480db471ad061c36c3694df7c2084a0da7ba538b7/matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90", size = 8159 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/8e/9ad090d3553c280a8060fbf6e24dc1c0c29704ee7d1c372f0c174aa59285/matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca", size = 9899 }, +] + +[[package]] +name = "packaging" +version = "24.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451 }, +] + +[[package]] +name = "parso" +version = "0.8.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/66/94/68e2e17afaa9169cf6412ab0f28623903be73d1b32e208d9e8e541bb086d/parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d", size = 400609 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/ac/dac4a63f978e4dcb3c6d3a78c4d8e0192a113d288502a1216950c41b1027/parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18", size = 103650 }, +] + +[[package]] +name = "pexpect" +version = "4.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ptyprocess" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772 }, +] + +[[package]] +name = "pluggy" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556 }, +] + +[[package]] +name = "prompt-toolkit" +version = "3.0.50" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wcwidth" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a1/e1/bd15cb8ffdcfeeb2bdc215de3c3cffca11408d829e4b8416dcfe71ba8854/prompt_toolkit-3.0.50.tar.gz", hash = "sha256:544748f3860a2623ca5cd6d2795e7a14f3d0e1c3c9728359013f79877fc89bab", size = 429087 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e4/ea/d836f008d33151c7a1f62caf3d8dd782e4d15f6a43897f64480c2b8de2ad/prompt_toolkit-3.0.50-py3-none-any.whl", hash = "sha256:9b6427eb19e479d98acff65196a307c555eb567989e6d88ebbb1b509d9779198", size = 387816 }, +] + +[[package]] +name = "psutil" +version = "6.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/1f/5a/07871137bb752428aa4b659f910b399ba6f291156bdea939be3e96cae7cb/psutil-6.1.1.tar.gz", hash = "sha256:cf8496728c18f2d0b45198f06895be52f36611711746b7f30c464b422b50e2f5", size = 508502 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/99/ca79d302be46f7bdd8321089762dd4476ee725fce16fc2b2e1dbba8cac17/psutil-6.1.1-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:fc0ed7fe2231a444fc219b9c42d0376e0a9a1a72f16c5cfa0f68d19f1a0663e8", size = 247511 }, + { url = "https://files.pythonhosted.org/packages/0b/6b/73dbde0dd38f3782905d4587049b9be64d76671042fdcaf60e2430c6796d/psutil-6.1.1-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:0bdd4eab935276290ad3cb718e9809412895ca6b5b334f5a9111ee6d9aff9377", size = 248985 }, + { url = "https://files.pythonhosted.org/packages/17/38/c319d31a1d3f88c5b79c68b3116c129e5133f1822157dd6da34043e32ed6/psutil-6.1.1-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6e06c20c05fe95a3d7302d74e7097756d4ba1247975ad6905441ae1b5b66003", size = 284488 }, + { url = "https://files.pythonhosted.org/packages/9c/39/0f88a830a1c8a3aba27fededc642da37613c57cbff143412e3536f89784f/psutil-6.1.1-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97f7cb9921fbec4904f522d972f0c0e1f4fabbdd4e0287813b21215074a0f160", size = 287477 }, + { url = "https://files.pythonhosted.org/packages/47/da/99f4345d4ddf2845cb5b5bd0d93d554e84542d116934fde07a0c50bd4e9f/psutil-6.1.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33431e84fee02bc84ea36d9e2c4a6d395d479c9dd9bba2376c1f6ee8f3a4e0b3", size = 289017 }, + { url = "https://files.pythonhosted.org/packages/38/53/bd755c2896f4461fd4f36fa6a6dcb66a88a9e4b9fd4e5b66a77cf9d4a584/psutil-6.1.1-cp37-abi3-win32.whl", hash = "sha256:eaa912e0b11848c4d9279a93d7e2783df352b082f40111e078388701fd479e53", size = 250602 }, + { url = 
"https://files.pythonhosted.org/packages/7b/d7/7831438e6c3ebbfa6e01a927127a6cb42ad3ab844247f3c5b96bea25d73d/psutil-6.1.1-cp37-abi3-win_amd64.whl", hash = "sha256:f35cfccb065fff93529d2afb4a2e89e363fe63ca1e4a5da22b603a85833c2649", size = 254444 }, +] + +[[package]] +name = "ptyprocess" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993 }, +] + +[[package]] +name = "pure-eval" +version = "0.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cd/05/0a34433a064256a578f1783a10da6df098ceaa4a57bbeaa96a6c0352786b/pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42", size = 19752 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", size = 11842 }, +] + +[[package]] +name = "pycparser" +version = "2.22" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552 }, +] + +[[package]] +name = "pygments" +version = "2.19.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293 }, +] + +[[package]] +name = "pytest" +version = "8.3.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/05/35/30e0d83068951d90a01852cb1cef56e5d8a09d20c7f511634cc2f7e0372a/pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761", size = 1445919 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/92/76a1c94d3afee238333bc0a42b82935dd8f9cf8ce9e336ff87ee14d9e1cf/pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6", size = 343083 }, +] + +[[package]] +name = "pytest-cs" +version = "0.7.20" +source = { git = "https://github.com/crowdsecurity/pytest-cs#73380b837a80337f361414bebbaf4b914713c4ae" } +dependencies = [ + { name = "docker" }, + { name = "psutil" }, + { name = "pytest" }, + { name = "pytest-datadir" }, + { name = "pytest-dotenv" }, + { name = "pyyaml" }, + { name = "requests" }, + { 
name = "trustme" }, +] + +[[package]] +name = "pytest-datadir" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/aa/97/a93900d82635aa3f419c3cd2059b4de7d7fe44e415eaf00c298854582dcc/pytest-datadir-1.5.0.tar.gz", hash = "sha256:1617ed92f9afda0c877e4eac91904b5f779d24ba8f5e438752e3ae39d8d2ee3f", size = 8821 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/90/96b9474cddda5ef9e10e6f1871c0fadfa153b605e0e749ba30437bfb62a0/pytest_datadir-1.5.0-py3-none-any.whl", hash = "sha256:34adf361bcc7b37961bbc1dfa8d25a4829e778bab461703c38a5c50ca9c36dc8", size = 5095 }, +] + +[[package]] +name = "pytest-dotenv" +version = "0.5.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, + { name = "python-dotenv" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cd/b0/cafee9c627c1bae228eb07c9977f679b3a7cb111b488307ab9594ba9e4da/pytest-dotenv-0.5.2.tar.gz", hash = "sha256:2dc6c3ac6d8764c71c6d2804e902d0ff810fa19692e95fe138aefc9b1aa73732", size = 3782 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/da/9da67c67b3d0963160e3d2cbc7c38b6fae342670cc8e6d5936644b2cf944/pytest_dotenv-0.5.2-py3-none-any.whl", hash = "sha256:40a2cece120a213898afaa5407673f6bd924b1fa7eafce6bda0e8abffe2f710f", size = 3993 }, +] + +[[package]] +name = "pytest-xdist" +version = "3.6.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "execnet" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/41/c4/3c310a19bc1f1e9ef50075582652673ef2bfc8cd62afef9585683821902f/pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d", size = 84060 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/82/1d96bf03ee4c0fdc3c0cbe61470070e659ca78dc0086fb88b66c185e2449/pytest_xdist-3.6.1-py3-none-any.whl", hash 
= "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7", size = 46108 }, +] + +[[package]] +name = "python-dotenv" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bc/57/e84d88dfe0aec03b7a2d4327012c1627ab5f03652216c63d49846d7a6c58/python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca", size = 39115 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/3e/b68c118422ec867fa7ab88444e1274aa40681c606d59ac27de5a5588f082/python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a", size = 19863 }, +] + +[[package]] +name = "pywin32" +version = "308" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/7c/d00d6bdd96de4344e06c4afbf218bc86b54436a94c01c71a8701f613aa56/pywin32-308-cp312-cp312-win32.whl", hash = "sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897", size = 5939729 }, + { url = "https://files.pythonhosted.org/packages/21/27/0c8811fbc3ca188f93b5354e7c286eb91f80a53afa4e11007ef661afa746/pywin32-308-cp312-cp312-win_amd64.whl", hash = "sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47", size = 6543015 }, + { url = "https://files.pythonhosted.org/packages/9d/0f/d40f8373608caed2255781a3ad9a51d03a594a1248cd632d6a298daca693/pywin32-308-cp312-cp312-win_arm64.whl", hash = "sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091", size = 7976033 }, + { url = "https://files.pythonhosted.org/packages/a9/a4/aa562d8935e3df5e49c161b427a3a2efad2ed4e9cf81c3de636f1fdddfd0/pywin32-308-cp313-cp313-win32.whl", hash = "sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed", size = 5938579 }, + { url = 
"https://files.pythonhosted.org/packages/c7/50/b0efb8bb66210da67a53ab95fd7a98826a97ee21f1d22949863e6d588b22/pywin32-308-cp313-cp313-win_amd64.whl", hash = "sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4", size = 6542056 }, + { url = "https://files.pythonhosted.org/packages/26/df/2b63e3e4f2df0224f8aaf6d131f54fe4e8c96400eb9df563e2aae2e1a1f9/pywin32-308-cp313-cp313-win_arm64.whl", hash = "sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd", size = 7974986 }, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873 }, + { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302 }, + { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154 }, + { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223 }, + { url = 
"https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542 }, + { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164 }, + { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611 }, + { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591 }, + { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338 }, + { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309 }, + { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679 }, + { url = 
"https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428 }, + { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361 }, + { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523 }, + { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660 }, + { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597 }, + { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527 }, + { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 }, +] + +[[package]] +name = "requests" +version = "2.32.3" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928 }, +] + +[[package]] +name = "ruff" +version = "0.9.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1e/7f/60fda2eec81f23f8aa7cbbfdf6ec2ca11eb11c273827933fb2541c2ce9d8/ruff-0.9.3.tar.gz", hash = "sha256:8293f89985a090ebc3ed1064df31f3b4b56320cdfcec8b60d3295bddb955c22a", size = 3586740 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/77/4fb790596d5d52c87fd55b7160c557c400e90f6116a56d82d76e95d9374a/ruff-0.9.3-py3-none-linux_armv6l.whl", hash = "sha256:7f39b879064c7d9670197d91124a75d118d00b0990586549949aae80cdc16624", size = 11656815 }, + { url = "https://files.pythonhosted.org/packages/a2/a8/3338ecb97573eafe74505f28431df3842c1933c5f8eae615427c1de32858/ruff-0.9.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:a187171e7c09efa4b4cc30ee5d0d55a8d6c5311b3e1b74ac5cb96cc89bafc43c", size = 11594821 }, + { url = "https://files.pythonhosted.org/packages/8e/89/320223c3421962762531a6b2dd58579b858ca9916fb2674874df5e97d628/ruff-0.9.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:c59ab92f8e92d6725b7ded9d4a31be3ef42688a115c6d3da9457a5bda140e2b4", size = 11040475 }, + { url = "https://files.pythonhosted.org/packages/b2/bd/1d775eac5e51409535804a3a888a9623e87a8f4b53e2491580858a083692/ruff-0.9.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:2dc153c25e715be41bb228bc651c1e9b1a88d5c6e5ed0194fa0dfea02b026439", size = 11856207 }, + { url = "https://files.pythonhosted.org/packages/7f/c6/3e14e09be29587393d188454064a4aa85174910d16644051a80444e4fd88/ruff-0.9.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:646909a1e25e0dc28fbc529eab8eb7bb583079628e8cbe738192853dbbe43af5", size = 11420460 }, + { url = "https://files.pythonhosted.org/packages/ef/42/b7ca38ffd568ae9b128a2fa76353e9a9a3c80ef19746408d4ce99217ecc1/ruff-0.9.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a5a46e09355695fbdbb30ed9889d6cf1c61b77b700a9fafc21b41f097bfbba4", size = 12605472 }, + { url = "https://files.pythonhosted.org/packages/a6/a1/3167023f23e3530fde899497ccfe239e4523854cb874458ac082992d206c/ruff-0.9.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:c4bb09d2bbb394e3730d0918c00276e79b2de70ec2a5231cd4ebb51a57df9ba1", size = 13243123 }, + { url = "https://files.pythonhosted.org/packages/d0/b4/3c600758e320f5bf7de16858502e849f4216cb0151f819fa0d1154874802/ruff-0.9.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:96a87ec31dc1044d8c2da2ebbed1c456d9b561e7d087734336518181b26b3aa5", size = 12744650 }, + { url = "https://files.pythonhosted.org/packages/be/38/266fbcbb3d0088862c9bafa8b1b99486691d2945a90b9a7316336a0d9a1b/ruff-0.9.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bb7554aca6f842645022fe2d301c264e6925baa708b392867b7a62645304df4", size = 14458585 }, + { url = "https://files.pythonhosted.org/packages/63/a6/47fd0e96990ee9b7a4abda62de26d291bd3f7647218d05b7d6d38af47c30/ruff-0.9.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cabc332b7075a914ecea912cd1f3d4370489c8018f2c945a30bcc934e3bc06a6", size = 12419624 }, + { url = "https://files.pythonhosted.org/packages/84/5d/de0b7652e09f7dda49e1a3825a164a65f4998175b6486603c7601279baad/ruff-0.9.3-py3-none-musllinux_1_2_aarch64.whl", hash = 
"sha256:33866c3cc2a575cbd546f2cd02bdd466fed65118e4365ee538a3deffd6fcb730", size = 11843238 }, + { url = "https://files.pythonhosted.org/packages/9e/be/3f341ceb1c62b565ec1fb6fd2139cc40b60ae6eff4b6fb8f94b1bb37c7a9/ruff-0.9.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:006e5de2621304c8810bcd2ee101587712fa93b4f955ed0985907a36c427e0c2", size = 11484012 }, + { url = "https://files.pythonhosted.org/packages/a3/c8/ff8acbd33addc7e797e702cf00bfde352ab469723720c5607b964491d5cf/ruff-0.9.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:ba6eea4459dbd6b1be4e6bfc766079fb9b8dd2e5a35aff6baee4d9b1514ea519", size = 12038494 }, + { url = "https://files.pythonhosted.org/packages/73/b1/8d9a2c0efbbabe848b55f877bc10c5001a37ab10aca13c711431673414e5/ruff-0.9.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:90230a6b8055ad47d3325e9ee8f8a9ae7e273078a66401ac66df68943ced029b", size = 12473639 }, + { url = "https://files.pythonhosted.org/packages/cb/44/a673647105b1ba6da9824a928634fe23186ab19f9d526d7bdf278cd27bc3/ruff-0.9.3-py3-none-win32.whl", hash = "sha256:eabe5eb2c19a42f4808c03b82bd313fc84d4e395133fb3fc1b1516170a31213c", size = 9834353 }, + { url = "https://files.pythonhosted.org/packages/c3/01/65cadb59bf8d4fbe33d1a750103e6883d9ef302f60c28b73b773092fbde5/ruff-0.9.3-py3-none-win_amd64.whl", hash = "sha256:040ceb7f20791dfa0e78b4230ee9dce23da3b64dd5848e40e3bf3ab76468dcf4", size = 10821444 }, + { url = "https://files.pythonhosted.org/packages/69/cb/b3fe58a136a27d981911cba2f18e4b29f15010623b79f0f2510fd0d31fd3/ruff-0.9.3-py3-none-win_arm64.whl", hash = "sha256:800d773f6d4d33b0a3c60e2c6ae8f4c202ea2de056365acfa519aa48acf28e0b", size = 10038168 }, +] + +[[package]] +name = "stack-data" +version = "0.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "asttokens" }, + { name = "executing" }, + { name = "pure-eval" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/28/e3/55dcc2cfbc3ca9c29519eb6884dd1415ecb53b0e934862d3559ddcb7e20b/stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9", size = 44707 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521 }, +] + +[[package]] +name = "traitlets" +version = "5.14.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/eb/79/72064e6a701c2183016abbbfedaba506d81e30e232a68c9f0d6f6fcd1574/traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", size = 161621 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f", size = 85359 }, +] + +[[package]] +name = "trustme" +version = "1.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cryptography" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4b/c5/931476f4cf1cd9e736f32651005078061a50dc164a2569fb874e00eb2786/trustme-1.2.1.tar.gz", hash = "sha256:6528ba2bbc7f2db41f33825c8dd13e3e3eb9d334ba0f909713c8c3139f4ae47f", size = 26844 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b5/f3/c34dbabf6da5eda56fe923226769d40e11806952cd7f46655dd06e10f018/trustme-1.2.1-py3-none-any.whl", hash = "sha256:d768e5fc57c86dfc5ec9365102e9b092541cd6954b35d8c1eea01a84f35a762a", size = 16530 }, +] + +[[package]] +name = "urllib3" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/aa/63/e53da845320b757bf29ef6a9062f5c669fe997973f966045cb019c3f4b66/urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d", size = 307268 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/19/4ec628951a74043532ca2cf5d97b7b14863931476d117c471e8e2b1eb39f/urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df", size = 128369 }, +] + +[[package]] +name = "wcwidth" +version = "0.2.13" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6c/63/53559446a878410fc5a5974feb13d31d78d752eb18aeba59c7fef1af7598/wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5", size = 101301 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", size = 34166 }, +] diff --git a/go.mod b/go.mod index f4bd9379a2d..ed406e4aedc 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/crowdsecurity/crowdsec -go 1.23.3 +go 1.23.5 // Don't use the toolchain directive to avoid uncontrolled downloads during // a build, especially in sandboxed environments (freebsd, gentoo...). 
@@ -11,6 +11,7 @@ require ( github.com/AlecAivazis/survey/v2 v2.3.7 github.com/Masterminds/semver/v3 v3.2.1 github.com/Masterminds/sprig/v3 v3.2.3 + github.com/Microsoft/go-winio v0.6.2 // indirect github.com/agext/levenshtein v1.2.3 github.com/alexliesenfeld/health v0.8.0 github.com/appleboy/gin-jwt/v2 v2.9.2 @@ -22,16 +23,21 @@ require ( github.com/buger/jsonparser v1.1.1 github.com/c-robinson/iplib v1.0.8 github.com/cespare/xxhash/v2 v2.3.0 - github.com/corazawaf/libinjection-go v0.1.2 - github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 + github.com/containerd/log v0.1.0 // indirect + github.com/corazawaf/libinjection-go v0.2.2 + github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/creack/pty v1.1.21 // indirect + github.com/crowdsecurity/coraza/v3 v3.0.0-20250121111732-9b0043b679d7 github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 - github.com/crowdsecurity/go-cs-lib v0.0.15 + github.com/crowdsecurity/go-cs-lib v0.0.16 github.com/crowdsecurity/grokky v0.2.2 github.com/crowdsecurity/machineid v1.0.2 github.com/davecgh/go-spew v1.1.1 github.com/dghubble/sling v1.4.2 - github.com/docker/docker v24.0.9+incompatible - github.com/docker/go-connections v0.4.0 + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/docker v27.3.1+incompatible + github.com/docker/go-connections v0.5.0 + github.com/docker/go-units v0.5.0 // indirect github.com/expr-lang/expr v1.16.9 github.com/fatih/color v1.16.0 github.com/fsnotify/fsnotify v1.7.0 @@ -43,8 +49,10 @@ require ( github.com/go-openapi/validate v0.20.0 github.com/go-sql-driver/mysql v1.6.0 github.com/goccy/go-yaml v1.11.0 - github.com/gofrs/uuid v4.0.0+incompatible - github.com/golang-jwt/jwt/v4 v4.5.0 + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v4 v4.5.1 + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-querystring v1.1.0 github.com/google/uuid v1.6.0 
github.com/google/winops v0.0.0-20230712152054-af9b550d0601 @@ -59,17 +67,24 @@ require ( github.com/jarcoal/httpmock v1.1.0 github.com/jedib0t/go-pretty/v6 v6.5.9 github.com/jszwec/csvutil v1.5.1 + github.com/klauspost/compress v1.17.9 // indirect github.com/lithammer/dedent v1.1.0 github.com/mattn/go-isatty v0.0.20 github.com/mattn/go-sqlite3 v1.14.16 + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/term v0.5.0 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 + github.com/morikuni/aec v1.0.0 // indirect github.com/nxadm/tail v1.4.8 + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect github.com/oschwald/geoip2-golang v1.9.0 github.com/oschwald/maxminddb-golang v1.12.0 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.16.0 - github.com/prometheus/client_model v0.4.0 + github.com/prometheus/client_golang v1.17.0 + github.com/prometheus/client_model v0.5.0 github.com/prometheus/prom2json v1.3.0 github.com/r3labs/diff/v2 v2.14.1 github.com/sanity-io/litter v1.5.5 @@ -77,28 +92,38 @@ require ( github.com/shirou/gopsutil/v3 v3.23.5 github.com/sirupsen/logrus v1.9.3 github.com/slack-go/slack v0.12.2 - github.com/spf13/cobra v1.8.0 + github.com/spf13/cobra v1.8.1 + github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/testify v1.9.0 github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 github.com/wasilibs/go-re2 v1.7.0 github.com/xhit/go-simple-mail/v2 v2.16.0 - golang.org/x/crypto v0.26.0 - golang.org/x/mod v0.17.0 - golang.org/x/sys v0.24.0 - golang.org/x/text v0.17.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 // indirect + 
go.opentelemetry.io/otel/sdk v1.28.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect + golang.org/x/crypto v0.32.0 + golang.org/x/mod v0.20.0 + golang.org/x/net v0.34.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.29.0 + golang.org/x/text v0.21.0 + golang.org/x/time v0.6.0 // indirect google.golang.org/grpc v1.67.1 - google.golang.org/protobuf v1.34.2 + google.golang.org/protobuf v1.36.3 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/tomb.v2 v2.0.0-20161208151619-d5d1b5820637 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 + gotest.tools/v3 v3.5.1 // indirect k8s.io/apiserver v0.28.4 + ) require ( ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect github.com/ahmetalpbalkan/dlog v0.0.0-20170105205344-4fb5f8204f26 // indirect github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect @@ -106,14 +131,13 @@ require ( github.com/bytedance/sonic v1.10.2 // indirect github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d // indirect github.com/chenzhuoyu/iasm v0.9.1 // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect - github.com/creack/pty v1.1.18 // indirect - github.com/docker/distribution v2.8.2+incompatible // indirect - github.com/docker/go-units v0.5.0 // indirect + github.com/corazawaf/coraza-coreruleset v0.0.0-20240226094324-415b1017abdc // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/gabriel-vasile/mimetype v1.4.3 // indirect github.com/gin-contrib/sse v0.1.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect 
github.com/go-openapi/analysis v0.19.16 // indirect github.com/go-openapi/inflect v0.19.0 // indirect @@ -127,10 +151,7 @@ require ( github.com/go-playground/validator/v10 v10.17.0 // indirect github.com/go-stack/stack v1.8.0 // indirect github.com/goccy/go-json v0.10.2 // indirect - github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.2.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/hashicorp/hcl/v2 v2.13.0 // indirect github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect @@ -144,49 +165,44 @@ require ( github.com/jackc/pgproto3/v2 v2.3.3 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect github.com/jackc/pgtype v1.14.0 // indirect + github.com/jcchavezs/mergefs v0.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect - github.com/klauspost/compress v1.17.3 // indirect github.com/klauspost/cpuid/v2 v2.2.6 // indirect github.com/leodido/go-urn v1.3.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/magefile/mage v1.15.1-0.20230912152418-9f54e0f83e2a // indirect + github.com/magefile/mage v1.15.1-0.20241126214340-bdc92f694516 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-testing-interface v1.0.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect 
github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/morikuni/aec v1.0.0 // indirect github.com/oklog/run v1.0.0 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect github.com/pelletier/go-toml/v2 v2.1.1 // indirect - github.com/petar-dambovaliev/aho-corasick v0.0.0-20230725210150-fb29fc3c913e // indirect + github.com/petar-dambovaliev/aho-corasick v0.0.0-20240411101913-e07a1f0e8eb4 // indirect github.com/pierrec/lz4/v4 v4.1.18 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/rivo/uniseg v0.2.0 // indirect github.com/robfig/cron/v3 v3.0.1 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sergi/go-diff v1.3.1 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/spf13/cast v1.3.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect github.com/tetratelabs/wazero v1.8.0 // indirect - github.com/tidwall/gjson v1.17.0 // indirect + github.com/tidwall/gjson v1.18.0 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect github.com/tklauser/go-sysconf v0.3.11 // indirect @@ -194,24 +210,21 @@ require ( github.com/toorop/go-dkim v0.0.0-20201103131630-e1cd1a0a5208 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect github.com/ugorji/go/codec v1.2.12 // indirect + github.com/valllabh/ocsf-schema-golang v1.0.3 // indirect 
github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/wasilibs/wazero-helpers v0.0.0-20240620070341-3dff1577cd52 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect github.com/zclconf/go-cty v1.8.0 // indirect go.mongodb.org/mongo-driver v1.9.4 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect go.uber.org/atomic v1.10.0 // indirect golang.org/x/arch v0.7.0 // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/term v0.23.0 // indirect - golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect - golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - google.golang.org/appengine v1.6.7 // indirect + golang.org/x/term v0.28.0 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect - gotest.tools/v3 v3.5.0 // indirect k8s.io/api v0.28.4 // indirect k8s.io/apimachinery v0.28.4 // indirect k8s.io/klog/v2 v2.100.1 // indirect diff --git a/go.sum b/go.sum index b2bd77c9915..e9873f0d46f 100644 --- a/go.sum +++ b/go.sum @@ -19,8 +19,8 @@ github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0 github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= -github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= 
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 h1:+vx7roKuyA63nhn5WAunQHLTznkw5W8b1Xc0dNjp83s= github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2/go.mod h1:HBCaDeC1lPdgDeDbhX8XFpy1jqjK0IBG8W5K+xYqA0w= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -56,8 +56,6 @@ github.com/aws/aws-lambda-go v1.47.0/go.mod h1:dpMpZgvWx5vuQJfBt0zqBha60q7Dd7Rfg github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= github.com/aws/aws-sdk-go v1.52.0 h1:ptgek/4B2v/ljsjYSEvLQ8LTD+SQyrqhOOWvHc/VGPI= github.com/aws/aws-sdk-go v1.52.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/beevik/etree v1.3.0 h1:hQTc+pylzIKDb23yYprodCWWTt+ojFfUZyzU09a/hmU= -github.com/beevik/etree v1.3.0/go.mod h1:aiPf89g/1k3AShMVAzriilpcE4R/Vuor90y83zVZWFc= github.com/beevik/etree v1.4.1 h1:PmQJDDYahBGNKDcpdX8uPy1xRCwoCGVUiW669MEirVI= github.com/beevik/etree v1.4.1/go.mod h1:gPNJNaBGVZ9AwsidazFZyygnd+0pAU38N4D+WemwKNs= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -76,6 +74,8 @@ github.com/bytedance/sonic v1.10.2 h1:GQebETVBxYB7JGWJtLBi07OVzWwt+8dWA00gEVW2ZF github.com/bytedance/sonic v1.10.2/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= github.com/c-robinson/iplib v1.0.8 h1:exDRViDyL9UBLcfmlxxkY5odWX5092nPsQIykHXhIn4= github.com/c-robinson/iplib v1.0.8/go.mod h1:i3LuuFL1hRT5gFpBRnEydzw8R6yhGkF4szNDIbF8pgo= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chenzhuoyu/base64x 
v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= @@ -87,26 +87,32 @@ github.com/chenzhuoyu/iasm v0.9.1 h1:tUHQJXo3NhBqw6s33wkGn9SP3bvrWLdlVIJ3hQBL7P0 github.com/chenzhuoyu/iasm v0.9.1/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/corazawaf/libinjection-go v0.1.2 h1:oeiV9pc5rvJ+2oqOqXEAMJousPpGiup6f7Y3nZj5GoM= -github.com/corazawaf/libinjection-go v0.1.2/go.mod h1:OP4TM7xdJ2skyXqNX1AN1wN5nNZEmJNuWbNPOItn7aw= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/corazawaf/coraza-coreruleset v0.0.0-20240226094324-415b1017abdc h1:OlJhrgI3I+FLUCTI3JJW8MoqyM78WbqJjecqMnqG+wc= +github.com/corazawaf/coraza-coreruleset v0.0.0-20240226094324-415b1017abdc/go.mod h1:7rsocqNDkTCira5T0M7buoKR2ehh7YZiPkzxRuAgvVU= +github.com/corazawaf/coraza/v3 v3.3.2 h1:eG1HPLySTR9lND6y6fPOajubwbuHRF6aXCsCtxyqKTY= +github.com/corazawaf/coraza/v3 v3.3.2/go.mod h1:4EqMZkRoil11FnResCT/2JIg61dH+6D7F48VG8SVzuA= +github.com/corazawaf/libinjection-go v0.2.2 h1:Chzodvb6+NXh6wew5/yhD0Ggioif9ACrQGR4qjTCs1g= +github.com/corazawaf/libinjection-go v0.2.2/go.mod h1:OP4TM7xdJ2skyXqNX1AN1wN5nNZEmJNuWbNPOItn7aw= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 
v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= -github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607 h1:hyrYw3h8clMcRL2u5ooZ3tmwnmJftmhb9Ws1MKmavvI= -github.com/crowdsecurity/coraza/v3 v3.0.0-20240108124027-a62b8d8e5607/go.mod h1:br36fEqurGYZQGit+iDYsIzW0FF6VufMbDzyyLxEuPA= +github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= +github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/crowdsecurity/coraza/v3 v3.0.0-20250121111732-9b0043b679d7 h1:nIwAjapWmiQD3W/uAWYE3z+DC5Coy/zTyPBCJ379fAw= +github.com/crowdsecurity/coraza/v3 v3.0.0-20250121111732-9b0043b679d7/go.mod h1:A+uciRXu+yhZcHMtM052bSM6vyJsMMU37NJN+tVoGqo= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26 h1:r97WNVC30Uen+7WnLs4xDScS/Ex988+id2k6mDf8psU= github.com/crowdsecurity/dlog v0.0.0-20170105205344-4fb5f8204f26/go.mod h1:zpv7r+7KXwgVUZnUNjyP22zc/D7LKjyoY02weH2RBbk= -github.com/crowdsecurity/go-cs-lib v0.0.15 h1:zNWqOPVLHgKUstlr6clom9d66S0eIIW66jQG3Y7FEvo= -github.com/crowdsecurity/go-cs-lib v0.0.15/go.mod h1:ePyQyJBxp1W/1bq4YpVAilnLSz7HkzmtI7TRhX187EU= +github.com/crowdsecurity/go-cs-lib v0.0.16 h1:2/htodjwc/sfsv4deX8F/2Fzg1bOI8w3O1/BPSvvsB0= +github.com/crowdsecurity/go-cs-lib v0.0.16/go.mod 
h1:XwGcvTt4lMq4Tm1IRMSKMDf0CVrnytTU8Uoofa7AR+g= github.com/crowdsecurity/grokky v0.2.2 h1:yALsI9zqpDArYzmSSxfBq2dhYuGUTKMJq8KOEIAsuo4= github.com/crowdsecurity/grokky v0.2.2/go.mod h1:33usDIYzGDsgX1kHAThCbseso6JuWNJXOzRQDGXHtWM= github.com/crowdsecurity/machineid v1.0.2 h1:wpkpsUghJF8Khtmn/tg6GxgdhLA1Xflerh5lirI+bdc= @@ -117,12 +123,12 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dghubble/sling v1.4.2 h1:vs1HIGBbSl2SEALyU+irpYFLZMfc49Fp+jYryFebQjM= github.com/dghubble/sling v1.4.2/go.mod h1:o0arCOz0HwfqYQJLrRtqunaWOn4X6jxE/6ORKRpVTD4= -github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= -github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v24.0.9+incompatible h1:HPGzNmwfLZWdxHqK9/II92pyi1EpYKsAqcl4G0Of9v0= -github.com/docker/docker v24.0.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= +github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod 
h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -132,8 +138,10 @@ github.com/expr-lang/expr v1.16.9/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= -github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= -github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7DlmewI= +github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= @@ -154,8 +162,11 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= 
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= @@ -294,8 +305,8 @@ github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRx github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= +github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -303,8 +314,9 @@ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -334,6 +346,9 @@ github.com/goombaio/namegenerator v0.0.0-20181006234301-989e774b106e/go.mod h1:A github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-plugin v1.4.10 h1:xUbmA4jC6Dq163/fWcp8P3JuHilrHHMLNRxzGQJ9hNk= @@ -407,6 +422,8 @@ github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0f github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jarcoal/httpmock v1.1.0 h1:F47ChZj1Y2zFsCXxNkBPwNNKnAyOATcdQibk0qEdVCE= github.com/jarcoal/httpmock v1.1.0/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= +github.com/jcchavezs/mergefs v0.1.0 h1:7oteO7Ocl/fnfFMkoVLJxTveCjrsd//UB0j89xmnpec= +github.com/jcchavezs/mergefs v0.1.0/go.mod 
h1:eRLTrsA+vFwQZ48hj8p8gki/5v9C2bFtHH5Mnn4bcGk= github.com/jedib0t/go-pretty/v6 v6.5.9 h1:ACteMBRrrmm1gMsXe9PSTOClQ63IXDUt03H5U+UV8OU= github.com/jedib0t/go-pretty/v6 v6.5.9/go.mod h1:zbn98qrYlh95FIhwwsbIip0LYpwSG8SUOScs+v9/t0E= github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= @@ -434,8 +451,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA= -github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= @@ -467,8 +484,8 @@ github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffkt github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/magefile/mage v1.15.1-0.20230912152418-9f54e0f83e2a h1:tdPcGgyiH0K+SbsJBBm2oPyEIOTAvLBwD9TuUwVtZho= -github.com/magefile/mage v1.15.1-0.20230912152418-9f54e0f83e2a/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= 
+github.com/magefile/mage v1.15.1-0.20241126214340-bdc92f694516 h1:aAO0L0ulox6m/CLRYvJff+jWXYYCKGpEm3os7dM/Z+M= +github.com/magefile/mage v1.15.1-0.20241126214340-bdc92f694516/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -504,8 +521,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfr github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= -github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM= +github.com/miekg/dns v1.1.57/go.mod h1:uqRjCRUuEAA6qsOiJvDd+CFo/vW+y5WR6SNmHE55hZk= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= @@ -522,6 +539,8 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod 
h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -543,8 +562,8 @@ github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/oschwald/geoip2-golang v1.9.0 h1:uvD3O6fXAXs+usU+UGExshpdP13GAqp4GBrzN7IgKZc= github.com/oschwald/geoip2-golang v1.9.0/go.mod h1:BHK6TvDyATVQhKNbQBdrj9eAvuwOMi2zSFXizL3K81Y= github.com/oschwald/maxminddb-golang v1.12.0 h1:9FnTOD0YOhP7DGxGsq4glzpGy5+w7pq50AS6wALUMYs= @@ -556,8 +575,8 @@ github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUr github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml/v2 v2.1.1 h1:LWAJwfNvjQZCFIDKWYQaM62NcYeYViCmWIwmOStowAI= github.com/pelletier/go-toml/v2 v2.1.1/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= 
-github.com/petar-dambovaliev/aho-corasick v0.0.0-20230725210150-fb29fc3c913e h1:POJco99aNgosh92lGqmx7L1ei+kCymivB/419SD15PQ= -github.com/petar-dambovaliev/aho-corasick v0.0.0-20230725210150-fb29fc3c913e/go.mod h1:EHPiTAKtiFmrMldLUNswFwfZ2eJIYBHktdaUTZxYWRw= +github.com/petar-dambovaliev/aho-corasick v0.0.0-20240411101913-e07a1f0e8eb4 h1:1Kw2vDBXmjop+LclnzCb/fFy+sgb3gYARwfmoUcQe6o= +github.com/petar-dambovaliev/aho-corasick v0.0.0-20240411101913-e07a1f0e8eb4/go.mod h1:EHPiTAKtiFmrMldLUNswFwfZ2eJIYBHktdaUTZxYWRw= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= @@ -573,21 +592,21 @@ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 
h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/prom2json v1.3.0 h1:BlqrtbT9lLH3ZsOVhXPsHzFrApCTKRifB7gjJuypu6Y= github.com/prometheus/prom2json v1.3.0/go.mod h1:rMN7m0ApCowcoDlypBHlkNbp5eJQf/+1isKykIP5ZnM= github.com/r3labs/diff/v2 v2.14.1 h1:wRZ3jB44Ny50DSXsoIcFQ27l2x+n5P31K/Pk+b9B0Ic= @@ -601,8 +620,8 @@ github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= @@ -637,8 +656,8 @@ github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -667,8 +686,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tetratelabs/wazero v1.8.0 h1:iEKu0d4c2Pd+QSRieYbnQC9yiFlMS9D+Jr0LsRmcF4g= github.com/tetratelabs/wazero v1.8.0/go.mod h1:yAI0XTsMBhREkM/YDAK/zNou3GoiAce1P6+rp/wQhjs= -github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= -github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= 
+github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -687,6 +706,8 @@ github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65E github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26 h1:UFHFmFfixpmfRBcxuu+LA9l8MdURWVdVNUHxO5n1d2w= github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26/go.mod h1:IGhd0qMDsUa9acVjsbsT7bu3ktadtGOHI79+idTew/M= +github.com/valllabh/ocsf-schema-golang v1.0.3 h1:eR8k/3jP/OOqB8LRCtdJ4U+vlgd/gk5y3KMXoodrsrw= +github.com/valllabh/ocsf-schema-golang v1.0.3/go.mod h1:sZ3as9xqm1SSK5feFWIR2CuGeGRhsM7TR1MbpBctzPk= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vjeantet/grok v1.0.1 h1:2rhIR7J4gThTgcZ1m2JY4TrJZNgjn985U28kT2wQrJ4= github.com/vjeantet/grok v1.0.1/go.mod h1:ax1aAchzC6/QMXMcyzHQGZWaW1l195+uMYIkCWPCNIo= @@ -729,6 +750,22 @@ go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4S go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= go.mongodb.org/mongo-driver v1.9.4 h1:qXWlnK2WCOWSxJ/Hm3XyYOGKv3ujA2btBsCyuIFvQjc= go.mongodb.org/mongo-driver v1.9.4/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= 
+go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -765,8 +802,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= 
-golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -774,8 +811,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -799,8 +836,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net 
v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -810,8 +847,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -847,8 +884,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= 
-golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -856,8 +893,8 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -870,10 +907,10 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.3.0 
h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -895,27 +932,31 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= +golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54 h1:9NWlQfY2ePejTmfwUH1OWwmznFa+0kKcHGPDvcPza9M= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= +google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -945,8 +986,8 @@ gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= -gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY= k8s.io/api v0.28.4/go.mod h1:axWTGrY88s/5YE+JSt4uUi6NMM+gur1en2REMR7IRj0= diff --git a/pkg/acquisition/acquisition.go b/pkg/acquisition/acquisition.go index ef5a413b91f..d3928270598 100644 --- a/pkg/acquisition/acquisition.go +++ b/pkg/acquisition/acquisition.go @@ -16,6 +16,7 @@ import ( tomb "gopkg.in/tomb.v2" "gopkg.in/yaml.v2" + "github.com/crowdsecurity/go-cs-lib/csstring" "github.com/crowdsecurity/go-cs-lib/trace" 
"github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" @@ -116,7 +117,7 @@ func setupLogger(source, name string, level *log.Level) (*log.Entry, error) { // if the configuration is not valid it returns an error. // If the datasource can't be run (eg. journalctl not available), it still returns an error which // can be checked for the appropriate action. -func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg, metricsLevel int) (*DataSource, error) { +func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg, metricsLevel int) (DataSource, error) { // we dump it back to []byte, because we want to decode the yaml blob twice: // once to DataSourceCommonCfg, and then later to the dedicated type of the datasource yamlConfig, err := yaml.Marshal(commonConfig) @@ -140,10 +141,10 @@ func DataSourceConfigure(commonConfig configuration.DataSourceCommonCfg, metrics } /* configure the actual datasource */ if err := dataSrc.Configure(yamlConfig, subLogger, metricsLevel); err != nil { - return nil, fmt.Errorf("failed to configure datasource %s: %w", commonConfig.Source, err) + return nil, err } - return &dataSrc, nil + return dataSrc, nil } // detectBackwardCompatAcquis: try to magically detect the type for backward compat (type was not mandatory then) @@ -164,8 +165,6 @@ func detectBackwardCompatAcquis(sub configuration.DataSourceCommonCfg) string { } func LoadAcquisitionFromDSN(dsn string, labels map[string]string, transformExpr string) ([]DataSource, error) { - var sources []DataSource - frags := strings.Split(dsn, ":") if len(frags) == 1 { return nil, fmt.Errorf("%s isn't valid dsn (no protocol)", dsn) @@ -197,9 +196,7 @@ func LoadAcquisitionFromDSN(dsn string, labels map[string]string, transformExpr return nil, fmt.Errorf("while configuration datasource for %s: %w", dsn, err) } - sources = append(sources, dataSrc) - - return sources, nil + return []DataSource{dataSrc}, nil } func GetMetricsLevelFromPromCfg(prom 
*csconfig.PrometheusCfg) int { @@ -236,7 +233,16 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig return nil, err } - dec := yaml.NewDecoder(yamlFile) + defer yamlFile.Close() + + acquisContent, err := io.ReadAll(yamlFile) + if err != nil { + return nil, fmt.Errorf("failed to read %s: %w", acquisFile, err) + } + + expandedAcquis := csstring.StrictExpand(string(acquisContent), os.LookupEnv) + + dec := yaml.NewDecoder(strings.NewReader(expandedAcquis)) dec.SetStrict(true) idx := -1 @@ -249,7 +255,7 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig err = dec.Decode(&sub) if err != nil { if !errors.Is(err, io.EOF) { - return nil, fmt.Errorf("failed to yaml decode %s: %w", acquisFile, err) + return nil, fmt.Errorf("failed to parse %s: %w", acquisFile, err) } log.Tracef("End of yaml file") @@ -259,6 +265,12 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig // for backward compat ('type' was not mandatory, detect it) if guessType := detectBackwardCompatAcquis(sub); guessType != "" { + log.Debugf("datasource type missing in %s (position %d): detected 'source=%s'", acquisFile, idx, guessType) + + if sub.Source != "" && sub.Source != guessType { + log.Warnf("datasource type mismatch in %s (position %d): found '%s' but should probably be '%s'", acquisFile, idx, sub.Source, guessType) + } + sub.Source = guessType } // it's an empty item, skip it @@ -270,18 +282,18 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig if sub.Source != "docker" { // docker is the only source that can be empty - return nil, fmt.Errorf("missing labels in %s (position: %d)", acquisFile, idx) + return nil, fmt.Errorf("missing labels in %s (position %d)", acquisFile, idx) } } if sub.Source == "" { - return nil, fmt.Errorf("data source type is empty ('source') in %s (position: %d)", acquisFile, idx) + return nil, fmt.Errorf("data source type is empty ('source') in %s 
(position %d)", acquisFile, idx) } // pre-check that the source is valid _, err := GetDataSourceIface(sub.Source) if err != nil { - return nil, fmt.Errorf("in file %s (position: %d) - %w", acquisFile, idx, err) + return nil, fmt.Errorf("in file %s (position %d) - %w", acquisFile, idx, err) } uniqueId := uuid.NewString() @@ -295,19 +307,19 @@ func LoadAcquisitionFromFile(config *csconfig.CrowdsecServiceCfg, prom *csconfig continue } - return nil, fmt.Errorf("while configuring datasource of type %s from %s (position: %d): %w", sub.Source, acquisFile, idx, err) + return nil, fmt.Errorf("while configuring datasource of type %s from %s (position %d): %w", sub.Source, acquisFile, idx, err) } if sub.TransformExpr != "" { vm, err := expr.Compile(sub.TransformExpr, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) if err != nil { - return nil, fmt.Errorf("while compiling transform expression '%s' for datasource %s in %s (position: %d): %w", sub.TransformExpr, sub.Source, acquisFile, idx, err) + return nil, fmt.Errorf("while compiling transform expression '%s' for datasource %s in %s (position %d): %w", sub.TransformExpr, sub.Source, acquisFile, idx, err) } transformRuntimes[uniqueId] = vm } - sources = append(sources, *src) + sources = append(sources, src) } } @@ -326,7 +338,8 @@ func GetMetrics(sources []DataSource, aggregated bool) error { for _, metric := range metrics { if err := prometheus.Register(metric); err != nil { - if _, ok := err.(prometheus.AlreadyRegisteredError); !ok { + var alreadyRegisteredErr prometheus.AlreadyRegisteredError + if !errors.As(err, &alreadyRegisteredErr) { return fmt.Errorf("could not register metrics for datasource %s: %w", sources[i].GetName(), err) } // ignore the error @@ -344,6 +357,7 @@ func copyEvent(evt types.Event, line string) types.Event { evtCopy.Line = evt.Line evtCopy.Line.Raw = line evtCopy.Line.Labels = make(map[string]string) + for k, v := range evt.Line.Labels { evtCopy.Line.Labels[k] = v } @@ 
-351,13 +365,13 @@ func copyEvent(evt types.Event, line string) types.Event { return evtCopy } -func transform(transformChan chan types.Event, output chan types.Event, AcquisTomb *tomb.Tomb, transformRuntime *vm.Program, logger *log.Entry) { +func transform(transformChan chan types.Event, output chan types.Event, acquisTomb *tomb.Tomb, transformRuntime *vm.Program, logger *log.Entry) { defer trace.CatchPanic("crowdsec/acquis") logger.Infof("transformer started") for { select { - case <-AcquisTomb.Dying(): + case <-acquisTomb.Dying(): logger.Debugf("transformer is dying") return case evt := <-transformChan: @@ -386,6 +400,7 @@ func transform(transformChan chan types.Event, output chan types.Event, AcquisTo if !ok { logger.Errorf("transform expression returned []interface{}, but cannot assert an element to string") output <- evt + continue } @@ -405,7 +420,7 @@ func transform(transformChan chan types.Event, output chan types.Event, AcquisTo } } -func StartAcquisition(ctx context.Context, sources []DataSource, output chan types.Event, AcquisTomb *tomb.Tomb) error { +func StartAcquisition(ctx context.Context, sources []DataSource, output chan types.Event, acquisTomb *tomb.Tomb) error { // Don't wait if we have no sources, as it will hang forever if len(sources) == 0 { return nil @@ -415,7 +430,7 @@ func StartAcquisition(ctx context.Context, sources []DataSource, output chan typ subsrc := sources[i] // ensure its a copy log.Debugf("starting one source %d/%d ->> %T", i, len(sources), subsrc) - AcquisTomb.Go(func() error { + acquisTomb.Go(func() error { defer trace.CatchPanic("crowdsec/acquis") var err error @@ -434,21 +449,21 @@ func StartAcquisition(ctx context.Context, sources []DataSource, output chan typ "datasource": subsrc.GetName(), }) - AcquisTomb.Go(func() error { - transform(outChan, output, AcquisTomb, transformRuntime, transformLogger) + acquisTomb.Go(func() error { + transform(outChan, output, acquisTomb, transformRuntime, transformLogger) return nil }) } if 
subsrc.GetMode() == configuration.TAIL_MODE { - err = subsrc.StreamingAcquisition(ctx, outChan, AcquisTomb) + err = subsrc.StreamingAcquisition(ctx, outChan, acquisTomb) } else { - err = subsrc.OneShotAcquisition(ctx, outChan, AcquisTomb) + err = subsrc.OneShotAcquisition(ctx, outChan, acquisTomb) } if err != nil { // if one of the acqusition returns an error, we kill the others to properly shutdown - AcquisTomb.Kill(err) + acquisTomb.Kill(err) } return nil @@ -456,7 +471,7 @@ func StartAcquisition(ctx context.Context, sources []DataSource, output chan typ } /*return only when acquisition is over (cat) or never (tail)*/ - err := AcquisTomb.Wait() + err := acquisTomb.Wait() return err } diff --git a/pkg/acquisition/acquisition_test.go b/pkg/acquisition/acquisition_test.go index dd70172cf62..1ea8f11c22a 100644 --- a/pkg/acquisition/acquisition_test.go +++ b/pkg/acquisition/acquisition_test.go @@ -140,7 +140,7 @@ log_level: debug source: mock toto: test_value1 `, - ExpectedError: "failed to configure datasource mock: mode ratata is not supported", + ExpectedError: "mode ratata is not supported", }, { TestName: "bad_type_config", @@ -182,7 +182,8 @@ wowo: ajsajasjas for _, tc := range tests { t.Run(tc.TestName, func(t *testing.T) { common := configuration.DataSourceCommonCfg{} - yaml.Unmarshal([]byte(tc.String), &common) + err := yaml.Unmarshal([]byte(tc.String), &common) + require.NoError(t, err) ds, err := DataSourceConfigure(common, configuration.METRICS_NONE) cstest.RequireErrorContains(t, err, tc.ExpectedError) @@ -192,19 +193,19 @@ wowo: ajsajasjas switch tc.TestName { case "basic_valid_config": - mock := (*ds).Dump().(*MockSource) + mock := ds.Dump().(*MockSource) assert.Equal(t, "test_value1", mock.Toto) assert.Equal(t, "cat", mock.Mode) assert.Equal(t, log.InfoLevel, mock.logger.Logger.Level) assert.Equal(t, map[string]string{"test": "foobar"}, mock.Labels) case "basic_debug_config": - mock := (*ds).Dump().(*MockSource) + mock := ds.Dump().(*MockSource) 
assert.Equal(t, "test_value1", mock.Toto) assert.Equal(t, "cat", mock.Mode) assert.Equal(t, log.DebugLevel, mock.logger.Logger.Level) assert.Equal(t, map[string]string{"test": "foobar"}, mock.Labels) case "basic_tailmode_config": - mock := (*ds).Dump().(*MockSource) + mock := ds.Dump().(*MockSource) assert.Equal(t, "test_value1", mock.Toto) assert.Equal(t, "tail", mock.Mode) assert.Equal(t, log.DebugLevel, mock.logger.Logger.Level) @@ -216,6 +217,7 @@ wowo: ajsajasjas func TestLoadAcquisitionFromFile(t *testing.T) { appendMockSource() + t.Setenv("TEST_ENV", "test_value2") tests := []struct { TestName string @@ -236,7 +238,7 @@ func TestLoadAcquisitionFromFile(t *testing.T) { Config: csconfig.CrowdsecServiceCfg{ AcquisitionFiles: []string{"test_files/badyaml.yaml"}, }, - ExpectedError: "failed to yaml decode test_files/badyaml.yaml: yaml: unmarshal errors", + ExpectedError: "failed to parse test_files/badyaml.yaml: yaml: unmarshal errors", ExpectedLen: 0, }, { @@ -272,7 +274,7 @@ func TestLoadAcquisitionFromFile(t *testing.T) { Config: csconfig.CrowdsecServiceCfg{ AcquisitionFiles: []string{"test_files/bad_source.yaml"}, }, - ExpectedError: "in file test_files/bad_source.yaml (position: 0) - unknown data source does_not_exist", + ExpectedError: "in file test_files/bad_source.yaml (position 0) - unknown data source does_not_exist", }, { TestName: "invalid_filetype_config", @@ -281,6 +283,13 @@ func TestLoadAcquisitionFromFile(t *testing.T) { }, ExpectedError: "while configuring datasource of type file from test_files/bad_filetype.yaml", }, + { + TestName: "from_env", + Config: csconfig.CrowdsecServiceCfg{ + AcquisitionFiles: []string{"test_files/env.yaml"}, + }, + ExpectedLen: 1, + }, } for _, tc := range tests { t.Run(tc.TestName, func(t *testing.T) { @@ -292,6 +301,13 @@ func TestLoadAcquisitionFromFile(t *testing.T) { } assert.Len(t, dss, tc.ExpectedLen) + if tc.TestName == "from_env" { + mock := dss[0].Dump().(*MockSource) + assert.Equal(t, "test_value2", 
mock.Toto) + assert.Equal(t, "foobar", mock.Labels["test"]) + assert.Equal(t, "${NON_EXISTING}", mock.Labels["non_existing"]) + assert.Equal(t, log.InfoLevel, mock.logger.Logger.Level) + } }) } } diff --git a/pkg/acquisition/configuration/configuration.go b/pkg/acquisition/configuration/configuration.go index 3e27da1b9e6..a9d570d2788 100644 --- a/pkg/acquisition/configuration/configuration.go +++ b/pkg/acquisition/configuration/configuration.go @@ -13,12 +13,14 @@ type DataSourceCommonCfg struct { UseTimeMachine bool `yaml:"use_time_machine,omitempty"` UniqueId string `yaml:"unique_id,omitempty"` TransformExpr string `yaml:"transform,omitempty"` - Config map[string]interface{} `yaml:",inline"` //to keep the datasource-specific configuration directives + Config map[string]interface{} `yaml:",inline"` // to keep the datasource-specific configuration directives } -var TAIL_MODE = "tail" -var CAT_MODE = "cat" -var SERVER_MODE = "server" // No difference with tail, just a bit more verbose +var ( + TAIL_MODE = "tail" + CAT_MODE = "cat" + SERVER_MODE = "server" // No difference with tail, just a bit more verbose +) const ( METRICS_NONE = iota diff --git a/pkg/acquisition/modules/appsec/appsec.go b/pkg/acquisition/modules/appsec/appsec.go index 2f7861b32ff..78225d5f8c3 100644 --- a/pkg/acquisition/modules/appsec/appsec.go +++ b/pkg/acquisition/modules/appsec/appsec.go @@ -155,14 +155,14 @@ func (w *AppsecSource) GetAggregMetrics() []prometheus.Collector { return []prometheus.Collector{AppsecReqCounter, AppsecBlockCounter, AppsecRuleHits, AppsecOutbandParsingHistogram, AppsecInbandParsingHistogram, AppsecGlobalParsingHistogram} } -func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { err := w.UnmarshalConfig(yamlConfig) if err != nil { return fmt.Errorf("unable to parse appsec configuration: %w", err) } w.logger = logger - 
w.metricsLevel = MetricsLevel + w.metricsLevel = metricsLevel w.logger.Tracef("Appsec configuration: %+v", w.config) if w.config.AuthCacheDuration == nil { @@ -180,7 +180,7 @@ func (w *AppsecSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe w.InChan = make(chan appsec.ParsedRequest) appsecCfg := appsec.AppsecConfig{Logger: w.logger.WithField("component", "appsec_config")} - //we keep the datasource name + // we keep the datasource name appsecCfg.Name = w.config.Name // let's load the associated appsec_config: @@ -275,6 +275,7 @@ func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types. for _, runner := range w.AppsecRunners { runner.outChan = out + t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/appsec/live/runner") return runner.Run(t) @@ -285,16 +286,20 @@ func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types. if w.config.ListenSocket != "" { w.logger.Infof("creating unix socket %s", w.config.ListenSocket) _ = os.RemoveAll(w.config.ListenSocket) + listener, err := net.Listen("unix", w.config.ListenSocket) if err != nil { return fmt.Errorf("appsec server failed: %w", err) } + defer listener.Close() + if w.config.CertFilePath != "" && w.config.KeyFilePath != "" { err = w.server.ServeTLS(listener, w.config.CertFilePath, w.config.KeyFilePath) } else { err = w.server.Serve(listener) } + if err != nil && !errors.Is(err, http.ErrServerClosed) { return fmt.Errorf("appsec server failed: %w", err) } @@ -304,8 +309,10 @@ func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types. }) t.Go(func() error { var err error + if w.config.ListenAddr != "" { w.logger.Infof("creating TCP server on %s", w.config.ListenAddr) + if w.config.CertFilePath != "" && w.config.KeyFilePath != "" { err = w.server.ListenAndServeTLS(w.config.CertFilePath, w.config.KeyFilePath) } else { @@ -323,7 +330,11 @@ func (w *AppsecSource) StreamingAcquisition(ctx context.Context, out chan types. 
w.logger.Info("Shutting down Appsec server") // xx let's clean up the appsec runners :) appsec.AppsecRulesDetails = make(map[int]appsec.RulesDetails) - w.server.Shutdown(ctx) + + if err := w.server.Shutdown(ctx); err != nil { + w.logger.Errorf("Error shutting down Appsec server: %s", err.Error()) + } + return nil }) @@ -354,11 +365,13 @@ func (w *AppsecSource) IsAuth(apiKey string) bool { } req.Header.Add("X-Api-Key", apiKey) + resp, err := client.Do(req) if err != nil { log.Errorf("Error performing request: %s", err) return false } + defer resp.Body.Close() return resp.StatusCode == http.StatusOK @@ -371,17 +384,21 @@ func (w *AppsecSource) appsecHandler(rw http.ResponseWriter, r *http.Request) { apiKey := r.Header.Get(appsec.APIKeyHeaderName) clientIP := r.Header.Get(appsec.IPHeaderName) remoteIP := r.RemoteAddr + if apiKey == "" { w.logger.Errorf("Unauthorized request from '%s' (real IP = %s)", remoteIP, clientIP) rw.WriteHeader(http.StatusUnauthorized) + return } + expiration, exists := w.AuthCache.Get(apiKey) // if the apiKey is not in cache or has expired, just recheck the auth if !exists || time.Now().After(expiration) { if !w.IsAuth(apiKey) { rw.WriteHeader(http.StatusUnauthorized) w.logger.Errorf("Unauthorized request from '%s' (real IP = %s)", remoteIP, clientIP) + return } @@ -394,8 +411,10 @@ func (w *AppsecSource) appsecHandler(rw http.ResponseWriter, r *http.Request) { if err != nil { w.logger.Errorf("%s", err) rw.WriteHeader(http.StatusInternalServerError) + return } + parsedRequest.AppsecEngine = w.config.Name logger := w.logger.WithFields(log.Fields{ @@ -427,6 +446,8 @@ func (w *AppsecSource) appsecHandler(rw http.ResponseWriter, r *http.Request) { logger.Errorf("unable to serialize response: %s", err) rw.WriteHeader(http.StatusInternalServerError) } else { - rw.Write(body) + if _, err := rw.Write(body); err != nil { + logger.Errorf("unable to write response: %s", err) + } } } diff --git a/pkg/acquisition/modules/appsec/appsec_hooks_test.go 
b/pkg/acquisition/modules/appsec/appsec_hooks_test.go index c549d2ef1d1..d87384a0189 100644 --- a/pkg/acquisition/modules/appsec/appsec_hooks_test.go +++ b/pkg/acquisition/modules/appsec/appsec_hooks_test.go @@ -341,7 +341,6 @@ func TestAppsecOnMatchHooks(t *testing.T) { } func TestAppsecPreEvalHooks(t *testing.T) { - tests := []appsecRuleTest{ { name: "Basic pre_eval hook to disable inband rule", @@ -403,7 +402,6 @@ func TestAppsecPreEvalHooks(t *testing.T) { require.Len(t, responses, 1) require.True(t, responses[0].InBandInterrupt) - }, }, { @@ -670,7 +668,6 @@ func TestAppsecPreEvalHooks(t *testing.T) { } func TestAppsecRemediationConfigHooks(t *testing.T) { - tests := []appsecRuleTest{ { name: "Basic matching rule", @@ -759,6 +756,7 @@ func TestAppsecRemediationConfigHooks(t *testing.T) { }) } } + func TestOnMatchRemediationHooks(t *testing.T) { tests := []appsecRuleTest{ { diff --git a/pkg/acquisition/modules/appsec/appsec_runner.go b/pkg/acquisition/modules/appsec/appsec_runner.go index 7ce43779591..8bdb6405d98 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner.go +++ b/pkg/acquisition/modules/appsec/appsec_runner.go @@ -35,19 +35,24 @@ type AppsecRunner struct { func (r *AppsecRunner) MergeDedupRules(collections []appsec.AppsecCollection, logger *log.Entry) string { var rulesArr []string dedupRules := make(map[string]struct{}) + discarded := 0 for _, collection := range collections { + // Dedup *our* rules for _, rule := range collection.Rules { - if _, ok := dedupRules[rule]; !ok { - rulesArr = append(rulesArr, rule) - dedupRules[rule] = struct{}{} - } else { - logger.Debugf("Discarding duplicate rule : %s", rule) + if _, ok := dedupRules[rule]; ok { + discarded++ + logger.Debugf("Discarding duplicate rule : %s", rule) + continue } + rulesArr = append(rulesArr, rule) + dedupRules[rule] = struct{}{} } + // Don't mess up with native modsec rules + rulesArr = append(rulesArr, collection.NativeRules...) 
} - if len(rulesArr) != len(dedupRules) { - logger.Warningf("%d rules were discarded as they were duplicates", len(rulesArr)-len(dedupRules)) + if discarded > 0 { + logger.Warningf("%d rules were discarded as they were duplicates", discarded) } return strings.Join(rulesArr, "\n") @@ -90,6 +95,9 @@ func (r *AppsecRunner) Init(datadir string) error { outbandCfg = outbandCfg.WithRequestBodyInMemoryLimit(*r.AppsecRuntime.Config.OutOfBandOptions.RequestBodyInMemoryLimit) } r.AppsecOutbandEngine, err = coraza.NewWAF(outbandCfg) + if err != nil { + return fmt.Errorf("unable to initialize outband engine : %w", err) + } if r.AppsecRuntime.DisabledInBandRulesTags != nil { for _, tag := range r.AppsecRuntime.DisabledInBandRulesTags { @@ -118,10 +126,6 @@ func (r *AppsecRunner) Init(datadir string) error { r.logger.Tracef("Loaded inband rules: %+v", r.AppsecInbandEngine.GetRuleGroup().GetRules()) r.logger.Tracef("Loaded outband rules: %+v", r.AppsecOutbandEngine.GetRuleGroup().GetRules()) - if err != nil { - return fmt.Errorf("unable to initialize outband engine : %w", err) - } - return nil } @@ -379,7 +383,6 @@ func (r *AppsecRunner) handleRequest(request *appsec.ParsedRequest) { // time spent to process inband AND out of band rules globalParsingElapsed := time.Since(startGlobalParsing) AppsecGlobalParsingHistogram.With(prometheus.Labels{"source": request.RemoteAddrNormalized, "appsec_engine": request.AppsecEngine}).Observe(globalParsingElapsed.Seconds()) - } func (r *AppsecRunner) Run(t *tomb.Tomb) error { diff --git a/pkg/acquisition/modules/appsec/appsec_runner_test.go b/pkg/acquisition/modules/appsec/appsec_runner_test.go index 2027cf1d2c0..38d8bbe431f 100644 --- a/pkg/acquisition/modules/appsec/appsec_runner_test.go +++ b/pkg/acquisition/modules/appsec/appsec_runner_test.go @@ -3,14 +3,35 @@ package appsecacquisition import ( "testing" - "github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" + + 
"github.com/crowdsecurity/crowdsec/pkg/appsec/appsec_rule" ) -func TestAppsecRuleLoad(t *testing.T) { +func TestAppsecConflictRuleLoad(t *testing.T) { log.SetLevel(log.TraceLevel) tests := []appsecRuleTest{ + { + name: "simple native rule load", + expected_load_ok: true, + inband_native_rules: []string{ + `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, + `Secrule REQUEST_HEADERS:Content-Type "@rx ^multipart/form-data" "id:101,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=MULTIPART"`, + }, + afterload_asserts: func(runner AppsecRunner) { + require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 2) + }, + }, + { + name: "id conflict on native rule load", + expected_load_ok: false, + inband_native_rules: []string{ + `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, + `Secrule REQUEST_HEADERS:Content-Type "@rx ^multipart/form-data" "id:101,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=MULTIPART"`, + `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, + }, + }, { name: "simple rule load", expected_load_ok: true, @@ -26,33 +47,66 @@ func TestAppsecRuleLoad(t *testing.T) { }, }, { - name: "simple native rule load", + name: "duplicate rule load", expected_load_ok: true, - inband_native_rules: []string{ - `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + { + Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, + }, + 
afterload_asserts: func(runner AppsecRunner) { + require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 1) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + loadAppSecEngine(test, t) + }) + } +} + +func TestAppsecRuleLoad(t *testing.T) { + log.SetLevel(log.TraceLevel) + + tests := []appsecRuleTest{ + { + name: "simple rule load", + expected_load_ok: true, + inband_rules: []appsec_rule.CustomRule{ + { + Name: "rule1", + Zones: []string{"ARGS"}, + Match: appsec_rule.Match{Type: "equals", Value: "toto"}, + }, }, afterload_asserts: func(runner AppsecRunner) { require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 1) }, }, { - name: "simple native rule load (2)", + name: "simple native rule load", expected_load_ok: true, inband_native_rules: []string{ `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, - `Secrule REQUEST_HEADERS:Content-Type "@rx ^multipart/form-data" "id:101,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=MULTIPART"`, }, afterload_asserts: func(runner AppsecRunner) { - require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 2) + require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 1) }, }, { - name: "simple native rule load + dedup", + name: "simple native rule load (2)", expected_load_ok: true, inband_native_rules: []string{ `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, `Secrule REQUEST_HEADERS:Content-Type "@rx ^multipart/form-data" "id:101,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=MULTIPART"`, - `Secrule REQUEST_HEADERS:Content-Type "@rx ^application/x-www-form-urlencoded" "id:100,phase:1,pass,nolog,noauditlog,ctl:requestBodyProcessor=URLENCODED"`, }, afterload_asserts: func(runner AppsecRunner) { require.Len(t, 
runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 2) @@ -105,21 +159,22 @@ func TestAppsecRuleLoad(t *testing.T) { Or: []appsec_rule.CustomRule{ { - //Name: "rule1", + // Name: "rule1", Zones: []string{"ARGS"}, Match: appsec_rule.Match{Type: "equals", Value: "toto"}, }, { - //Name: "rule1", + // Name: "rule1", Zones: []string{"ARGS"}, Match: appsec_rule.Match{Type: "equals", Value: "tutu"}, }, { - //Name: "rule1", + // Name: "rule1", Zones: []string{"ARGS"}, Match: appsec_rule.Match{Type: "equals", Value: "tata"}, - }, { - //Name: "rule1", + }, + { + // Name: "rule1", Zones: []string{"ARGS"}, Match: appsec_rule.Match{Type: "equals", Value: "titi"}, }, @@ -130,6 +185,20 @@ func TestAppsecRuleLoad(t *testing.T) { require.Len(t, runner.AppsecInbandEngine.GetRuleGroup().GetRules(), 4) }, }, + { + name: "invalid inband rule", + expected_load_ok: false, + inband_native_rules: []string{ + "this_is_not_a_rule", + }, + }, + { + name: "invalid outofband rule", + expected_load_ok: false, + outofband_native_rules: []string{ + "this_is_not_a_rule", + }, + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { diff --git a/pkg/acquisition/modules/appsec/appsec_test.go b/pkg/acquisition/modules/appsec/appsec_test.go index 1534f5cb7fa..5f2b93836f6 100644 --- a/pkg/acquisition/modules/appsec/appsec_test.go +++ b/pkg/acquisition/modules/appsec/appsec_test.go @@ -41,7 +41,9 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { log.SetLevel(log.WarnLevel) } inbandRules := []string{} + nativeInbandRules := []string{} outofbandRules := []string{} + nativeOutofbandRules := []string{} InChan := make(chan appsec.ParsedRequest) OutChan := make(chan types.Event) @@ -56,8 +58,8 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { inbandRules = append(inbandRules, strRule) } - inbandRules = append(inbandRules, test.inband_native_rules...) - outofbandRules = append(outofbandRules, test.outofband_native_rules...) 
+ nativeInbandRules = append(nativeInbandRules, test.inband_native_rules...) + nativeOutofbandRules = append(nativeOutofbandRules, test.outofband_native_rules...) for ridx, rule := range test.outofband_rules { strRule, _, err := rule.Convert(appsec_rule.ModsecurityRuleType, rule.Name) if err != nil { @@ -66,7 +68,8 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { outofbandRules = append(outofbandRules, strRule) } - appsecCfg := appsec.AppsecConfig{Logger: logger, + appsecCfg := appsec.AppsecConfig{ + Logger: logger, OnLoad: test.on_load, PreEval: test.pre_eval, PostEval: test.post_eval, @@ -75,13 +78,14 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { UserBlockedHTTPCode: test.UserBlockedHTTPCode, UserPassedHTTPCode: test.UserPassedHTTPCode, DefaultRemediation: test.DefaultRemediation, - DefaultPassAction: test.DefaultPassAction} + DefaultPassAction: test.DefaultPassAction, + } AppsecRuntime, err := appsecCfg.Build() if err != nil { t.Fatalf("unable to build appsec runtime : %s", err) } - AppsecRuntime.InBandRules = []appsec.AppsecCollection{{Rules: inbandRules}} - AppsecRuntime.OutOfBandRules = []appsec.AppsecCollection{{Rules: outofbandRules}} + AppsecRuntime.InBandRules = []appsec.AppsecCollection{{Rules: inbandRules, NativeRules: nativeInbandRules}} + AppsecRuntime.OutOfBandRules = []appsec.AppsecCollection{{Rules: outofbandRules, NativeRules: nativeOutofbandRules}} appsecRunnerUUID := uuid.New().String() //we copy AppsecRutime for each runner wrt := *AppsecRuntime @@ -96,8 +100,14 @@ func loadAppSecEngine(test appsecRuleTest, t *testing.T) { } err = runner.Init("/tmp/") if err != nil { + if !test.expected_load_ok { + return + } t.Fatalf("unable to initialize runner : %s", err) } + if !test.expected_load_ok { + t.Fatalf("expected load to fail but it didn't") + } if test.afterload_asserts != nil { //afterload asserts are just to evaluate the state of the runner after the rules have been loaded diff --git 
a/pkg/acquisition/modules/appsec/bodyprocessors/raw.go b/pkg/acquisition/modules/appsec/bodyprocessors/raw.go index e2e23eb57ae..aa467ecf048 100644 --- a/pkg/acquisition/modules/appsec/bodyprocessors/raw.go +++ b/pkg/acquisition/modules/appsec/bodyprocessors/raw.go @@ -9,8 +9,7 @@ import ( "github.com/crowdsecurity/coraza/v3/experimental/plugins/plugintypes" ) -type rawBodyProcessor struct { -} +type rawBodyProcessor struct{} type setterInterface interface { Set(string) @@ -33,9 +32,7 @@ func (*rawBodyProcessor) ProcessResponse(reader io.Reader, v plugintypes.Transac return nil } -var ( - _ plugintypes.BodyProcessor = &rawBodyProcessor{} -) +var _ plugintypes.BodyProcessor = &rawBodyProcessor{} //nolint:gochecknoinits //Coraza recommends to use init() for registering plugins func init() { diff --git a/pkg/acquisition/modules/appsec/utils.go b/pkg/acquisition/modules/appsec/utils.go index 8995b305680..65bb4601c21 100644 --- a/pkg/acquisition/modules/appsec/utils.go +++ b/pkg/acquisition/modules/appsec/utils.go @@ -296,6 +296,7 @@ func (r *AppsecRunner) AccumulateTxToEvent(evt *types.Event, req *appsec.ParsedR "hash": hash, "version": version, "matched_zones": matchedZones, + "logdata": rule.Data(), } evt.Appsec.MatchedRules = append(evt.Appsec.MatchedRules, corazaRule) } diff --git a/pkg/acquisition/modules/cloudwatch/cloudwatch.go b/pkg/acquisition/modules/cloudwatch/cloudwatch.go index ba267c9050b..5739ebc3124 100644 --- a/pkg/acquisition/modules/cloudwatch/cloudwatch.go +++ b/pkg/acquisition/modules/cloudwatch/cloudwatch.go @@ -154,13 +154,13 @@ func (cw *CloudwatchSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (cw *CloudwatchSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (cw *CloudwatchSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { err := cw.UnmarshalConfig(yamlConfig) if err != nil { return err } - cw.metricsLevel = MetricsLevel + cw.metricsLevel = metricsLevel 
cw.logger = logger.WithField("group", cw.Config.GroupName) @@ -330,9 +330,12 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(ctx context.Context, out cha LastIngestionTime := time.Unix(0, *event.LastIngestionTime*int64(time.Millisecond)) if LastIngestionTime.Before(oldest) { cw.logger.Tracef("stop iteration, %s reached oldest age, stop (%s < %s)", *event.LogStreamName, LastIngestionTime, time.Now().UTC().Add(-*cw.Config.MaxStreamAge)) + hasMoreStreams = false + return false } + cw.logger.Tracef("stream %s is elligible for monitoring", *event.LogStreamName) // the stream has been updated recently, check if we should monitor it var expectMode int @@ -341,6 +344,7 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(ctx context.Context, out cha } else { expectMode = types.TIMEMACHINE } + monitorStream := LogStreamTailConfig{ GroupName: cw.Config.GroupName, StreamName: *event.LogStreamName, @@ -354,16 +358,20 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(ctx context.Context, out cha out <- monitorStream } } + if lastPage { cw.logger.Tracef("reached last page") + hasMoreStreams = false } + return true }, ) if err != nil { return fmt.Errorf("while describing group %s: %w", cw.Config.GroupName, err) } + cw.logger.Tracef("after DescribeLogStreamsPagesWithContext") } } @@ -373,12 +381,14 @@ func (cw *CloudwatchSource) WatchLogGroupForStreams(ctx context.Context, out cha // LogStreamManager receives the potential streams to monitor, and starts a go routine when needed func (cw *CloudwatchSource) LogStreamManager(ctx context.Context, in chan LogStreamTailConfig, outChan chan types.Event) error { cw.logger.Debugf("starting to monitor streams for %s", cw.Config.GroupName) + pollDeadStreamInterval := time.NewTicker(def_PollDeadStreamInterval) for { select { case newStream := <-in: //nolint:govet // copylocks won't matter if the tomb is not initialized shouldCreate := true + cw.logger.Tracef("received new streams to monitor : %s/%s", newStream.GroupName, 
newStream.StreamName) if cw.Config.StreamName != nil && newStream.StreamName != *cw.Config.StreamName { @@ -402,12 +412,16 @@ func (cw *CloudwatchSource) LogStreamManager(ctx context.Context, in chan LogStr if !stream.t.Alive() { cw.logger.Debugf("stream %s already exists, but is dead", newStream.StreamName) cw.monitoredStreams = append(cw.monitoredStreams[:idx], cw.monitoredStreams[idx+1:]...) + if cw.metricsLevel != configuration.METRICS_NONE { openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Dec() } + break } + shouldCreate = false + break } } @@ -417,19 +431,23 @@ func (cw *CloudwatchSource) LogStreamManager(ctx context.Context, in chan LogStr if cw.metricsLevel != configuration.METRICS_NONE { openedStreams.With(prometheus.Labels{"group": newStream.GroupName}).Inc() } + newStream.t = tomb.Tomb{} newStream.logger = cw.logger.WithField("stream", newStream.StreamName) cw.logger.Debugf("starting tail of stream %s", newStream.StreamName) newStream.t.Go(func() error { return cw.TailLogStream(ctx, &newStream, outChan) }) + cw.monitoredStreams = append(cw.monitoredStreams, &newStream) } case <-pollDeadStreamInterval.C: newMonitoredStreams := cw.monitoredStreams[:0] + for idx, stream := range cw.monitoredStreams { if !cw.monitoredStreams[idx].t.Alive() { cw.logger.Debugf("remove dead stream %s", stream.StreamName) + if cw.metricsLevel != configuration.METRICS_NONE { openedStreams.With(prometheus.Labels{"group": cw.monitoredStreams[idx].GroupName}).Dec() } @@ -437,20 +455,25 @@ func (cw *CloudwatchSource) LogStreamManager(ctx context.Context, in chan LogStr newMonitoredStreams = append(newMonitoredStreams, stream) } } + cw.monitoredStreams = newMonitoredStreams case <-cw.t.Dying(): cw.logger.Infof("LogStreamManager for %s is dying, %d alive streams", cw.Config.GroupName, len(cw.monitoredStreams)) + for idx, stream := range cw.monitoredStreams { if cw.monitoredStreams[idx].t.Alive() { cw.logger.Debugf("killing stream %s", stream.StreamName) 
cw.monitoredStreams[idx].t.Kill(nil) + if err := cw.monitoredStreams[idx].t.Wait(); err != nil { cw.logger.Debugf("error while waiting for death of %s : %s", stream.StreamName, err) } } } + cw.monitoredStreams = nil cw.logger.Debugf("routine cleanup done, return") + return nil } } @@ -458,12 +481,14 @@ func (cw *CloudwatchSource) LogStreamManager(ctx context.Context, in chan LogStr func (cw *CloudwatchSource) TailLogStream(ctx context.Context, cfg *LogStreamTailConfig, outChan chan types.Event) error { var startFrom *string + lastReadMessage := time.Now().UTC() ticker := time.NewTicker(cfg.PollStreamInterval) // resume at existing index if we already had streamIndexMutex.Lock() v := cw.streamIndexes[cfg.GroupName+"+"+cfg.StreamName] streamIndexMutex.Unlock() + if v != "" { cfg.logger.Debugf("restarting on index %s", v) startFrom = &v @@ -474,7 +499,9 @@ func (cw *CloudwatchSource) TailLogStream(ctx context.Context, cfg *LogStreamTai select { case <-ticker.C: cfg.logger.Tracef("entering loop") + hasMorePages := true + for hasMorePages { /*for the first call, we only consume the last item*/ cfg.logger.Tracef("calling GetLogEventsPagesWithContext") @@ -489,36 +516,44 @@ func (cw *CloudwatchSource) TailLogStream(ctx context.Context, cfg *LogStreamTai func(page *cloudwatchlogs.GetLogEventsOutput, lastPage bool) bool { cfg.logger.Tracef("%d results, last:%t", len(page.Events), lastPage) startFrom = page.NextForwardToken + if page.NextForwardToken != nil { streamIndexMutex.Lock() cw.streamIndexes[cfg.GroupName+"+"+cfg.StreamName] = *page.NextForwardToken streamIndexMutex.Unlock() } + if lastPage { /*wait another ticker to check on new log availability*/ cfg.logger.Tracef("last page") + hasMorePages = false } + if len(page.Events) > 0 { lastReadMessage = time.Now().UTC() } + for _, event := range page.Events { evt, err := cwLogToEvent(event, cfg) if err != nil { cfg.logger.Warningf("cwLogToEvent error, discarded event : %s", err) } else { cfg.logger.Debugf("pushing message 
: %s", evt.Line.Raw) + if cw.metricsLevel != configuration.METRICS_NONE { linesRead.With(prometheus.Labels{"group": cfg.GroupName, "stream": cfg.StreamName}).Inc() } outChan <- evt } } + return true }, ) if err != nil { newerr := fmt.Errorf("while reading %s/%s: %w", cfg.GroupName, cfg.StreamName, err) cfg.logger.Warningf("err : %s", newerr) + return newerr } cfg.logger.Tracef("done reading GetLogEventsPagesWithContext") diff --git a/pkg/acquisition/modules/docker/docker.go b/pkg/acquisition/modules/docker/docker.go index b27255ec13f..582da3d53a1 100644 --- a/pkg/acquisition/modules/docker/docker.go +++ b/pkg/acquisition/modules/docker/docker.go @@ -12,6 +12,7 @@ import ( "time" dockerTypes "github.com/docker/docker/api/types" + dockerContainer "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" @@ -56,7 +57,7 @@ type DockerSource struct { logger *log.Entry Client client.CommonAPIClient t *tomb.Tomb - containerLogsOptions *dockerTypes.ContainerLogsOptions + containerLogsOptions *dockerContainer.LogsOptions } type ContainerConfig struct { @@ -104,6 +105,7 @@ func (d *DockerSource) UnmarshalConfig(yamlConfig []byte) error { if d.Config.Mode == "" { d.Config.Mode = configuration.TAIL_MODE } + if d.Config.Mode != configuration.CAT_MODE && d.Config.Mode != configuration.TAIL_MODE { return fmt.Errorf("unsupported mode %s for docker datasource", d.Config.Mode) } @@ -120,7 +122,7 @@ func (d *DockerSource) UnmarshalConfig(yamlConfig []byte) error { d.Config.Since = time.Now().UTC().Format(time.RFC3339) } - d.containerLogsOptions = &dockerTypes.ContainerLogsOptions{ + d.containerLogsOptions = &dockerContainer.LogsOptions{ ShowStdout: d.Config.FollowStdout, ShowStderr: d.Config.FollowStdErr, Follow: true, @@ -134,9 +136,10 @@ func (d *DockerSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (d *DockerSource) Configure(yamlConfig []byte, logger 
*log.Entry, MetricsLevel int) error { +func (d *DockerSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { d.logger = logger - d.metricsLevel = MetricsLevel + d.metricsLevel = metricsLevel + err := d.UnmarshalConfig(yamlConfig) if err != nil { return err @@ -146,18 +149,19 @@ func (d *DockerSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe d.logger.Tracef("Actual DockerAcquisition configuration %+v", d.Config) - dockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) - if err != nil { - return err + opts := []client.Opt{ + client.FromEnv, + client.WithAPIVersionNegotiation(), } if d.Config.DockerHost != "" { - err = client.WithHost(d.Config.DockerHost)(dockerClient) - if err != nil { - return err - } + opts = append(opts, client.WithHost(d.Config.DockerHost)) + } + + d.Client, err = client.NewClientWithOpts(opts...) + if err != nil { + return err } - d.Client = dockerClient _, err = d.Client.Info(context.Background()) if err != nil { @@ -170,7 +174,12 @@ func (d *DockerSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLe func (d *DockerSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry, uuid string) error { var err error - if !strings.HasPrefix(dsn, d.GetName()+"://") { + parsedURL, err := url.Parse(dsn) + if err != nil { + return fmt.Errorf("failed to parse DSN %s: %w", dsn, err) + } + + if parsedURL.Scheme != d.GetName() { return fmt.Errorf("invalid DSN %s for docker source, must start with %s://", dsn, d.GetName()) } @@ -187,40 +196,28 @@ func (d *DockerSource) ConfigureByDSN(dsn string, labels map[string]string, logg d.logger = logger d.Config.Labels = labels - dockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) - if err != nil { - return err + opts := []client.Opt{ + client.FromEnv, + client.WithAPIVersionNegotiation(), } - d.containerLogsOptions = &dockerTypes.ContainerLogsOptions{ + 
d.containerLogsOptions = &dockerContainer.LogsOptions{ ShowStdout: d.Config.FollowStdout, ShowStderr: d.Config.FollowStdErr, Follow: false, } - dsn = strings.TrimPrefix(dsn, d.GetName()+"://") - args := strings.Split(dsn, "?") - if len(args) == 0 { - return fmt.Errorf("invalid dsn: %s", dsn) - } + containerNameOrID := parsedURL.Host - if len(args) == 1 && args[0] == "" { + if containerNameOrID == "" { return fmt.Errorf("empty %s DSN", d.GetName()+"://") } - d.Config.ContainerName = append(d.Config.ContainerName, args[0]) + + d.Config.ContainerName = append(d.Config.ContainerName, containerNameOrID) // we add it as an ID also so user can provide docker name or docker ID - d.Config.ContainerID = append(d.Config.ContainerID, args[0]) + d.Config.ContainerID = append(d.Config.ContainerID, containerNameOrID) - // no parameters - if len(args) == 1 { - d.Client = dockerClient - return nil - } - - parameters, err := url.ParseQuery(args[1]) - if err != nil { - return fmt.Errorf("while parsing parameters %s: %w", dsn, err) - } + parameters := parsedURL.Query() for k, v := range parameters { switch k { @@ -267,12 +264,15 @@ func (d *DockerSource) ConfigureByDSN(dsn string, labels map[string]string, logg if len(v) != 1 { return errors.New("only one 'docker_host' parameters is required, not many") } - if err := client.WithHost(v[0])(dockerClient); err != nil { - return err - } + opts = append(opts, client.WithHost(v[0])) } } - d.Client = dockerClient + + d.Client, err = client.NewClientWithOpts(opts...) 
+ if err != nil { + return err + } + return nil } @@ -288,33 +288,42 @@ func (d *DockerSource) SupportedModes() []string { // OneShotAcquisition reads a set of file and returns when done func (d *DockerSource) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { d.logger.Debug("In oneshot") - runningContainer, err := d.Client.ContainerList(ctx, dockerTypes.ContainerListOptions{}) + + runningContainers, err := d.Client.ContainerList(ctx, dockerContainer.ListOptions{}) if err != nil { return err } + foundOne := false - for _, container := range runningContainer { + + for _, container := range runningContainers { if _, ok := d.runningContainerState[container.ID]; ok { d.logger.Debugf("container with id %s is already being read from", container.ID) continue } + if containerConfig := d.EvalContainer(ctx, container); containerConfig != nil { d.logger.Infof("reading logs from container %s", containerConfig.Name) d.logger.Debugf("logs options: %+v", *d.containerLogsOptions) + dockerReader, err := d.Client.ContainerLogs(ctx, containerConfig.ID, *d.containerLogsOptions) if err != nil { d.logger.Errorf("unable to read logs from container: %+v", err) return err } + // we use this library to normalize docker API logs (cf. 
https://ahmet.im/blog/docker-logs-api-binary-format-explained/) foundOne = true + var scanner *bufio.Scanner + if containerConfig.Tty { scanner = bufio.NewScanner(dockerReader) } else { reader := dlog.NewReader(dockerReader) scanner = bufio.NewScanner(reader) } + for scanner.Scan() { select { case <-t.Dying(): @@ -324,6 +333,7 @@ func (d *DockerSource) OneShotAcquisition(ctx context.Context, out chan types.Ev if line == "" { continue } + l := types.Line{} l.Raw = line l.Labels = d.Config.Labels @@ -331,9 +341,11 @@ func (d *DockerSource) OneShotAcquisition(ctx context.Context, out chan types.Ev l.Src = containerConfig.Name l.Process = true l.Module = d.GetName() + if d.metricsLevel != configuration.METRICS_NONE { linesRead.With(prometheus.Labels{"source": containerConfig.Name}).Inc() } + evt := types.MakeEvent(true, types.LOG, true) evt.Line = l evt.Process = true @@ -342,10 +354,12 @@ func (d *DockerSource) OneShotAcquisition(ctx context.Context, out chan types.Ev d.logger.Debugf("Sent line to parsing: %+v", evt.Line.Raw) } } + err = scanner.Err() if err != nil { d.logger.Errorf("Got error from docker read: %s", err) } + d.runningContainerState[container.ID] = containerConfig } } @@ -380,6 +394,7 @@ func (d *DockerSource) getContainerTTY(ctx context.Context, containerId string) if err != nil { return false } + return containerDetails.Config.Tty } @@ -388,6 +403,7 @@ func (d *DockerSource) getContainerLabels(ctx context.Context, containerId strin if err != nil { return map[string]interface{}{} } + return parseLabels(containerDetails.Config.Labels) } @@ -403,6 +419,7 @@ func (d *DockerSource) EvalContainer(ctx context.Context, container dockerTypes. if strings.HasPrefix(name, "/") && name != "" { name = name[1:] } + if name == containerName { return &ContainerConfig{ID: container.ID, Name: name, Labels: d.Config.Labels, Tty: d.getContainerTTY(ctx, container.ID)} } @@ -429,38 +446,49 @@ func (d *DockerSource) EvalContainer(ctx context.Context, container dockerTypes. 
d.logger.Tracef("container has no 'crowdsec' labels set, ignoring container: %s", container.ID) return nil } + if _, ok := parsedLabels["enable"]; !ok { d.logger.Errorf("container has 'crowdsec' labels set but no 'crowdsec.enable' key found") return nil } + enable, ok := parsedLabels["enable"].(string) if !ok { d.logger.Error("container has 'crowdsec.enable' label set but it's not a string") return nil } + if strings.ToLower(enable) != "true" { d.logger.Debugf("container has 'crowdsec.enable' label not set to true ignoring container: %s", container.ID) return nil } + if _, ok = parsedLabels["labels"]; !ok { d.logger.Error("container has 'crowdsec.enable' label set to true but no 'labels' keys found") return nil } + labelsTypeCast, ok := parsedLabels["labels"].(map[string]interface{}) if !ok { d.logger.Error("container has 'crowdsec.enable' label set to true but 'labels' is not a map") return nil } + d.logger.Debugf("container labels %+v", labelsTypeCast) + labels := make(map[string]string) + for k, v := range labelsTypeCast { if v, ok := v.(string); ok { log.Debugf("label %s is a string with value %s", k, v) labels[k] = v + continue } + d.logger.Errorf("label %s is not a string", k) } + return &ContainerConfig{ID: container.ID, Name: container.Names[0], Labels: labels, Tty: d.getContainerTTY(ctx, container.ID)} } @@ -470,6 +498,7 @@ func (d *DockerSource) EvalContainer(ctx context.Context, container dockerTypes. 
func (d *DockerSource) WatchContainer(ctx context.Context, monitChan chan *ContainerConfig, deleteChan chan *ContainerConfig) error { ticker := time.NewTicker(d.CheckIntervalDuration) d.logger.Infof("Container watcher started, interval: %s", d.CheckIntervalDuration.String()) + for { select { case <-d.t.Dying(): @@ -478,32 +507,37 @@ func (d *DockerSource) WatchContainer(ctx context.Context, monitChan chan *Conta case <-ticker.C: // to track for garbage collection runningContainersID := make(map[string]bool) - runningContainer, err := d.Client.ContainerList(ctx, dockerTypes.ContainerListOptions{}) + + runningContainers, err := d.Client.ContainerList(ctx, dockerContainer.ListOptions{}) if err != nil { if strings.Contains(strings.ToLower(err.Error()), "cannot connect to the docker daemon at") { for idx, container := range d.runningContainerState { if d.runningContainerState[idx].t.Alive() { d.logger.Infof("killing tail for container %s", container.Name) d.runningContainerState[idx].t.Kill(nil) + if err := d.runningContainerState[idx].t.Wait(); err != nil { d.logger.Infof("error while waiting for death of %s : %s", container.Name, err) } } + delete(d.runningContainerState, idx) } } else { log.Errorf("container list err: %s", err) } + continue } - for _, container := range runningContainer { + for _, container := range runningContainers { runningContainersID[container.ID] = true // don't need to re eval an already monitored container if _, ok := d.runningContainerState[container.ID]; ok { continue } + if containerConfig := d.EvalContainer(ctx, container); containerConfig != nil { monitChan <- containerConfig } @@ -514,6 +548,7 @@ func (d *DockerSource) WatchContainer(ctx context.Context, monitChan chan *Conta deleteChan <- containerConfig } } + d.logger.Tracef("Reading logs from %d containers", len(d.runningContainerState)) ticker.Reset(d.CheckIntervalDuration) @@ -525,7 +560,9 @@ func (d *DockerSource) StreamingAcquisition(ctx context.Context, out chan types. 
d.t = t monitChan := make(chan *ContainerConfig) deleteChan := make(chan *ContainerConfig) + d.logger.Infof("Starting docker acquisition") + t.Go(func() error { return d.DockerManager(ctx, monitChan, deleteChan, out) }) @@ -546,6 +583,7 @@ func ReadTailScanner(scanner *bufio.Scanner, out chan string, t *tomb.Tomb) erro func (d *DockerSource) TailDocker(ctx context.Context, container *ContainerConfig, outChan chan types.Event, deleteChan chan *ContainerConfig) error { container.logger.Infof("start tail for container %s", container.Name) + dockerReader, err := d.Client.ContainerLogs(ctx, container.ID, *d.containerLogsOptions) if err != nil { container.logger.Errorf("unable to read logs from container: %+v", err) @@ -560,11 +598,13 @@ func (d *DockerSource) TailDocker(ctx context.Context, container *ContainerConfi reader := dlog.NewReader(dockerReader) scanner = bufio.NewScanner(reader) } + readerChan := make(chan string) readerTomb := &tomb.Tomb{} readerTomb.Go(func() error { return ReadTailScanner(scanner, readerChan, readerTomb) }) + for { select { case <-container.t.Dying(): @@ -595,6 +635,7 @@ func (d *DockerSource) TailDocker(ctx context.Context, container *ContainerConfi // Also reset the Since to avoid re-reading logs d.Config.Since = time.Now().UTC().Format(time.RFC3339) d.containerLogsOptions.Since = d.Config.Since + return nil } } @@ -602,6 +643,7 @@ func (d *DockerSource) TailDocker(ctx context.Context, container *ContainerConfi func (d *DockerSource) DockerManager(ctx context.Context, in chan *ContainerConfig, deleteChan chan *ContainerConfig, outChan chan types.Event) error { d.logger.Info("DockerSource Manager started") + for { select { case newContainer := <-in: @@ -611,6 +653,7 @@ func (d *DockerSource) DockerManager(ctx context.Context, in chan *ContainerConf newContainer.t.Go(func() error { return d.TailDocker(ctx, newContainer, outChan, deleteChan) }) + d.runningContainerState[newContainer.ID] = newContainer } case containerToDelete := 
<-deleteChan: @@ -624,13 +667,16 @@ func (d *DockerSource) DockerManager(ctx context.Context, in chan *ContainerConf if d.runningContainerState[idx].t.Alive() { d.logger.Infof("killing tail for container %s", container.Name) d.runningContainerState[idx].t.Kill(nil) + if err := d.runningContainerState[idx].t.Wait(); err != nil { d.logger.Infof("error while waiting for death of %s : %s", container.Name, err) } } } + d.runningContainerState = nil d.logger.Debugf("routine cleanup done, return") + return nil } } diff --git a/pkg/acquisition/modules/docker/docker_test.go b/pkg/acquisition/modules/docker/docker_test.go index 5d8208637e8..73e26b1e497 100644 --- a/pkg/acquisition/modules/docker/docker_test.go +++ b/pkg/acquisition/modules/docker/docker_test.go @@ -82,6 +82,11 @@ func TestConfigureDSN(t *testing.T) { }{ { name: "invalid DSN", + dsn: "asdfasdf", + expectedErr: "invalid DSN asdfasdf for docker source, must start with docker://", + }, + { + name: "invalid DSN scheme", dsn: "asd://", expectedErr: "invalid DSN asd:// for docker source, must start with docker://", }, @@ -102,16 +107,18 @@ func TestConfigureDSN(t *testing.T) { }, { name: "DSN ok with multiple parameters", - dsn: fmt.Sprintf("docker://test_docker?since=42min&docker_host=%s", dockerHost), + dsn: "docker://test_docker?since=42min&docker_host=" + dockerHost, expectedErr: "", }, } subLogger := log.WithField("type", "docker") for _, test := range tests { - f := DockerSource{} - err := f.ConfigureByDSN(test.dsn, map[string]string{"type": "testtype"}, subLogger, "") - cstest.AssertErrorContains(t, err, test.expectedErr) + t.Run(test.name, func(t *testing.T) { + f := DockerSource{} + err := f.ConfigureByDSN(test.dsn, map[string]string{"type": "testtype"}, subLogger, "") + cstest.AssertErrorContains(t, err, test.expectedErr) + }) } } @@ -121,6 +128,7 @@ type mockDockerCli struct { func TestStreamingAcquisition(t *testing.T) { ctx := context.Background() + log.SetOutput(os.Stdout) log.SetLevel(log.InfoLevel) 
log.Info("Test 'TestStreamingAcquisition'") @@ -191,6 +199,7 @@ container_name_regexp: readerTomb.Go(func() error { time.Sleep(1 * time.Second) ticker := time.NewTicker(1 * time.Second) + for { select { case <-out: @@ -205,7 +214,7 @@ container_name_regexp: }) cstest.AssertErrorContains(t, err, ts.expectedErr) - if err := readerTomb.Wait(); err != nil { + if err = readerTomb.Wait(); err != nil { t.Fatal(err) } @@ -220,7 +229,7 @@ container_name_regexp: } } -func (cli *mockDockerCli) ContainerList(ctx context.Context, options dockerTypes.ContainerListOptions) ([]dockerTypes.Container, error) { +func (cli *mockDockerCli) ContainerList(ctx context.Context, options dockerContainer.ListOptions) ([]dockerTypes.Container, error) { if readLogs { return []dockerTypes.Container{}, nil } @@ -235,7 +244,7 @@ func (cli *mockDockerCli) ContainerList(ctx context.Context, options dockerTypes return containers, nil } -func (cli *mockDockerCli) ContainerLogs(ctx context.Context, container string, options dockerTypes.ContainerLogsOptions) (io.ReadCloser, error) { +func (cli *mockDockerCli) ContainerLogs(ctx context.Context, container string, options dockerContainer.LogsOptions) (io.ReadCloser, error) { if readLogs { return io.NopCloser(strings.NewReader("")), nil } @@ -298,38 +307,40 @@ func TestOneShot(t *testing.T) { } for _, ts := range tests { - var ( - subLogger *log.Entry - logger *log.Logger - ) - - if ts.expectedOutput != "" { - logger.SetLevel(ts.logLevel) - subLogger = logger.WithField("type", "docker") - } else { - log.SetLevel(ts.logLevel) - subLogger = log.WithField("type", "docker") - } + t.Run(ts.dsn, func(t *testing.T) { + var ( + subLogger *log.Entry + logger *log.Logger + ) + + if ts.expectedOutput != "" { + logger.SetLevel(ts.logLevel) + subLogger = logger.WithField("type", "docker") + } else { + log.SetLevel(ts.logLevel) + subLogger = log.WithField("type", "docker") + } - readLogs = false - dockerClient := &DockerSource{} - labels := make(map[string]string) - 
labels["type"] = ts.logType + readLogs = false + dockerClient := &DockerSource{} + labels := make(map[string]string) + labels["type"] = ts.logType - if err := dockerClient.ConfigureByDSN(ts.dsn, labels, subLogger, ""); err != nil { - t.Fatalf("unable to configure dsn '%s': %s", ts.dsn, err) - } + if err := dockerClient.ConfigureByDSN(ts.dsn, labels, subLogger, ""); err != nil { + t.Fatalf("unable to configure dsn '%s': %s", ts.dsn, err) + } - dockerClient.Client = new(mockDockerCli) - out := make(chan types.Event, 100) - tomb := tomb.Tomb{} - err := dockerClient.OneShotAcquisition(ctx, out, &tomb) - cstest.AssertErrorContains(t, err, ts.expectedErr) + dockerClient.Client = new(mockDockerCli) + out := make(chan types.Event, 100) + tomb := tomb.Tomb{} + err := dockerClient.OneShotAcquisition(ctx, out, &tomb) + cstest.AssertErrorContains(t, err, ts.expectedErr) - // else we do the check before actualLines is incremented ... - if ts.expectedLines != 0 { - assert.Len(t, out, ts.expectedLines) - } + // else we do the check before actualLines is incremented ... 
+ if ts.expectedLines != 0 { + assert.Len(t, out, ts.expectedLines) + } + }) } } diff --git a/pkg/acquisition/modules/file/file.go b/pkg/acquisition/modules/file/file.go index 9f439b0c82e..697a3d35dc2 100644 --- a/pkg/acquisition/modules/file/file.go +++ b/pkg/acquisition/modules/file/file.go @@ -102,9 +102,9 @@ func (f *FileSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (f *FileSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (f *FileSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { f.logger = logger - f.metricsLevel = MetricsLevel + f.metricsLevel = metricsLevel err := f.UnmarshalConfig(yamlConfig) if err != nil { diff --git a/pkg/acquisition/modules/file/file_test.go b/pkg/acquisition/modules/file/file_test.go index a26e44cc9c7..b9c6e65d8ce 100644 --- a/pkg/acquisition/modules/file/file_test.go +++ b/pkg/acquisition/modules/file/file_test.go @@ -333,14 +333,19 @@ force_inotify: true`, testPattern), logLevel: log.DebugLevel, name: "GlobInotifyChmod", afterConfigure: func() { - f, _ := os.Create("test_files/a.log") - f.Close() + f, err := os.Create("test_files/a.log") + require.NoError(t, err) + err = f.Close() + require.NoError(t, err) time.Sleep(1 * time.Second) - os.Chmod("test_files/a.log", 0o000) + err = os.Chmod("test_files/a.log", 0o000) + require.NoError(t, err) }, teardown: func() { - os.Chmod("test_files/a.log", 0o644) - os.Remove("test_files/a.log") + err := os.Chmod("test_files/a.log", 0o644) + require.NoError(t, err) + err = os.Remove("test_files/a.log") + require.NoError(t, err) }, }, { @@ -353,7 +358,8 @@ force_inotify: true`, testPattern), logLevel: log.DebugLevel, name: "InotifyMkDir", afterConfigure: func() { - os.Mkdir("test_files/pouet/", 0o700) + err := os.Mkdir("test_files/pouet/", 0o700) + require.NoError(t, err) }, teardown: func() { os.Remove("test_files/pouet/") diff --git a/pkg/acquisition/modules/http/http.go 
b/pkg/acquisition/modules/http/http.go index 98af134c84e..97e220570ff 100644 --- a/pkg/acquisition/modules/http/http.go +++ b/pkg/acquisition/modules/http/http.go @@ -16,7 +16,6 @@ import ( "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" - "gopkg.in/tomb.v2" "gopkg.in/yaml.v3" @@ -26,9 +25,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var ( - dataSourceName = "http" -) +var dataSourceName = "http" var linesRead = prometheus.NewCounterVec( prometheus.CounterOpts{ @@ -38,8 +35,8 @@ var linesRead = prometheus.NewCounterVec( []string{"path", "src"}) type HttpConfiguration struct { - //IPFilter []string `yaml:"ip_filter"` - //ChunkSize *int64 `yaml:"chunk_size"` + // IPFilter []string `yaml:"ip_filter"` + // ChunkSize *int64 `yaml:"chunk_size"` ListenAddr string `yaml:"listen_addr"` Path string `yaml:"path"` AuthType string `yaml:"auth_type"` @@ -78,6 +75,7 @@ func (h *HTTPSource) GetUuid() string { func (h *HTTPSource) UnmarshalConfig(yamlConfig []byte) error { h.Config = HttpConfiguration{} + err := yaml.Unmarshal(yamlConfig, &h.Config) if err != nil { return fmt.Errorf("cannot parse %s datasource configuration: %w", dataSourceName, err) @@ -98,6 +96,7 @@ func (hc *HttpConfiguration) Validate() error { if hc.Path == "" { hc.Path = "/" } + if hc.Path[0] != '/' { return errors.New("path must start with /") } @@ -108,9 +107,11 @@ func (hc *HttpConfiguration) Validate() error { if hc.BasicAuth == nil { return errors.New(baseErr + " basic_auth is not provided") } + if hc.BasicAuth.Username == "" { return errors.New(baseErr + " username is not provided") } + if hc.BasicAuth.Password == "" { return errors.New(baseErr + " password is not provided") } @@ -130,6 +131,7 @@ func (hc *HttpConfiguration) Validate() error { if hc.TLS.ServerCert == "" { return errors.New("server_cert is required") } + if hc.TLS.ServerKey == "" { return errors.New("server_key is required") } @@ -155,9 +157,10 @@ func (hc *HttpConfiguration) Validate() 
error { return nil } -func (h *HTTPSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (h *HTTPSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { h.logger = logger - h.metricsLevel = MetricsLevel + h.metricsLevel = metricsLevel + err := h.UnmarshalConfig(yamlConfig) if err != nil { return err @@ -212,6 +215,7 @@ func (hc *HttpConfiguration) NewTLSConfig() (*tls.Config, error) { if err != nil { return nil, fmt.Errorf("failed to load server cert/key: %w", err) } + tlsConfig.Certificates = []tls.Certificate{cert} } @@ -229,6 +233,7 @@ func (hc *HttpConfiguration) NewTLSConfig() (*tls.Config, error) { if caCertPool == nil { caCertPool = x509.NewCertPool() } + caCertPool.AppendCertsFromPEM(caCert) tlsConfig.ClientCAs = caCertPool tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert @@ -243,10 +248,12 @@ func authorizeRequest(r *http.Request, hc *HttpConfiguration) error { if !ok { return errors.New("missing basic auth") } + if username != hc.BasicAuth.Username || password != hc.BasicAuth.Password { return errors.New("invalid basic auth") } } + if hc.AuthType == "headers" { for key, value := range *hc.Headers { if r.Header.Get(key) != value { @@ -254,6 +261,7 @@ func authorizeRequest(r *http.Request, hc *HttpConfiguration) error { } } } + return nil } @@ -282,6 +290,7 @@ func (h *HTTPSource) processRequest(w http.ResponseWriter, r *http.Request, hc * } decoder := json.NewDecoder(reader) + for { var message json.RawMessage @@ -289,7 +298,9 @@ func (h *HTTPSource) processRequest(w http.ResponseWriter, r *http.Request, hc * if err == io.EOF { break } + w.WriteHeader(http.StatusBadRequest) + return fmt.Errorf("failed to decode: %w", err) } @@ -328,13 +339,17 @@ func (h *HTTPSource) RunServer(out chan types.Event, t *tomb.Tomb) error { if r.Method != http.MethodPost { h.logger.Errorf("method not allowed: %s", r.Method) http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + return } + if err := 
authorizeRequest(r, &h.Config); err != nil { h.logger.Errorf("failed to authorize request from '%s': %s", r.RemoteAddr, err) http.Error(w, "Unauthorized", http.StatusUnauthorized) + return } + err := h.processRequest(w, r, &h.Config, out) if err != nil { h.logger.Errorf("failed to process request from '%s': %s", r.RemoteAddr, err) @@ -346,6 +361,7 @@ func (h *HTTPSource) RunServer(out chan types.Event, t *tomb.Tomb) error { w.Header().Set(key, value) } } + if h.Config.CustomStatusCode != nil { w.WriteHeader(*h.Config.CustomStatusCode) } else { @@ -369,25 +385,30 @@ func (h *HTTPSource) RunServer(out chan types.Event, t *tomb.Tomb) error { if err != nil { return fmt.Errorf("failed to create tls config: %w", err) } + h.logger.Tracef("tls config: %+v", tlsConfig) h.Server.TLSConfig = tlsConfig } t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/http/server") + if h.Config.TLS != nil { h.logger.Infof("start https server on %s", h.Config.ListenAddr) + err := h.Server.ListenAndServeTLS(h.Config.TLS.ServerCert, h.Config.TLS.ServerKey) if err != nil && err != http.ErrServerClosed { return fmt.Errorf("https server failed: %w", err) } } else { h.logger.Infof("start http server on %s", h.Config.ListenAddr) + err := h.Server.ListenAndServe() if err != nil && err != http.ErrServerClosed { return fmt.Errorf("http server failed: %w", err) } } + return nil }) diff --git a/pkg/acquisition/modules/http/http_test.go b/pkg/acquisition/modules/http/http_test.go index f89ba7aa8ba..b05979c5adf 100644 --- a/pkg/acquisition/modules/http/http_test.go +++ b/pkg/acquisition/modules/http/http_test.go @@ -14,13 +14,15 @@ import ( "testing" "time" - "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/crowdsecurity/go-cs-lib/cstest" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" - "gopkg.in/tomb.v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/go-cs-lib/cstest" + + 
"github.com/crowdsecurity/crowdsec/pkg/types" ) const ( @@ -218,7 +220,7 @@ func TestGetName(t *testing.T) { assert.Equal(t, "http", h.GetName()) } -func SetupAndRunHTTPSource(t *testing.T, h *HTTPSource, config []byte, metricLevel int) (chan types.Event, *tomb.Tomb) { +func SetupAndRunHTTPSource(t *testing.T, h *HTTPSource, config []byte, metricLevel int) (chan types.Event, *prometheus.Registry, *tomb.Tomb) { ctx := context.Background() subLogger := log.WithFields(log.Fields{ "type": "http", @@ -230,16 +232,18 @@ func SetupAndRunHTTPSource(t *testing.T, h *HTTPSource, config []byte, metricLev err = h.StreamingAcquisition(ctx, out, &tomb) require.NoError(t, err) + testRegistry := prometheus.NewPedanticRegistry() for _, metric := range h.GetMetrics() { - prometheus.Register(metric) + err = testRegistry.Register(metric) + require.NoError(t, err) } - return out, &tomb + return out, testRegistry, &tomb } func TestStreamingAcquisitionWrongHTTPMethod(t *testing.T) { h := &HTTPSource{} - _, tomb := SetupAndRunHTTPSource(t, h, []byte(` + _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -256,13 +260,13 @@ basic_auth: h.Server.Close() tomb.Kill(nil) - tomb.Wait() - + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionUnknownPath(t *testing.T) { h := &HTTPSource{} - _, tomb := SetupAndRunHTTPSource(t, h, []byte(` + _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -279,12 +283,13 @@ basic_auth: h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionBasicAuth(t *testing.T) { h := &HTTPSource{} - _, tomb := SetupAndRunHTTPSource(t, h, []byte(` + _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -311,12 +316,13 @@ basic_auth: h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } 
func TestStreamingAcquisitionBadHeaders(t *testing.T) { h := &HTTPSource{} - _, tomb := SetupAndRunHTTPSource(t, h, []byte(` + _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -338,12 +344,13 @@ headers: h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionMaxBodySize(t *testing.T) { h := &HTTPSource{} - _, tomb := SetupAndRunHTTPSource(t, h, []byte(` + _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -366,12 +373,13 @@ max_body_size: 5`), 0) h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionSuccess(t *testing.T) { h := &HTTPSource{} - out, tomb := SetupAndRunHTTPSource(t, h, []byte(` + out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -397,16 +405,17 @@ headers: err = <-errChan require.NoError(t, err) - assertMetrics(t, h.GetMetrics(), 1) + assertMetrics(t, reg, h.GetMetrics(), 1) h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionCustomStatusCodeAndCustomHeaders(t *testing.T) { h := &HTTPSource{} - out, tomb := SetupAndRunHTTPSource(t, h, []byte(` + out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -436,11 +445,12 @@ custom_headers: err = <-errChan require.NoError(t, err) - assertMetrics(t, h.GetMetrics(), 1) + assertMetrics(t, reg, h.GetMetrics(), 1) h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } type slowReader struct { @@ -496,7 +506,7 @@ func assertEvents(out chan types.Event, expected []string, errChan chan error) { func TestStreamingAcquisitionTimeout(t *testing.T) { h := &HTTPSource{} - _, tomb := SetupAndRunHTTPSource(t, h, []byte(` + _, _, tomb := 
SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -526,12 +536,13 @@ timeout: 1s`), 0) h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionTLSHTTPRequest(t *testing.T) { h := &HTTPSource{} - _, tomb := SetupAndRunHTTPSource(t, h, []byte(` + _, _, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 auth_type: mtls @@ -550,12 +561,13 @@ tls: h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionTLSWithHeadersAuthSuccess(t *testing.T) { h := &HTTPSource{} - out, tomb := SetupAndRunHTTPSource(t, h, []byte(` + out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -601,16 +613,17 @@ tls: err = <-errChan require.NoError(t, err) - assertMetrics(t, h.GetMetrics(), 0) + assertMetrics(t, reg, h.GetMetrics(), 0) h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionMTLS(t *testing.T) { h := &HTTPSource{} - out, tomb := SetupAndRunHTTPSource(t, h, []byte(` + out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -658,16 +671,17 @@ tls: err = <-errChan require.NoError(t, err) - assertMetrics(t, h.GetMetrics(), 0) + assertMetrics(t, reg, h.GetMetrics(), 0) h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionGzipData(t *testing.T) { h := &HTTPSource{} - out, tomb := SetupAndRunHTTPSource(t, h, []byte(` + out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -710,16 +724,17 @@ headers: err = <-errChan require.NoError(t, err) - assertMetrics(t, h.GetMetrics(), 2) + assertMetrics(t, reg, h.GetMetrics(), 2) h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = 
tomb.Wait() + require.NoError(t, err) } func TestStreamingAcquisitionNDJson(t *testing.T) { h := &HTTPSource{} - out, tomb := SetupAndRunHTTPSource(t, h, []byte(` + out, reg, tomb := SetupAndRunHTTPSource(t, h, []byte(` source: http listen_addr: 127.0.0.1:8080 path: /test @@ -748,15 +763,16 @@ headers: err = <-errChan require.NoError(t, err) - assertMetrics(t, h.GetMetrics(), 2) + assertMetrics(t, reg, h.GetMetrics(), 2) h.Server.Close() tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } -func assertMetrics(t *testing.T, metrics []prometheus.Collector, expected int) { - promMetrics, err := prometheus.DefaultGatherer.Gather() +func assertMetrics(t *testing.T, reg *prometheus.Registry, metrics []prometheus.Collector, expected int) { + promMetrics, err := reg.Gather() require.NoError(t, err) isExist := false diff --git a/pkg/acquisition/modules/journalctl/journalctl.go b/pkg/acquisition/modules/journalctl/journalctl.go index 27f20b9f446..f72878d9b3c 100644 --- a/pkg/acquisition/modules/journalctl/journalctl.go +++ b/pkg/acquisition/modules/journalctl/journalctl.go @@ -53,15 +53,18 @@ func readLine(scanner *bufio.Scanner, out chan string, errChan chan error) error txt := scanner.Text() out <- txt } + if errChan != nil && scanner.Err() != nil { errChan <- scanner.Err() close(errChan) // the error is already consumed by runJournalCtl return nil //nolint:nilerr } + if errChan != nil { close(errChan) } + return nil } @@ -69,15 +72,17 @@ func (j *JournalCtlSource) runJournalCtl(ctx context.Context, out chan types.Eve ctx, cancel := context.WithCancel(ctx) cmd := exec.CommandContext(ctx, journalctlCmd, j.args...) 
+ stdout, err := cmd.StdoutPipe() if err != nil { cancel() - return fmt.Errorf("could not get journalctl stdout: %s", err) + return fmt.Errorf("could not get journalctl stdout: %w", err) } + stderr, err := cmd.StderrPipe() if err != nil { cancel() - return fmt.Errorf("could not get journalctl stderr: %s", err) + return fmt.Errorf("could not get journalctl stderr: %w", err) } stderrChan := make(chan string) @@ -87,6 +92,7 @@ func (j *JournalCtlSource) runJournalCtl(ctx context.Context, out chan types.Eve logger := j.logger.WithField("src", j.src) logger.Infof("Running journalctl command: %s %s", cmd.Path, cmd.Args) + err = cmd.Start() if err != nil { cancel() @@ -109,9 +115,11 @@ func (j *JournalCtlSource) runJournalCtl(ctx context.Context, out chan types.Eve cmd.Wait() return errors.New("failed to create stderr scanner") } + t.Go(func() error { return readLine(stdoutscanner, stdoutChan, errChan) }) + t.Go(func() error { // looks like journalctl closes stderr quite early, so ignore its status (but not its output) return readLine(stderrScanner, stderrChan, nil) @@ -123,6 +131,7 @@ func (j *JournalCtlSource) runJournalCtl(ctx context.Context, out chan types.Eve logger.Infof("journalctl datasource %s stopping", j.src) cancel() cmd.Wait() // avoid zombie process + return nil case stdoutLine := <-stdoutChan: l := types.Line{} @@ -133,6 +142,7 @@ func (j *JournalCtlSource) runJournalCtl(ctx context.Context, out chan types.Eve l.Src = j.src l.Process = true l.Module = j.GetName() + if j.metricsLevel != configuration.METRICS_NONE { linesRead.With(prometheus.Labels{"source": j.src}).Inc() } @@ -149,6 +159,7 @@ func (j *JournalCtlSource) runJournalCtl(ctx context.Context, out chan types.Eve logger.Debugf("errChan is closed, quitting") t.Kill(nil) } + if errScanner != nil { t.Kill(errScanner) } @@ -170,6 +181,7 @@ func (j *JournalCtlSource) GetAggregMetrics() []prometheus.Collector { func (j *JournalCtlSource) UnmarshalConfig(yamlConfig []byte) error { j.config = 
JournalCtlConfiguration{} + err := yaml.UnmarshalStrict(yamlConfig, &j.config) if err != nil { return fmt.Errorf("cannot parse JournalCtlSource configuration: %w", err) @@ -189,15 +201,18 @@ func (j *JournalCtlSource) UnmarshalConfig(yamlConfig []byte) error { if len(j.config.Filters) == 0 { return errors.New("journalctl_filter is required") } - j.args = append(args, j.config.Filters...) - j.src = fmt.Sprintf("journalctl-%s", strings.Join(j.config.Filters, ".")) + + args = append(args, j.config.Filters...) + + j.args = args + j.src = "journalctl-" + strings.Join(j.config.Filters, ".") return nil } -func (j *JournalCtlSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (j *JournalCtlSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { j.logger = logger - j.metricsLevel = MetricsLevel + j.metricsLevel = metricsLevel err := j.UnmarshalConfig(yamlConfig) if err != nil { @@ -226,8 +241,9 @@ func (j *JournalCtlSource) ConfigureByDSN(dsn string, labels map[string]string, params, err := url.ParseQuery(qs) if err != nil { - return fmt.Errorf("could not parse journalctl DSN : %s", err) + return fmt.Errorf("could not parse journalctl DSN: %w", err) } + for key, value := range params { switch key { case "filters": @@ -236,10 +252,12 @@ func (j *JournalCtlSource) ConfigureByDSN(dsn string, labels map[string]string, if len(value) != 1 { return errors.New("expected zero or one value for 'log_level'") } + lvl, err := log.ParseLevel(value[0]) if err != nil { return fmt.Errorf("unknown level %s: %w", value[0], err) } + j.logger.Logger.SetLevel(lvl) case "since": j.args = append(j.args, "--since", value[0]) @@ -247,7 +265,9 @@ func (j *JournalCtlSource) ConfigureByDSN(dsn string, labels map[string]string, return fmt.Errorf("unsupported key %s in journalctl DSN", key) } } + j.args = append(j.args, j.config.Filters...) 
+ return nil } @@ -261,8 +281,10 @@ func (j *JournalCtlSource) GetName() string { func (j *JournalCtlSource) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { defer trace.CatchPanic("crowdsec/acquis/journalctl/oneshot") + err := j.runJournalCtl(ctx, out, t) j.logger.Debug("Oneshot journalctl acquisition is done") + return err } @@ -271,6 +293,7 @@ func (j *JournalCtlSource) StreamingAcquisition(ctx context.Context, out chan ty defer trace.CatchPanic("crowdsec/acquis/journalctl/streaming") return j.runJournalCtl(ctx, out, t) }) + return nil } diff --git a/pkg/acquisition/modules/journalctl/journalctl_test.go b/pkg/acquisition/modules/journalctl/journalctl_test.go index 687067c1881..48b034f41c6 100644 --- a/pkg/acquisition/modules/journalctl/journalctl_test.go +++ b/pkg/acquisition/modules/journalctl/journalctl_test.go @@ -12,6 +12,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/sirupsen/logrus/hooks/test" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "gopkg.in/tomb.v2" "github.com/crowdsecurity/go-cs-lib/cstest" @@ -81,7 +82,7 @@ func TestConfigureDSN(t *testing.T) { }, { dsn: "journalctl://filters=%ZZ", - expectedErr: "could not parse journalctl DSN : invalid URL escape \"%ZZ\"", + expectedErr: "could not parse journalctl DSN: invalid URL escape \"%ZZ\"", }, { dsn: "journalctl://filters=_UID=42?log_level=warn", @@ -191,6 +192,7 @@ journalctl_filter: func TestStreaming(t *testing.T) { ctx := context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } @@ -267,10 +269,11 @@ journalctl_filter: } tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) output, _ := exec.Command("pgrep", "-x", "journalctl").CombinedOutput() - if string(output) != "" { + if len(output) != 0 { t.Fatalf("Found a journalctl process after killing the tomb !") } diff --git a/pkg/acquisition/modules/kafka/kafka.go b/pkg/acquisition/modules/kafka/kafka.go index 
77fc44e310d..f213b85814c 100644 --- a/pkg/acquisition/modules/kafka/kafka.go +++ b/pkg/acquisition/modules/kafka/kafka.go @@ -85,9 +85,9 @@ func (k *KafkaSource) UnmarshalConfig(yamlConfig []byte) error { return err } -func (k *KafkaSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (k *KafkaSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { k.logger = logger - k.metricsLevel = MetricsLevel + k.metricsLevel = metricsLevel k.logger.Debugf("start configuring %s source", dataSourceName) @@ -160,6 +160,7 @@ func (k *KafkaSource) ReadMessage(ctx context.Context, out chan types.Event) err k.logger.Errorln(fmt.Errorf("while reading %s message: %w", dataSourceName, err)) continue } + k.logger.Tracef("got message: %s", string(m.Value)) l := types.Line{ Raw: string(m.Value), @@ -170,9 +171,11 @@ func (k *KafkaSource) ReadMessage(ctx context.Context, out chan types.Event) err Module: k.GetName(), } k.logger.Tracef("line with message read from topic '%s': %+v", k.Config.Topic, l) + if k.metricsLevel != configuration.METRICS_NONE { linesRead.With(prometheus.Labels{"topic": k.Config.Topic}).Inc() } + evt := types.MakeEvent(k.Config.UseTimeMachine, types.LOG, true) evt.Line = l out <- evt diff --git a/pkg/acquisition/modules/kafka/kafka_test.go b/pkg/acquisition/modules/kafka/kafka_test.go index d796166a6ca..2f3361c4f6b 100644 --- a/pkg/acquisition/modules/kafka/kafka_test.go +++ b/pkg/acquisition/modules/kafka/kafka_test.go @@ -194,7 +194,8 @@ topic: crowdsecplaintext`), subLogger, configuration.METRICS_NONE) } require.Equal(t, ts.expectedLines, actualLines) tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) }) } } @@ -271,7 +272,8 @@ tls: } require.Equal(t, ts.expectedLines, actualLines) tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) }) } } diff --git a/pkg/acquisition/modules/kinesis/kinesis.go b/pkg/acquisition/modules/kinesis/kinesis.go index 
3744e43f38d..16c91ad06bc 100644 --- a/pkg/acquisition/modules/kinesis/kinesis.go +++ b/pkg/acquisition/modules/kinesis/kinesis.go @@ -99,17 +99,22 @@ func (k *KinesisSource) newClient() error { if sess == nil { return errors.New("failed to create aws session") } + config := aws.NewConfig() + if k.Config.AwsRegion != "" { config = config.WithRegion(k.Config.AwsRegion) } + if k.Config.AwsEndpoint != "" { config = config.WithEndpoint(k.Config.AwsEndpoint) } + k.kClient = kinesis.New(sess, config) if k.kClient == nil { return errors.New("failed to create kinesis client") } + return nil } @@ -136,15 +141,19 @@ func (k *KinesisSource) UnmarshalConfig(yamlConfig []byte) error { if k.Config.StreamName == "" && !k.Config.UseEnhancedFanOut { return errors.New("stream_name is mandatory when use_enhanced_fanout is false") } + if k.Config.StreamARN == "" && k.Config.UseEnhancedFanOut { return errors.New("stream_arn is mandatory when use_enhanced_fanout is true") } + if k.Config.ConsumerName == "" && k.Config.UseEnhancedFanOut { return errors.New("consumer_name is mandatory when use_enhanced_fanout is true") } + if k.Config.StreamARN != "" && k.Config.StreamName != "" { return errors.New("stream_arn and stream_name are mutually exclusive") } + if k.Config.MaxRetries <= 0 { k.Config.MaxRetries = 10 } @@ -152,9 +161,9 @@ func (k *KinesisSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (k *KinesisSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (k *KinesisSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { k.logger = logger - k.metricsLevel = MetricsLevel + k.metricsLevel = metricsLevel err := k.UnmarshalConfig(yamlConfig) if err != nil { @@ -167,6 +176,7 @@ func (k *KinesisSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsL } k.shardReaderTomb = &tomb.Tomb{} + return nil } @@ -188,22 +198,27 @@ func (k *KinesisSource) OneShotAcquisition(_ context.Context, _ chan types.Event func 
(k *KinesisSource) decodeFromSubscription(record []byte) ([]CloudwatchSubscriptionLogEvent, error) { b := bytes.NewBuffer(record) + r, err := gzip.NewReader(b) if err != nil { k.logger.Error(err) return nil, err } + decompressed, err := io.ReadAll(r) if err != nil { k.logger.Error(err) return nil, err } + var subscriptionRecord CloudWatchSubscriptionRecord + err = json.Unmarshal(decompressed, &subscriptionRecord) if err != nil { k.logger.Error(err) return nil, err } + return subscriptionRecord.LogEvents, nil } @@ -214,17 +229,20 @@ func (k *KinesisSource) WaitForConsumerDeregistration(consumerName string, strea ConsumerName: aws.String(consumerName), StreamARN: aws.String(streamARN), }) + + var resourceNotFoundErr *kinesis.ResourceNotFoundException + if errors.As(err, &resourceNotFoundErr) { + return nil + } + if err != nil { - switch err.(type) { - case *kinesis.ResourceNotFoundException: - return nil - default: - k.logger.Errorf("Error while waiting for consumer deregistration: %s", err) - return fmt.Errorf("cannot describe stream consumer: %w", err) - } + k.logger.Errorf("Error while waiting for consumer deregistration: %s", err) + return fmt.Errorf("cannot describe stream consumer: %w", err) } + time.Sleep(time.Millisecond * 200 * time.Duration(i+1)) } + return fmt.Errorf("consumer %s is not deregistered after %d tries", consumerName, maxTries) } @@ -234,17 +252,21 @@ func (k *KinesisSource) DeregisterConsumer() error { ConsumerName: aws.String(k.Config.ConsumerName), StreamARN: aws.String(k.Config.StreamARN), }) + + var resourceNotFoundErr *kinesis.ResourceNotFoundException + if errors.As(err, &resourceNotFoundErr) { + return nil + } + if err != nil { - switch err.(type) { - case *kinesis.ResourceNotFoundException: - default: - return fmt.Errorf("cannot deregister stream consumer: %w", err) - } + return fmt.Errorf("cannot deregister stream consumer: %w", err) } + err = k.WaitForConsumerDeregistration(k.Config.ConsumerName, k.Config.StreamARN) if err != nil { 
return fmt.Errorf("cannot wait for consumer deregistration: %w", err) } + return nil } @@ -257,18 +279,22 @@ func (k *KinesisSource) WaitForConsumerRegistration(consumerARN string) error { if err != nil { return fmt.Errorf("cannot describe stream consumer: %w", err) } + if *describeOutput.ConsumerDescription.ConsumerStatus == "ACTIVE" { k.logger.Debugf("Consumer %s is active", consumerARN) return nil } + time.Sleep(time.Millisecond * 200 * time.Duration(i+1)) k.logger.Debugf("Waiting for consumer registration %d", i) } + return fmt.Errorf("consumer %s is not active after %d tries", consumerARN, maxTries) } func (k *KinesisSource) RegisterConsumer() (*kinesis.RegisterStreamConsumerOutput, error) { k.logger.Debugf("Registering consumer %s", k.Config.ConsumerName) + streamConsumer, err := k.kClient.RegisterStreamConsumer(&kinesis.RegisterStreamConsumerInput{ ConsumerName: aws.String(k.Config.ConsumerName), StreamARN: aws.String(k.Config.StreamARN), @@ -276,10 +302,12 @@ func (k *KinesisSource) RegisterConsumer() (*kinesis.RegisterStreamConsumerOutpu if err != nil { return nil, fmt.Errorf("cannot register stream consumer: %w", err) } + err = k.WaitForConsumerRegistration(*streamConsumer.Consumer.ConsumerARN) if err != nil { return nil, fmt.Errorf("timeout while waiting for consumer to be active: %w", err) } + return streamConsumer, nil } @@ -296,8 +324,12 @@ func (k *KinesisSource) ParseAndPushRecords(records []*kinesis.Record, out chan linesRead.With(prometheus.Labels{"stream": k.Config.StreamName}).Inc() } } - var data []CloudwatchSubscriptionLogEvent - var err error + + var ( + data []CloudwatchSubscriptionLogEvent + err error + ) + if k.Config.FromSubscription { // The AWS docs says that the data is base64 encoded // but apparently GetRecords decodes it for us ? 
@@ -309,19 +341,22 @@ func (k *KinesisSource) ParseAndPushRecords(records []*kinesis.Record, out chan } else { data = []CloudwatchSubscriptionLogEvent{{Message: string(record.Data)}} } + for _, event := range data { logger.Tracef("got record %s", event.Message) + l := types.Line{} l.Raw = event.Message l.Labels = k.Config.Labels l.Time = time.Now().UTC() l.Process = true l.Module = k.GetName() - if k.Config.StreamARN != "" { - l.Src = k.Config.StreamARN - } else { + + l.Src = k.Config.StreamARN + if l.Src == "" { l.Src = k.Config.StreamName } + evt := types.MakeEvent(k.Config.UseTimeMachine, types.LOG, true) evt.Line = l out <- evt @@ -335,20 +370,23 @@ func (k *KinesisSource) ReadFromSubscription(reader kinesis.SubscribeToShardEven // and we won't be able to start a new one if this is the first one started by the tomb // TODO: look into parent shards to see if a shard is closed before starting to read it ? time.Sleep(time.Second) + for { select { case <-k.shardReaderTomb.Dying(): logger.Infof("Subscribed shard reader is dying") - err := reader.Close() - if err != nil { + + if err := reader.Close(); err != nil { return fmt.Errorf("cannot close kinesis subscribed shard reader: %w", err) } + return nil case event, ok := <-reader.Events(): if !ok { logger.Infof("Event chan has been closed") return nil } + switch event := event.(type) { case *kinesis.SubscribeToShardEvent: k.ParseAndPushRecords(event.Records, out, logger, shardId) @@ -369,6 +407,7 @@ func (k *KinesisSource) SubscribeToShards(arn arn.ARN, streamConsumer *kinesis.R for _, shard := range shards.Shards { shardId := *shard.ShardId + r, err := k.kClient.SubscribeToShard(&kinesis.SubscribeToShardInput{ ShardId: aws.String(shardId), StartingPosition: &kinesis.StartingPosition{Type: aws.String(kinesis.ShardIteratorTypeLatest)}, @@ -377,10 +416,12 @@ func (k *KinesisSource) SubscribeToShards(arn arn.ARN, streamConsumer *kinesis.R if err != nil { return fmt.Errorf("cannot subscribe to shard: %w", err) } + 
k.shardReaderTomb.Go(func() error { return k.ReadFromSubscription(r.GetEventStream().Reader, out, shardId, arn.Resource[7:]) }) } + return nil } @@ -389,12 +430,14 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error { if err != nil { return fmt.Errorf("cannot parse stream ARN: %w", err) } + if !strings.HasPrefix(parsedARN.Resource, "stream/") { return fmt.Errorf("resource part of stream ARN %s does not start with stream/", k.Config.StreamARN) } k.logger = k.logger.WithField("stream", parsedARN.Resource[7:]) k.logger.Info("starting kinesis acquisition with enhanced fan-out") + err = k.DeregisterConsumer() if err != nil { return fmt.Errorf("cannot deregister consumer: %w", err) @@ -417,18 +460,22 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error { k.logger.Infof("Kinesis source is dying") k.shardReaderTomb.Kill(nil) _ = k.shardReaderTomb.Wait() // we don't care about the error as we kill the tomb ourselves + err = k.DeregisterConsumer() if err != nil { return fmt.Errorf("cannot deregister consumer: %w", err) } + return nil case <-k.shardReaderTomb.Dying(): k.logger.Debugf("Kinesis subscribed shard reader is dying") + if k.shardReaderTomb.Err() != nil { return k.shardReaderTomb.Err() } // All goroutines have exited without error, so a resharding event, start again k.logger.Debugf("All reader goroutines have exited, resharding event or periodic resubscribe") + continue } } @@ -437,6 +484,7 @@ func (k *KinesisSource) EnhancedRead(out chan types.Event, t *tomb.Tomb) error { func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) error { logger := k.logger.WithField("shard", shardId) logger.Debugf("Starting to read shard") + sharIt, err := k.kClient.GetShardIterator(&kinesis.GetShardIteratorInput{ ShardId: aws.String(shardId), StreamName: &k.Config.StreamName, @@ -446,28 +494,35 @@ func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) erro logger.Errorf("Cannot get 
shard iterator: %s", err) return fmt.Errorf("cannot get shard iterator: %w", err) } + it := sharIt.ShardIterator // AWS recommends to wait for a second between calls to GetRecords for a given shard ticker := time.NewTicker(time.Second) + for { select { case <-ticker.C: records, err := k.kClient.GetRecords(&kinesis.GetRecordsInput{ShardIterator: it}) it = records.NextShardIterator + + var throughputErr *kinesis.ProvisionedThroughputExceededException + if errors.As(err, &throughputErr) { + logger.Warn("Provisioned throughput exceeded") + // TODO: implement exponential backoff + continue + } + + var expiredIteratorErr *kinesis.ExpiredIteratorException + if errors.As(err, &expiredIteratorErr) { + logger.Warn("Expired iterator") + continue + } + if err != nil { - switch err.(type) { - case *kinesis.ProvisionedThroughputExceededException: - logger.Warn("Provisioned throughput exceeded") - // TODO: implement exponential backoff - continue - case *kinesis.ExpiredIteratorException: - logger.Warn("Expired iterator") - continue - default: - logger.Error("Cannot get records") - return fmt.Errorf("cannot get records: %w", err) - } + logger.Error("Cannot get records") + return fmt.Errorf("cannot get records: %w", err) } + k.ParseAndPushRecords(records.Records, out, logger, shardId) if it == nil { @@ -477,6 +532,7 @@ func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) erro case <-k.shardReaderTomb.Dying(): logger.Infof("shardReaderTomb is dying, exiting ReadFromShard") ticker.Stop() + return nil } } @@ -485,6 +541,7 @@ func (k *KinesisSource) ReadFromShard(out chan types.Event, shardId string) erro func (k *KinesisSource) ReadFromStream(out chan types.Event, t *tomb.Tomb) error { k.logger = k.logger.WithField("stream", k.Config.StreamName) k.logger.Info("starting kinesis acquisition from shards") + for { shards, err := k.kClient.ListShards(&kinesis.ListShardsInput{ StreamName: aws.String(k.Config.StreamName), @@ -492,9 +549,12 @@ func (k *KinesisSource) 
ReadFromStream(out chan types.Event, t *tomb.Tomb) error if err != nil { return fmt.Errorf("cannot list shards: %w", err) } + k.shardReaderTomb = &tomb.Tomb{} + for _, shard := range shards.Shards { shardId := *shard.ShardId + k.shardReaderTomb.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/kinesis/streaming/shard") return k.ReadFromShard(out, shardId) @@ -505,6 +565,7 @@ func (k *KinesisSource) ReadFromStream(out chan types.Event, t *tomb.Tomb) error k.logger.Info("kinesis source is dying") k.shardReaderTomb.Kill(nil) _ = k.shardReaderTomb.Wait() // we don't care about the error as we kill the tomb ourselves + return nil case <-k.shardReaderTomb.Dying(): reason := k.shardReaderTomb.Err() @@ -512,7 +573,9 @@ func (k *KinesisSource) ReadFromStream(out chan types.Event, t *tomb.Tomb) error k.logger.Errorf("Unexpected error from shard reader : %s", reason) return reason } + k.logger.Infof("All shards have been closed, probably a resharding event, restarting acquisition") + continue } } @@ -521,11 +584,14 @@ func (k *KinesisSource) ReadFromStream(out chan types.Event, t *tomb.Tomb) error func (k *KinesisSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/kinesis/streaming") + if k.Config.UseEnhancedFanOut { return k.EnhancedRead(out, t) } + return k.ReadFromStream(out, t) }) + return nil } diff --git a/pkg/acquisition/modules/kinesis/kinesis_test.go b/pkg/acquisition/modules/kinesis/kinesis_test.go index 027cbde9240..3f6d780b192 100644 --- a/pkg/acquisition/modules/kinesis/kinesis_test.go +++ b/pkg/acquisition/modules/kinesis/kinesis_test.go @@ -9,6 +9,7 @@ import ( "net" "os" "runtime" + "strconv" "strings" "testing" "time" @@ -18,6 +19,7 @@ import ( "github.com/aws/aws-sdk-go/service/kinesis" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "gopkg.in/tomb.v2" 
"github.com/crowdsecurity/go-cs-lib/cstest" @@ -28,17 +30,20 @@ import ( func getLocalStackEndpoint() (string, error) { endpoint := "http://localhost:4566" + if v := os.Getenv("AWS_ENDPOINT_FORCE"); v != "" { v = strings.TrimPrefix(v, "http://") + _, err := net.Dial("tcp", v) if err != nil { return "", fmt.Errorf("while dialing %s: %w: aws endpoint isn't available", v, err) } } + return endpoint, nil } -func GenSubObject(i int) []byte { +func GenSubObject(t *testing.T, i int) []byte { r := CloudWatchSubscriptionRecord{ MessageType: "subscription", Owner: "test", @@ -48,51 +53,51 @@ func GenSubObject(i int) []byte { LogEvents: []CloudwatchSubscriptionLogEvent{ { ID: "testid", - Message: fmt.Sprintf("%d", i), + Message: strconv.Itoa(i), Timestamp: time.Now().UTC().Unix(), }, }, } body, err := json.Marshal(r) - if err != nil { - log.Fatal(err) - } + require.NoError(t, err) + var b bytes.Buffer gz := gzip.NewWriter(&b) - gz.Write(body) + _, err = gz.Write(body) + require.NoError(t, err) gz.Close() // AWS actually base64 encodes the data, but it looks like kinesis automatically decodes it at some point // localstack does not do it, so let's just write a raw gzipped stream return b.Bytes() } -func WriteToStream(streamName string, count int, shards int, sub bool) { +func WriteToStream(t *testing.T, streamName string, count int, shards int, sub bool) { endpoint, err := getLocalStackEndpoint() - if err != nil { - log.Fatal(err) - } + require.NoError(t, err) + sess := session.Must(session.NewSession()) kinesisClient := kinesis.New(sess, aws.NewConfig().WithEndpoint(endpoint).WithRegion("us-east-1")) + for i := range count { partition := "partition" if shards != 1 { partition = fmt.Sprintf("partition-%d", i%shards) } + var data []byte + if sub { - data = GenSubObject(i) + data = GenSubObject(t, i) } else { - data = []byte(fmt.Sprintf("%d", i)) + data = []byte(strconv.Itoa(i)) } + _, err = kinesisClient.PutRecord(&kinesis.PutRecordInput{ Data: data, PartitionKey: 
aws.String(partition), StreamName: aws.String(streamName), }) - if err != nil { - fmt.Printf("Error writing to stream: %s\n", err) - log.Fatal(err) - } + require.NoError(t, err) } } @@ -111,6 +116,7 @@ func TestBadConfiguration(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + tests := []struct { config string expectedErr string @@ -142,6 +148,7 @@ stream_arn: arn:aws:kinesis:eu-west-1:123456789012:stream/my-stream`, } subLogger := log.WithField("type", "kinesis") + for _, test := range tests { f := KinesisSource{} err := f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) @@ -151,9 +158,11 @@ stream_arn: arn:aws:kinesis:eu-west-1:123456789012:stream/my-stream`, func TestReadFromStream(t *testing.T) { ctx := context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + tests := []struct { config string count int @@ -169,36 +178,39 @@ stream_name: stream-1-shard`, }, } endpoint, _ := getLocalStackEndpoint() + for _, test := range tests { f := KinesisSource{} config := fmt.Sprintf(test.config, endpoint) err := f.Configure([]byte(config), log.WithField("type", "kinesis"), configuration.METRICS_NONE) - if err != nil { - t.Fatalf("Error configuring source: %s", err) - } + require.NoError(t, err) + tomb := &tomb.Tomb{} out := make(chan types.Event) err = f.StreamingAcquisition(ctx, out, tomb) - if err != nil { - t.Fatalf("Error starting source: %s", err) - } + require.NoError(t, err) // Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) - WriteToStream(f.Config.StreamName, test.count, test.shards, false) + WriteToStream(t, f.Config.StreamName, test.count, test.shards, false) + for i := range test.count { e := <-out - assert.Equal(t, fmt.Sprintf("%d", i), e.Line.Raw) + assert.Equal(t, strconv.Itoa(i), e.Line.Raw) } + tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } } func TestReadFromMultipleShards(t *testing.T) { ctx := 
context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + tests := []struct { config string count int @@ -214,38 +226,40 @@ stream_name: stream-2-shards`, }, } endpoint, _ := getLocalStackEndpoint() + for _, test := range tests { f := KinesisSource{} config := fmt.Sprintf(test.config, endpoint) err := f.Configure([]byte(config), log.WithField("type", "kinesis"), configuration.METRICS_NONE) - if err != nil { - t.Fatalf("Error configuring source: %s", err) - } + require.NoError(t, err) tomb := &tomb.Tomb{} out := make(chan types.Event) err = f.StreamingAcquisition(ctx, out, tomb) - if err != nil { - t.Fatalf("Error starting source: %s", err) - } + require.NoError(t, err) // Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) - WriteToStream(f.Config.StreamName, test.count, test.shards, false) + WriteToStream(t, f.Config.StreamName, test.count, test.shards, false) + c := 0 + for range test.count { <-out c += 1 } assert.Equal(t, test.count, c) tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } } func TestFromSubscription(t *testing.T) { ctx := context.Background() + if runtime.GOOS == "windows" { t.Skip("Skipping test on windows") } + tests := []struct { config string count int @@ -266,24 +280,21 @@ from_subscription: true`, f := KinesisSource{} config := fmt.Sprintf(test.config, endpoint) err := f.Configure([]byte(config), log.WithField("type", "kinesis"), configuration.METRICS_NONE) - if err != nil { - t.Fatalf("Error configuring source: %s", err) - } + require.NoError(t, err) tomb := &tomb.Tomb{} out := make(chan types.Event) err = f.StreamingAcquisition(ctx, out, tomb) - if err != nil { - t.Fatalf("Error starting source: %s", err) - } + require.NoError(t, err) // Allow the datasource to start listening to the stream time.Sleep(4 * time.Second) - WriteToStream(f.Config.StreamName, test.count, test.shards, true) + WriteToStream(t, f.Config.StreamName, test.count, test.shards, 
true) for i := range test.count { e := <-out assert.Equal(t, fmt.Sprintf("%d", i), e.Line.Raw) } tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) } } @@ -310,15 +321,11 @@ use_enhanced_fanout: true`, f := KinesisSource{} config := fmt.Sprintf(test.config, endpoint) err := f.Configure([]byte(config), log.WithField("type", "kinesis")) - if err != nil { - t.Fatalf("Error configuring source: %s", err) - } + require.NoError(t, err) tomb := &tomb.Tomb{} out := make(chan types.Event) err = f.StreamingAcquisition(out, tomb) - if err != nil { - t.Fatalf("Error starting source: %s", err) - } + require.NoError(t, err) //Allow the datasource to start listening to the stream time.Sleep(10 * time.Second) WriteToStream("stream-1-shard", test.count, test.shards) diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go index 1fa6c894a32..b0650d3906e 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit.go @@ -66,6 +66,7 @@ func (ka *KubernetesAuditSource) GetAggregMetrics() []prometheus.Collector { func (ka *KubernetesAuditSource) UnmarshalConfig(yamlConfig []byte) error { k8sConfig := KubernetesAuditConfiguration{} + err := yaml.UnmarshalStrict(yamlConfig, &k8sConfig) if err != nil { return fmt.Errorf("cannot parse k8s-audit configuration: %w", err) @@ -92,12 +93,13 @@ func (ka *KubernetesAuditSource) UnmarshalConfig(yamlConfig []byte) error { if ka.config.Mode == "" { ka.config.Mode = configuration.TAIL_MODE } + return nil } -func (ka *KubernetesAuditSource) Configure(config []byte, logger *log.Entry, MetricsLevel int) error { +func (ka *KubernetesAuditSource) Configure(config []byte, logger *log.Entry, metricsLevel int) error { ka.logger = logger - ka.metricsLevel = MetricsLevel + ka.metricsLevel = metricsLevel err := ka.UnmarshalConfig(config) if err != nil { @@ -116,6 +118,7 @@ func (ka *KubernetesAuditSource) 
Configure(config []byte, logger *log.Entry, Met } ka.mux.HandleFunc(ka.config.WebhookPath, ka.webhookHandler) + return nil } @@ -137,6 +140,7 @@ func (ka *KubernetesAuditSource) OneShotAcquisition(_ context.Context, _ chan ty func (ka *KubernetesAuditSource) StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { ka.outChan = out + t.Go(func() error { defer trace.CatchPanic("crowdsec/acquis/k8s-audit/live") ka.logger.Infof("Starting k8s-audit server on %s:%d%s", ka.config.ListenAddr, ka.config.ListenPort, ka.config.WebhookPath) @@ -145,13 +149,18 @@ func (ka *KubernetesAuditSource) StreamingAcquisition(ctx context.Context, out c if err != nil && err != http.ErrServerClosed { return fmt.Errorf("k8s-audit server failed: %w", err) } + return nil }) <-t.Dying() ka.logger.Infof("Stopping k8s-audit server on %s:%d%s", ka.config.ListenAddr, ka.config.ListenPort, ka.config.WebhookPath) - ka.server.Shutdown(ctx) + if err := ka.server.Shutdown(ctx); err != nil { + ka.logger.Errorf("Error shutting down k8s-audit server: %s", err.Error()) + } + return nil }) + return nil } @@ -167,42 +176,52 @@ func (ka *KubernetesAuditSource) webhookHandler(w http.ResponseWriter, r *http.R if ka.metricsLevel != configuration.METRICS_NONE { requestCount.WithLabelValues(ka.addr).Inc() } + if r.Method != http.MethodPost { w.WriteHeader(http.StatusMethodNotAllowed) return } + ka.logger.Tracef("webhookHandler called") + var auditEvents audit.EventList jsonBody, err := io.ReadAll(r.Body) if err != nil { ka.logger.Errorf("Error reading request body: %v", err) w.WriteHeader(http.StatusInternalServerError) + return } + ka.logger.Tracef("webhookHandler receveid: %s", string(jsonBody)) + err = json.Unmarshal(jsonBody, &auditEvents) if err != nil { ka.logger.Errorf("Error decoding audit events: %s", err) w.WriteHeader(http.StatusInternalServerError) + return } remoteIP := strings.Split(r.RemoteAddr, ":")[0] - for _, auditEvent := range auditEvents.Items { + + for idx := 
range auditEvents.Items { if ka.metricsLevel != configuration.METRICS_NONE { eventCount.WithLabelValues(ka.addr).Inc() } - bytesEvent, err := json.Marshal(auditEvent) + + bytesEvent, err := json.Marshal(auditEvents.Items[idx]) if err != nil { ka.logger.Errorf("Error serializing audit event: %s", err) continue } + ka.logger.Tracef("Got audit event: %s", string(bytesEvent)) l := types.Line{ Raw: string(bytesEvent), Labels: ka.config.Labels, - Time: auditEvent.StageTimestamp.Time, + Time: auditEvents.Items[idx].StageTimestamp.Time, Src: remoteIP, Process: true, Module: ka.GetName(), diff --git a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go index a086a756e4a..bf8a8cea02c 100644 --- a/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go +++ b/pkg/acquisition/modules/kubernetesaudit/k8s_audit_test.go @@ -85,7 +85,8 @@ webhook_path: /k8s-audit`, err = f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) require.NoError(t, err) - f.StreamingAcquisition(ctx, out, tb) + err = f.StreamingAcquisition(ctx, out, tb) + require.NoError(t, err) time.Sleep(1 * time.Second) tb.Kill(nil) @@ -260,7 +261,8 @@ webhook_path: /k8s-audit`, req := httptest.NewRequest(test.method, "/k8s-audit", strings.NewReader(test.body)) w := httptest.NewRecorder() - f.StreamingAcquisition(ctx, out, tb) + err = f.StreamingAcquisition(ctx, out, tb) + require.NoError(t, err) f.webhookHandler(w, req) diff --git a/pkg/acquisition/modules/loki/loki.go b/pkg/acquisition/modules/loki/loki.go index c57e6a67c94..47493d8cdfe 100644 --- a/pkg/acquisition/modules/loki/loki.go +++ b/pkg/acquisition/modules/loki/loki.go @@ -120,10 +120,10 @@ func (l *LokiSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (l *LokiSource) Configure(config []byte, logger *log.Entry, MetricsLevel int) error { +func (l *LokiSource) Configure(config []byte, logger *log.Entry, metricsLevel int) error { l.Config = 
LokiConfiguration{} l.logger = logger - l.metricsLevel = MetricsLevel + l.metricsLevel = metricsLevel err := l.UnmarshalConfig(config) if err != nil { return err diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go index 66d842ed519..04c7053ef27 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc3164/parse.go @@ -48,7 +48,6 @@ func WithStrictHostname() RFC3164Option { } func (r *RFC3164) parsePRI() error { - pri := 0 if r.buf[r.position] != '<' { diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go index 639e91e1224..c9aa89f7256 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse.go @@ -48,7 +48,6 @@ func WithStrictHostname() RFC5424Option { } func (r *RFC5424) parsePRI() error { - pri := 0 if r.buf[r.position] != '<' { @@ -94,7 +93,6 @@ func (r *RFC5424) parseVersion() error { } func (r *RFC5424) parseTimestamp() error { - timestamp := []byte{} if r.buf[r.position] == NIL_VALUE { @@ -121,7 +119,6 @@ func (r *RFC5424) parseTimestamp() error { } date, err := time.Parse(VALID_TIMESTAMP, string(timestamp)) - if err != nil { return errors.New("timestamp is not valid") } diff --git a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go index 0938e947fe7..d3a68c196db 100644 --- a/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go +++ b/pkg/acquisition/modules/syslog/internal/parser/rfc5424/parse_test.go @@ -94,7 +94,8 @@ func TestParse(t *testing.T) { }{ { "valid msg", - `<13>1 2021-05-18T11:58:40.828081+02:42 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla`, expected{ + `<13>1 
2021-05-18T11:58:40.828081+02:42 mantis sshd 49340 - [timeQuality isSynced="0" tzKnown="1"] blabla`, + expected{ Timestamp: time.Date(2021, 5, 18, 11, 58, 40, 828081000, time.FixedZone("+0242", 9720)), Hostname: "mantis", Tag: "sshd", @@ -102,11 +103,14 @@ func TestParse(t *testing.T) { MsgID: "", Message: "blabla", PRI: 13, - }, "", []RFC5424Option{}, + }, + "", + []RFC5424Option{}, }, { "valid msg with msgid", - `<13>1 2021-05-18T11:58:40.828081+02:42 mantis foobar 49340 123123 [timeQuality isSynced="0" tzKnown="1"] blabla`, expected{ + `<13>1 2021-05-18T11:58:40.828081+02:42 mantis foobar 49340 123123 [timeQuality isSynced="0" tzKnown="1"] blabla`, + expected{ Timestamp: time.Date(2021, 5, 18, 11, 58, 40, 828081000, time.FixedZone("+0242", 9720)), Hostname: "mantis", Tag: "foobar", @@ -114,11 +118,14 @@ func TestParse(t *testing.T) { MsgID: "123123", Message: "blabla", PRI: 13, - }, "", []RFC5424Option{}, + }, + "", + []RFC5424Option{}, }, { "valid msg with repeating SD", - `<13>1 2021-05-18T11:58:40.828081+02:42 mantis foobar 49340 123123 [timeQuality isSynced="0" tzKnown="1"][foo="bar][a] blabla`, expected{ + `<13>1 2021-05-18T11:58:40.828081+02:42 mantis foobar 49340 123123 [timeQuality isSynced="0" tzKnown="1"][foo="bar][a] blabla`, + expected{ Timestamp: time.Date(2021, 5, 18, 11, 58, 40, 828081000, time.FixedZone("+0242", 9720)), Hostname: "mantis", Tag: "foobar", @@ -126,36 +133,53 @@ func TestParse(t *testing.T) { MsgID: "123123", Message: "blabla", PRI: 13, - }, "", []RFC5424Option{}, + }, + "", + []RFC5424Option{}, }, { "invalid SD", - `<13>1 2021-05-18T11:58:40.828081+02:00 mantis foobar 49340 123123 [timeQuality asd`, expected{}, "structured data must end with ']'", []RFC5424Option{}, + `<13>1 2021-05-18T11:58:40.828081+02:00 mantis foobar 49340 123123 [timeQuality asd`, + expected{}, + "structured data must end with ']'", + []RFC5424Option{}, }, { "invalid version", - `<13>42 2021-05-18T11:58:40.828081+02:00 mantis foobar 49340 123123 [timeQuality 
isSynced="0" tzKnown="1"] blabla`, expected{}, "version must be 1", []RFC5424Option{}, + `<13>42 2021-05-18T11:58:40.828081+02:00 mantis foobar 49340 123123 [timeQuality isSynced="0" tzKnown="1"] blabla`, + expected{}, + "version must be 1", + []RFC5424Option{}, }, { "invalid message", - `<13>1`, expected{}, "version must be followed by a space", []RFC5424Option{}, + `<13>1`, + expected{}, + "version must be followed by a space", + []RFC5424Option{}, }, { "valid msg with empty fields", - `<13>1 - foo - - - - blabla`, expected{ + `<13>1 - foo - - - - blabla`, + expected{ Timestamp: time.Now().UTC(), Hostname: "foo", PRI: 13, Message: "blabla", - }, "", []RFC5424Option{}, + }, + "", + []RFC5424Option{}, }, { "valid msg with empty fields", - `<13>1 - - - - - - blabla`, expected{ + `<13>1 - - - - - - blabla`, + expected{ Timestamp: time.Now().UTC(), PRI: 13, Message: "blabla", - }, "", []RFC5424Option{}, + }, + "", + []RFC5424Option{}, }, { "valid msg with escaped SD", @@ -167,7 +191,9 @@ func TestParse(t *testing.T) { Hostname: "testhostname", MsgID: `sn="msgid"`, Message: `testmessage`, - }, "", []RFC5424Option{}, + }, + "", + []RFC5424Option{}, }, { "valid complex msg", @@ -179,7 +205,9 @@ func TestParse(t *testing.T) { PRI: 13, MsgID: `sn="msgid"`, Message: `source: sn="www.foobar.com" | message: 1.1.1.1 - - [24/May/2022:10:57:37 +0200] "GET /dist/precache-manifest.58b57debe6bc4f96698da0dc314461e9.js HTTP/2.0" 304 0 "https://www.foobar.com/sw.js" "Mozilla/5.0 (Linux; Android 9; ANE-LX1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.61 Mobile Safari/537.36" "-" "www.foobar.com" sn="www.foobar.com" rt=0.000 ua="-" us="-" ut="-" ul="-" cs=HIT { request: /dist/precache-manifest.58b57debe6bc4f96698da0dc314461e9.js | src_ip_geo_country: DE | MONTH: May | COMMONAPACHELOG: 1.1.1.1 - - [24/May/2022:10:57:37 +0200] "GET /dist/precache-manifest.58b57debe6bc4f96698da0dc314461e9.js HTTP/2.0" 304 0 | auth: - | HOUR: 10 | gl2_remote_ip: 172.31.32.142 | ident: - | 
gl2_remote_port: 43375 | BASE10NUM: [2.0, 304, 0] | pid: -1 | program: nginx | gl2_source_input: 623ed3440183476d61cff974 | INT: +0200 | is_private_ip: false | YEAR: 2022 | src_ip_geo_city: Achern | clientip: 1.1.1.1 | USERNAME:`, - }, "", []RFC5424Option{}, + }, + "", + []RFC5424Option{}, }, { "partial message", diff --git a/pkg/acquisition/modules/syslog/internal/server/syslogserver.go b/pkg/acquisition/modules/syslog/internal/server/syslogserver.go index 7118c295b54..83f5e5a57e5 100644 --- a/pkg/acquisition/modules/syslog/internal/server/syslogserver.go +++ b/pkg/acquisition/modules/syslog/internal/server/syslogserver.go @@ -25,7 +25,6 @@ type SyslogMessage struct { } func (s *SyslogServer) Listen(listenAddr string, port int) error { - s.listenAddr = listenAddr s.port = port udpAddr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", s.listenAddr, s.port)) diff --git a/pkg/acquisition/modules/syslog/syslog.go b/pkg/acquisition/modules/syslog/syslog.go index fb6a04600c1..df805d08cae 100644 --- a/pkg/acquisition/modules/syslog/syslog.go +++ b/pkg/acquisition/modules/syslog/syslog.go @@ -124,10 +124,10 @@ func (s *SyslogSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (s *SyslogSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (s *SyslogSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { s.logger = logger s.logger.Infof("Starting syslog datasource configuration") - s.metricsLevel = MetricsLevel + s.metricsLevel = metricsLevel err := s.UnmarshalConfig(yamlConfig) if err != nil { return err diff --git a/pkg/acquisition/modules/syslog/syslog_test.go b/pkg/acquisition/modules/syslog/syslog_test.go index 57fa3e8747b..3008ba5507b 100644 --- a/pkg/acquisition/modules/syslog/syslog_test.go +++ b/pkg/acquisition/modules/syslog/syslog_test.go @@ -10,6 +10,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" 
"gopkg.in/tomb.v2" "github.com/crowdsecurity/go-cs-lib/cstest" @@ -168,7 +169,8 @@ listen_addr: 127.0.0.1`, } assert.Equal(t, ts.expectedLines, actualLines) tomb.Kill(nil) - tomb.Wait() + err = tomb.Wait() + require.NoError(t, err) }) } } diff --git a/pkg/acquisition/modules/victorialogs/internal/vlclient/types.go b/pkg/acquisition/modules/victorialogs/internal/vlclient/types.go new file mode 100644 index 00000000000..167a84e41b1 --- /dev/null +++ b/pkg/acquisition/modules/victorialogs/internal/vlclient/types.go @@ -0,0 +1,12 @@ +package vlclient + +import ( + "time" +) + +// Log represents a VictoriaLogs log line +// See: https://docs.victoriametrics.com/victorialogs/querying/#querying-logs +type Log struct { + Message string `json:"_msg"` + Time time.Time `json:"_time"` +} diff --git a/pkg/acquisition/modules/victorialogs/internal/vlclient/vl_client.go b/pkg/acquisition/modules/victorialogs/internal/vlclient/vl_client.go new file mode 100644 index 00000000000..402754a1307 --- /dev/null +++ b/pkg/acquisition/modules/victorialogs/internal/vlclient/vl_client.go @@ -0,0 +1,405 @@ +package vlclient + +import ( + "bufio" + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "time" + + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/crowdsec/pkg/apiclient/useragent" +) + +type VLClient struct { + Logger *log.Entry + + config Config + t *tomb.Tomb + failStart time.Time + currentTickerInterval time.Duration + requestHeaders map[string]string + + client *http.Client +} + +type Config struct { + URL string + Prefix string + Query string + Headers map[string]string + + Username string + Password string + + Since time.Duration + + FailMaxDuration time.Duration + + Limit int +} + +func updateURI(uri string, newStart time.Time) string { + u, _ := url.Parse(uri) + queryParams := u.Query() + + if !newStart.IsZero() { + // +1 the last timestamp to avoid getting the same 
result again. + updatedStart := newStart.Add(1 * time.Nanosecond) + queryParams.Set("start", updatedStart.Format(time.RFC3339Nano)) + } + + u.RawQuery = queryParams.Encode() + + return u.String() +} + +func (lc *VLClient) SetTomb(t *tomb.Tomb) { + lc.t = t +} + +func (lc *VLClient) shouldRetry() bool { + if lc.failStart.IsZero() { + lc.Logger.Warningf("VictoriaLogs is not available, will retry for %s", lc.config.FailMaxDuration) + lc.failStart = time.Now() + + return true + } + + if time.Since(lc.failStart) > lc.config.FailMaxDuration { + lc.Logger.Errorf("VictoriaLogs didn't manage to recover after %s, giving up", lc.config.FailMaxDuration) + return false + } + + return true +} + +func (lc *VLClient) increaseTicker(ticker *time.Ticker) { + maxTicker := 10 * time.Second + if lc.currentTickerInterval < maxTicker { + lc.currentTickerInterval *= 2 + if lc.currentTickerInterval > maxTicker { + lc.currentTickerInterval = maxTicker + } + + ticker.Reset(lc.currentTickerInterval) + } +} + +func (lc *VLClient) decreaseTicker(ticker *time.Ticker) { + minTicker := 100 * time.Millisecond + if lc.currentTickerInterval != minTicker { + lc.currentTickerInterval = minTicker + ticker.Reset(lc.currentTickerInterval) + } +} + +func (lc *VLClient) doQueryRange(ctx context.Context, uri string, c chan *Log, infinite bool) error { + lc.currentTickerInterval = 100 * time.Millisecond + ticker := time.NewTicker(lc.currentTickerInterval) + + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-lc.t.Dying(): + return lc.t.Err() + case <-ticker.C: + resp, err := lc.Get(ctx, uri) + if err != nil { + if ok := lc.shouldRetry(); !ok { + return fmt.Errorf("error querying range: %w", err) + } + + lc.increaseTicker(ticker) + + continue + } + + if resp.StatusCode != http.StatusOK { + lc.Logger.Warnf("bad HTTP response code for query range: %d", resp.StatusCode) + body, _ := io.ReadAll(resp.Body) + resp.Body.Close() + + if ok := lc.shouldRetry(); !ok { + return 
fmt.Errorf("bad HTTP response code: %d: %s: %w", resp.StatusCode, string(body), err) + } + + lc.increaseTicker(ticker) + + continue + } + + n, largestTime, err := lc.readResponse(ctx, resp, c) + if err != nil { + return err + } + + if !infinite && n < lc.config.Limit { + lc.Logger.Infof("Got less than %d results (%d), stopping", lc.config.Limit, n) + close(c) + + return nil + } + + lc.Logger.Debugf("(timer:%v) %d results (uri:%s)", lc.currentTickerInterval, n, uri) + + if infinite { + if n > 0 { + // as long as we get results, we keep lowest ticker + lc.decreaseTicker(ticker) + } else { + lc.increaseTicker(ticker) + } + } + + uri = updateURI(uri, largestTime) + } + } +} + +// Parses response from body in JSON-LD format and sends results to the channel +func (lc *VLClient) readResponse(ctx context.Context, resp *http.Response, c chan *Log) (int, time.Time, error) { + br := bufio.NewReaderSize(resp.Body, 64*1024) + + var ( + finishedReading bool + n int + latestTs time.Time + ) + + for !finishedReading { + select { + case <-ctx.Done(): + return n, latestTs, nil + default: + } + + b, err := br.ReadBytes('\n') + if err != nil { + if errors.Is(err, bufio.ErrBufferFull) { + lc.Logger.Infof("skipping line number #%d: line too long", n) + continue + } + + if errors.Is(err, io.EOF) { + // b can be != nil when EOF is returned, so we need to process it + finishedReading = true + } else if errors.Is(err, context.Canceled) { + return n, latestTs, nil + } else { + return n, latestTs, fmt.Errorf("cannot read line in response: %w", err) + } + } + + if len(b) == 0 { + continue + } + + b = bytes.Trim(b, "\n") + + var logLine Log + + if err := json.Unmarshal(b, &logLine); err != nil { + lc.Logger.Warnf("cannot unmarshal line in response: %s", string(b)) + continue + } + + n++ + + lc.Logger.Tracef("Got response: %+v", logLine) + c <- &logLine + + if logLine.Time.After(latestTs) { + latestTs = logLine.Time + } + } + + return n, latestTs, nil +} + +func (lc *VLClient) getURLFor(endpoint 
string, params map[string]string) string { + u, err := url.Parse(lc.config.URL) + if err != nil { + return "" + } + + queryParams := u.Query() + + for k, v := range params { + queryParams.Set(k, v) + } + + u.RawQuery = queryParams.Encode() + + u.Path, err = url.JoinPath(lc.config.Prefix, u.Path, endpoint) + if err != nil { + return "" + } + + return u.String() +} + +func (lc *VLClient) Ready(ctx context.Context) error { + tick := time.NewTicker(500 * time.Millisecond) + u := lc.getURLFor("", nil) + + for { + select { + case <-ctx.Done(): + tick.Stop() + return ctx.Err() + case <-lc.t.Dying(): + tick.Stop() + return lc.t.Err() + case <-tick.C: + lc.Logger.Debug("Checking if VictoriaLogs is ready") + + resp, err := lc.Get(ctx, u) + if err != nil { + lc.Logger.Warnf("Error checking if VictoriaLogs is ready: %s", err) + continue + } + + _ = resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + lc.Logger.Debugf("VictoriaLogs is not ready, status code: %d", resp.StatusCode) + continue + } + + lc.Logger.Info("VictoriaLogs is ready") + + return nil + } + } +} + +// Tail live-tailing for logs +// See: https://docs.victoriametrics.com/victorialogs/querying/#live-tailing +func (lc *VLClient) Tail(ctx context.Context) (chan *Log, error) { + t := time.Now().Add(-1 * lc.config.Since) + u := lc.getURLFor("select/logsql/tail", map[string]string{ + "limit": strconv.Itoa(lc.config.Limit), + "start": t.Format(time.RFC3339Nano), + "query": lc.config.Query, + }) + + lc.Logger.Debugf("Since: %s (%s)", lc.config.Since, t) + lc.Logger.Infof("Connecting to %s", u) + + var ( + resp *http.Response + err error + ) + + for { + resp, err = lc.Get(ctx, u) + lc.Logger.Tracef("Tail request done: %v | %s", resp, err) + + if err != nil { + if errors.Is(err, context.Canceled) { + return nil, nil + } + + if ok := lc.shouldRetry(); !ok { + return nil, fmt.Errorf("error tailing logs: %w", err) + } + + continue + } + + break + } + + if resp.StatusCode != http.StatusOK { + lc.Logger.Warnf("bad 
HTTP response code for tail request: %d", resp.StatusCode) + body, _ := io.ReadAll(resp.Body) + resp.Body.Close() + + if ok := lc.shouldRetry(); !ok { + return nil, fmt.Errorf("bad HTTP response code: %d: %s: %w", resp.StatusCode, string(body), err) + } + } + + responseChan := make(chan *Log) + + lc.t.Go(func() error { + _, _, err = lc.readResponse(ctx, resp, responseChan) + if err != nil { + return fmt.Errorf("error while reading tail response: %w", err) + } + + return nil + }) + + return responseChan, nil +} + +// QueryRange queries the logs +// See: https://docs.victoriametrics.com/victorialogs/querying/#querying-logs +func (lc *VLClient) QueryRange(ctx context.Context, infinite bool) chan *Log { + t := time.Now().Add(-1 * lc.config.Since) + u := lc.getURLFor("select/logsql/query", map[string]string{ + "query": lc.config.Query, + "start": t.Format(time.RFC3339Nano), + "limit": strconv.Itoa(lc.config.Limit), + }) + + c := make(chan *Log) + + lc.Logger.Debugf("Since: %s (%s)", lc.config.Since, t) + + lc.Logger.Infof("Connecting to %s", u) + lc.t.Go(func() error { + return lc.doQueryRange(ctx, u, c, infinite) + }) + + return c +} + +func (lc *VLClient) Get(ctx context.Context, url string) (*http.Response, error) { + request, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, err + } + + for k, v := range lc.requestHeaders { + request.Header.Add(k, v) + } + + lc.Logger.Debugf("GET %s", url) + + return lc.client.Do(request) +} + +func NewVLClient(config Config) *VLClient { + headers := make(map[string]string) + for k, v := range config.Headers { + headers[k] = v + } + + if config.Username != "" || config.Password != "" { + headers["Authorization"] = "Basic " + base64.StdEncoding.EncodeToString([]byte(config.Username+":"+config.Password)) + } + + headers["User-Agent"] = useragent.Default() + + return &VLClient{ + Logger: log.WithField("component", "victorialogs-client"), + config: config, + requestHeaders: headers, + 
client: &http.Client{}, + } +} diff --git a/pkg/acquisition/modules/victorialogs/victorialogs.go b/pkg/acquisition/modules/victorialogs/victorialogs.go new file mode 100644 index 00000000000..c6bb3b320ba --- /dev/null +++ b/pkg/acquisition/modules/victorialogs/victorialogs.go @@ -0,0 +1,369 @@ +package victorialogs + +import ( + "context" + "errors" + "fmt" + "net/url" + "strconv" + "strings" + "time" + + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + "gopkg.in/tomb.v2" + "gopkg.in/yaml.v2" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/victorialogs/internal/vlclient" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +const ( + defaultLimit int = 100 +) + +var linesRead = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "cs_victorialogssource_hits_total", + Help: "Total lines that were read.", + }, + []string{"source"}) + +type VLAuthConfiguration struct { + Username string `yaml:"username"` + Password string `yaml:"password"` +} + +type VLConfiguration struct { + URL string `yaml:"url"` // VictoriaLogs url + Prefix string `yaml:"prefix"` // VictoriaLogs prefix + Query string `yaml:"query"` // LogsQL query + Limit int `yaml:"limit"` // Limit of logs to read + Since time.Duration `yaml:"since"` + Headers map[string]string `yaml:"headers"` // HTTP headers for talking to VictoriaLogs + WaitForReady time.Duration `yaml:"wait_for_ready"` // Retry interval, default is 10 seconds + Auth VLAuthConfiguration `yaml:"auth"` + MaxFailureDuration time.Duration `yaml:"max_failure_duration"` // Max duration of failure before stopping the source + configuration.DataSourceCommonCfg `yaml:",inline"` +} + +type VLSource struct { + metricsLevel int + Config VLConfiguration + + Client *vlclient.VLClient + + logger *log.Entry +} + +func (l *VLSource) GetMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead} +} + +func (l 
*VLSource) GetAggregMetrics() []prometheus.Collector { + return []prometheus.Collector{linesRead} +} + +func (l *VLSource) UnmarshalConfig(yamlConfig []byte) error { + err := yaml.UnmarshalStrict(yamlConfig, &l.Config) + if err != nil { + return fmt.Errorf("cannot parse VictoriaLogs acquisition configuration: %w", err) + } + + if l.Config.Query == "" { + return errors.New("VictoriaLogs query is mandatory") + } + + if l.Config.WaitForReady == 0 { + l.Config.WaitForReady = 10 * time.Second + } + + if l.Config.Mode == "" { + l.Config.Mode = configuration.TAIL_MODE + } + if l.Config.Prefix == "" { + l.Config.Prefix = "/" + } + + if !strings.HasSuffix(l.Config.Prefix, "/") { + l.Config.Prefix += "/" + } + + if l.Config.Limit == 0 { + l.Config.Limit = defaultLimit + } + + if l.Config.Mode == configuration.TAIL_MODE { + l.logger.Infof("Resetting since") + l.Config.Since = 0 + } + + if l.Config.MaxFailureDuration == 0 { + l.Config.MaxFailureDuration = 30 * time.Second + } + + return nil +} + +func (l *VLSource) Configure(config []byte, logger *log.Entry, metricsLevel int) error { + l.Config = VLConfiguration{} + l.logger = logger + l.metricsLevel = metricsLevel + err := l.UnmarshalConfig(config) + if err != nil { + return err + } + + l.logger.Infof("Since value: %s", l.Config.Since.String()) + + clientConfig := vlclient.Config{ + URL: l.Config.URL, + Headers: l.Config.Headers, + Limit: l.Config.Limit, + Query: l.Config.Query, + Since: l.Config.Since, + Username: l.Config.Auth.Username, + Password: l.Config.Auth.Password, + FailMaxDuration: l.Config.MaxFailureDuration, + } + + l.Client = vlclient.NewVLClient(clientConfig) + l.Client.Logger = logger.WithFields(log.Fields{"component": "victorialogs-client", "source": l.Config.URL}) + return nil +} + +func (l *VLSource) ConfigureByDSN(dsn string, labels map[string]string, logger *log.Entry, uuid string) error { + l.logger = logger + l.Config = VLConfiguration{} + l.Config.Mode = configuration.CAT_MODE + l.Config.Labels = 
labels + l.Config.UniqueId = uuid + + u, err := url.Parse(dsn) + if err != nil { + return fmt.Errorf("while parsing dsn '%s': %w", dsn, err) + } + if u.Scheme != "victorialogs" { + return fmt.Errorf("invalid DSN %s for VictoriaLogs source, must start with victorialogs://", dsn) + } + if u.Host == "" { + return errors.New("empty host") + } + scheme := "http" + + params := u.Query() + if q := params.Get("ssl"); q != "" { + scheme = "https" + } + if q := params.Get("query"); q != "" { + l.Config.Query = q + } + if w := params.Get("wait_for_ready"); w != "" { + l.Config.WaitForReady, err = time.ParseDuration(w) + if err != nil { + return err + } + } else { + l.Config.WaitForReady = 10 * time.Second + } + + if s := params.Get("since"); s != "" { + l.Config.Since, err = time.ParseDuration(s) + if err != nil { + return fmt.Errorf("invalid since in dsn: %w", err) + } + } + + if maxFailureDuration := params.Get("max_failure_duration"); maxFailureDuration != "" { + duration, err := time.ParseDuration(maxFailureDuration) + if err != nil { + return fmt.Errorf("invalid max_failure_duration in dsn: %w", err) + } + l.Config.MaxFailureDuration = duration + } else { + l.Config.MaxFailureDuration = 5 * time.Second // for OneShot mode it doesn't make sense to have longer duration + } + + if limit := params.Get("limit"); limit != "" { + limit, err := strconv.Atoi(limit) + if err != nil { + return fmt.Errorf("invalid limit in dsn: %w", err) + } + l.Config.Limit = limit + } + + if logLevel := params.Get("log_level"); logLevel != "" { + level, err := log.ParseLevel(logLevel) + if err != nil { + return fmt.Errorf("invalid log_level in dsn: %w", err) + } + l.Config.LogLevel = &level + l.logger.Logger.SetLevel(level) + } + + l.Config.URL = fmt.Sprintf("%s://%s", scheme, u.Host) + if u.User != nil { + l.Config.Auth.Username = u.User.Username() + l.Config.Auth.Password, _ = u.User.Password() + } + + clientConfig := vlclient.Config{ + URL: l.Config.URL, + Headers: l.Config.Headers, + Limit: 
l.Config.Limit, + Query: l.Config.Query, + Since: l.Config.Since, + Username: l.Config.Auth.Username, + Password: l.Config.Auth.Password, + } + + l.Client = vlclient.NewVLClient(clientConfig) + l.Client.Logger = logger.WithFields(log.Fields{"component": "victorialogs-client", "source": l.Config.URL}) + + return nil +} + +func (l *VLSource) GetMode() string { + return l.Config.Mode +} + +func (l *VLSource) GetName() string { + return "victorialogs" +} + +// OneShotAcquisition reads a set of file and returns when done +func (l *VLSource) OneShotAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { + l.logger.Debug("VictoriaLogs one shot acquisition") + l.Client.SetTomb(t) + readyCtx, cancel := context.WithTimeout(ctx, l.Config.WaitForReady) + defer cancel() + err := l.Client.Ready(readyCtx) + if err != nil { + return fmt.Errorf("VictoriaLogs is not ready: %w", err) + } + + ctx, cancel = context.WithCancel(ctx) + defer cancel() + + respChan, err := l.getResponseChan(ctx, false) + if err != nil { + return fmt.Errorf("error when starting acquisition: %w", err) + } + + for { + select { + case <-t.Dying(): + l.logger.Debug("VictoriaLogs one shot acquisition stopped") + return nil + case resp, ok := <-respChan: + if !ok { + l.logger.Info("VictoriaLogs acquisition completed") + return nil + } + l.readOneEntry(resp, l.Config.Labels, out) + } + } +} + +func (l *VLSource) readOneEntry(entry *vlclient.Log, labels map[string]string, out chan types.Event) { + ll := types.Line{} + ll.Raw = entry.Message + ll.Time = entry.Time + ll.Src = l.Config.URL + ll.Labels = labels + ll.Process = true + ll.Module = l.GetName() + + if l.metricsLevel != configuration.METRICS_NONE { + linesRead.With(prometheus.Labels{"source": l.Config.URL}).Inc() + } + expectMode := types.LIVE + if l.Config.UseTimeMachine { + expectMode = types.TIMEMACHINE + } + out <- types.Event{ + Line: ll, + Process: true, + Type: types.LOG, + ExpectMode: expectMode, + } +} + +func (l *VLSource) 
StreamingAcquisition(ctx context.Context, out chan types.Event, t *tomb.Tomb) error { + l.Client.SetTomb(t) + readyCtx, cancel := context.WithTimeout(ctx, l.Config.WaitForReady) + defer cancel() + err := l.Client.Ready(readyCtx) + if err != nil { + return fmt.Errorf("VictoriaLogs is not ready: %w", err) + } + + lctx, clientCancel := context.WithCancel(ctx) + //Don't defer clientCancel(), the client outlives this function call + + t.Go(func() error { + <-t.Dying() + clientCancel() + return nil + }) + + t.Go(func() error { + respChan, err := l.getResponseChan(lctx, true) + if err != nil { + clientCancel() + l.logger.Errorf("could not start VictoriaLogs tail: %s", err) + return fmt.Errorf("while starting VictoriaLogs tail: %w", err) + } + for { + select { + case resp, ok := <-respChan: + if !ok { + l.logger.Warnf("VictoriaLogs channel closed") + clientCancel() + return err + } + l.readOneEntry(resp, l.Config.Labels, out) + case <-t.Dying(): + clientCancel() + return nil + } + } + }) + return nil +} + +func (l *VLSource) getResponseChan(ctx context.Context, infinite bool) (chan *vlclient.Log, error) { + var ( + respChan chan *vlclient.Log + err error + ) + + if l.Config.Mode == configuration.TAIL_MODE { + respChan, err = l.Client.Tail(ctx) + if err != nil { + l.logger.Errorf("could not start VictoriaLogs tail: %s", err) + return respChan, fmt.Errorf("while starting VictoriaLogs tail: %w", err) + } + } else { + respChan = l.Client.QueryRange(ctx, infinite) + } + return respChan, err +} + +func (l *VLSource) CanRun() error { + return nil +} + +func (l *VLSource) GetUuid() string { + return l.Config.UniqueId +} + +func (l *VLSource) Dump() interface{} { + return l +} + +// SupportedModes returns the supported modes by the acquisition module +func (l *VLSource) SupportedModes() []string { + return []string{configuration.TAIL_MODE, configuration.CAT_MODE} +} diff --git a/pkg/acquisition/modules/victorialogs/victorialogs_test.go 
b/pkg/acquisition/modules/victorialogs/victorialogs_test.go new file mode 100644 index 00000000000..182b009c414 --- /dev/null +++ b/pkg/acquisition/modules/victorialogs/victorialogs_test.go @@ -0,0 +1,479 @@ +package victorialogs_test + +import ( + "bytes" + "context" + "fmt" + "io" + "math/rand" + "net/http" + "net/url" + "os" + "runtime" + "strconv" + "strings" + "testing" + "time" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/go-cs-lib/cstest" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/victorialogs" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +func TestConfiguration(t *testing.T) { + log.Infof("Test 'TestConfigure'") + + tests := []struct { + config string + expectedErr string + password string + waitForReady time.Duration + testName string + }{ + { + config: `foobar: asd`, + expectedErr: "line 1: field foobar not found in type victorialogs.VLConfiguration", + testName: "Unknown field", + }, + { + config: ` +mode: tail +source: victorialogs`, + expectedErr: "query is mandatory", + testName: "Missing url", + }, + { + config: ` +mode: tail +source: victorialogs +url: http://localhost:9428/ +`, + expectedErr: "query is mandatory", + testName: "Missing query", + }, + { + config: ` +mode: tail +source: victorialogs +url: http://localhost:9428/ +query: > + {server="demo"} +`, + expectedErr: "", + testName: "Correct config", + }, + { + config: ` +mode: tail +source: victorialogs +url: http://localhost:9428/ +wait_for_ready: 5s +query: > + {server="demo"} +`, + expectedErr: "", + testName: "Correct config with wait_for_ready", + waitForReady: 5 * time.Second, + }, + { + config: ` +mode: tail +source: victorialogs +url: http://localhost:9428/ +auth: + username: foo + password: bar +query: > + {server="demo"} +`, + expectedErr: "", + password: "bar", + testName: "Correct config with password", + 
}, + } + subLogger := log.WithField("type", "victorialogs") + + for _, test := range tests { + t.Run(test.testName, func(t *testing.T) { + vlSource := victorialogs.VLSource{} + err := vlSource.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) + cstest.AssertErrorContains(t, err, test.expectedErr) + + if test.password != "" { + p := vlSource.Config.Auth.Password + if test.password != p { + t.Fatalf("Password mismatch : %s != %s", test.password, p) + } + } + + if test.waitForReady != 0 { + if vlSource.Config.WaitForReady != test.waitForReady { + t.Fatalf("Wrong WaitForReady %v != %v", vlSource.Config.WaitForReady, test.waitForReady) + } + } + }) + } +} + +func TestConfigureDSN(t *testing.T) { + log.Infof("Test 'TestConfigureDSN'") + + tests := []struct { + name string + dsn string + expectedErr string + since time.Time + password string + scheme string + waitForReady time.Duration + }{ + { + name: "Wrong scheme", + dsn: "wrong://", + expectedErr: "invalid DSN wrong:// for VictoriaLogs source, must start with victorialogs://", + }, + { + name: "Correct DSN", + dsn: `victorialogs://localhost:9428/?query={server="demo"}`, + expectedErr: "", + }, + { + name: "Empty host", + dsn: "victorialogs://", + expectedErr: "empty host", + }, + { + name: "Invalid DSN", + dsn: "victorialogs", + expectedErr: "invalid DSN victorialogs for VictoriaLogs source, must start with victorialogs://", + }, + { + name: "Bad since param", + dsn: `victorialogs://127.0.0.1:9428/?since=3h&query={server="demo"}`, + since: time.Now().Add(-3 * time.Hour), + }, + { + name: "Basic Auth", + dsn: `victorialogs://login:password@localhost:3102/?query={server="demo"}`, + password: "password", + }, + { + name: "Correct DSN", + dsn: `victorialogs://localhost:9428/?query={server="demo"}&wait_for_ready=5s`, + expectedErr: "", + waitForReady: 5 * time.Second, + }, + { + name: "SSL DSN", + dsn: `victorialogs://localhost:9428/?ssl=true`, + scheme: "https", + }, + } + + for _, test := range tests 
{ + subLogger := log.WithFields(log.Fields{ + "type": "victorialogs", + "name": test.name, + }) + + t.Logf("Test : %s", test.name) + + vlSource := &victorialogs.VLSource{} + err := vlSource.ConfigureByDSN(test.dsn, map[string]string{"type": "testtype"}, subLogger, "") + cstest.AssertErrorContains(t, err, test.expectedErr) + + noDuration, _ := time.ParseDuration("0s") + if vlSource.Config.Since != noDuration && vlSource.Config.Since.Round(time.Second) != time.Since(test.since).Round(time.Second) { + t.Fatalf("Invalid since %v", vlSource.Config.Since) + } + + if test.password != "" { + p := vlSource.Config.Auth.Password + if test.password != p { + t.Fatalf("Password mismatch : %s != %s", test.password, p) + } + } + + if test.scheme != "" { + url, _ := url.Parse(vlSource.Config.URL) + if test.scheme != url.Scheme { + t.Fatalf("Schema mismatch : %s != %s", test.scheme, url.Scheme) + } + } + + if test.waitForReady != 0 { + if vlSource.Config.WaitForReady != test.waitForReady { + t.Fatalf("Wrong WaitForReady %v != %v", vlSource.Config.WaitForReady, test.waitForReady) + } + } + } +} + +// Ingestion format docs: https://docs.victoriametrics.com/victorialogs/data-ingestion/#json-stream-api +func feedVLogs(ctx context.Context, logger *log.Entry, n int, title string) error { + bb := bytes.NewBuffer(nil) + for i := range n { + fmt.Fprintf(bb, + `{ "_time": %q,"_msg":"Log line #%d %v", "server": "demo", "key": %q} +`, time.Now().Format(time.RFC3339), i, title, title) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, "http://127.0.0.1:9428/insert/jsonline?_stream_fields=server,key", bb) + if err != nil { + return err + } + + req.Header.Set("Content-Type", "application/json") + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + b, _ := io.ReadAll(resp.Body) + logger.Error(string(b)) + + return fmt.Errorf("Bad post status %d", resp.StatusCode) + } + + logger.Info(n, 
" Events sent") + // VictoriaLogs buffers data before saving to disk + // Default flush deadline is 2s, waiting 3s to be safe + time.Sleep(3 * time.Second) + + return nil +} + +func TestOneShotAcquisition(t *testing.T) { + ctx := context.Background() + + if runtime.GOOS == "windows" { + t.Skip("Skipping test on windows") + } + + log.SetOutput(os.Stdout) + log.SetLevel(log.InfoLevel) + log.Info("Test 'TestStreamingAcquisition'") + + key := strconv.Itoa(rand.Intn(1000)) + tests := []struct { + config string + }{ + { + config: fmt.Sprintf(` +mode: cat +source: victorialogs +url: http://127.0.0.1:9428 +query: > + {server=demo, key=%q} +since: 1h +`, key), + }, + } + + for _, ts := range tests { + logger := log.New() + subLogger := logger.WithField("type", "victorialogs") + vlSource := victorialogs.VLSource{} + + err := vlSource.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + err = feedVLogs(ctx, subLogger, 20, key) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + out := make(chan types.Event) + read := 0 + + go func() { + for { + <-out + + read++ + } + }() + + vlTomb := tomb.Tomb{} + + err = vlSource.OneShotAcquisition(ctx, out, &vlTomb) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + // Some logs might be buffered + assert.Greater(t, read, 10) + } +} + +func TestStreamingAcquisition(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("Skipping test on windows") + } + + log.SetOutput(os.Stdout) + log.SetLevel(log.InfoLevel) + log.Info("Test 'TestStreamingAcquisition'") + + title := time.Now().String() + tests := []struct { + name string + config string + expectedErr string + streamErr string + expectedLines int + }{ + { + name: "Bad port", + config: `mode: tail +source: victorialogs +url: "http://127.0.0.1:9429" +query: > + server:"demo"`, // Wrong port + expectedErr: "", + streamErr: `VictoriaLogs is not ready`, + expectedLines: 0, + }, + { 
+ name: "ok", + config: `mode: tail +source: victorialogs +url: "http://127.0.0.1:9428" +query: > + server:"demo"`, + expectedErr: "", + streamErr: "", + expectedLines: 20, + }, + } + + ctx := context.Background() + + for _, ts := range tests { + t.Run(ts.name, func(t *testing.T) { + logger := log.New() + subLogger := logger.WithFields(log.Fields{ + "type": "victorialogs", + "name": ts.name, + }) + + out := make(chan types.Event) + vlTomb := tomb.Tomb{} + vlSource := victorialogs.VLSource{} + + err := vlSource.Configure([]byte(ts.config), subLogger, configuration.METRICS_NONE) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + err = vlSource.StreamingAcquisition(ctx, out, &vlTomb) + cstest.AssertErrorContains(t, err, ts.streamErr) + + if ts.streamErr != "" { + return + } + + time.Sleep(time.Second * 2) // We need to give time to start reading from the WS + + readTomb := tomb.Tomb{} + readCtx, cancel := context.WithTimeout(ctx, time.Second*10) + count := 0 + + readTomb.Go(func() error { + defer cancel() + + for { + select { + case <-readCtx.Done(): + return readCtx.Err() + case evt := <-out: + count++ + + if !strings.HasSuffix(evt.Line.Raw, title) { + return fmt.Errorf("Incorrect suffix : %s", evt.Line.Raw) + } + + if count == ts.expectedLines { + return nil + } + } + } + }) + + err = feedVLogs(ctx, subLogger, ts.expectedLines, title) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + err = readTomb.Wait() + + cancel() + + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + assert.Equal(t, ts.expectedLines, count) + }) + } +} + +func TestStopStreaming(t *testing.T) { + ctx := context.Background() + + if runtime.GOOS == "windows" { + t.Skip("Skipping test on windows") + } + + config := ` +mode: tail +source: victorialogs +url: http://127.0.0.1:9428 +query: > + server:"demo" +` + logger := log.New() + subLogger := logger.WithField("type", "victorialogs") + title := time.Now().String() + vlSource := victorialogs.VLSource{} + 
+ err := vlSource.Configure([]byte(config), subLogger, configuration.METRICS_NONE) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + out := make(chan types.Event, 10) + + vlTomb := &tomb.Tomb{} + + err = vlSource.StreamingAcquisition(ctx, out, vlTomb) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + time.Sleep(time.Second * 2) + + err = feedVLogs(ctx, subLogger, 1, title) + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } + + vlTomb.Kill(nil) + + err = vlTomb.Wait() + if err != nil { + t.Fatalf("Unexpected error : %s", err) + } +} diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go index 8283bcc21a2..22186ea96cb 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_windows.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows.go @@ -287,9 +287,9 @@ func (w *WinEventLogSource) UnmarshalConfig(yamlConfig []byte) error { return nil } -func (w *WinEventLogSource) Configure(yamlConfig []byte, logger *log.Entry, MetricsLevel int) error { +func (w *WinEventLogSource) Configure(yamlConfig []byte, logger *log.Entry, metricsLevel int) error { w.logger = logger - w.metricsLevel = MetricsLevel + w.metricsLevel = metricsLevel err := w.UnmarshalConfig(yamlConfig) if err != nil { diff --git a/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go b/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go index 2f6fe15450f..b4998de76c4 100644 --- a/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go +++ b/pkg/acquisition/modules/wineventlog/wineventlog_windows_test.go @@ -7,18 +7,22 @@ import ( "testing" "time" - "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" - "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" - "github.com/crowdsecurity/crowdsec/pkg/types" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" 
"golang.org/x/sys/windows/svc/eventlog" "gopkg.in/tomb.v2" + + "github.com/crowdsecurity/go-cs-lib/cstest" + + "github.com/crowdsecurity/crowdsec/pkg/acquisition/configuration" + "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" + "github.com/crowdsecurity/crowdsec/pkg/types" ) func TestBadConfiguration(t *testing.T) { - exprhelpers.Init(nil) + err := exprhelpers.Init(nil) + require.NoError(t, err) tests := []struct { config string @@ -62,7 +66,8 @@ xpath_query: test`, } func TestQueryBuilder(t *testing.T) { - exprhelpers.Init(nil) + err := exprhelpers.Init(nil) + require.NoError(t, err) tests := []struct { config string @@ -111,23 +116,26 @@ event_level: bla`, } subLogger := log.WithField("type", "windowseventlog") for _, test := range tests { - f := WinEventLogSource{} - f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) - q, err := f.buildXpathQuery() - if test.expectedErr != "" { - if err == nil { - t.Fatalf("expected error '%s' but got none", test.expectedErr) + t.Run(test.config, func(t *testing.T) { + f := WinEventLogSource{} + + err := f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) + cstest.AssertErrorContains(t, err, test.expectedErr) + if test.expectedErr != "" { + return } - assert.Contains(t, err.Error(), test.expectedErr) - } else { + + q, err := f.buildXpathQuery() require.NoError(t, err) assert.Equal(t, test.expectedQuery, q) - } + }) } } func TestLiveAcquisition(t *testing.T) { - exprhelpers.Init(nil) + err := exprhelpers.Init(nil) + require.NoError(t, err) + ctx := context.Background() tests := []struct { @@ -185,8 +193,13 @@ event_ids: to := &tomb.Tomb{} c := make(chan types.Event) f := WinEventLogSource{} - f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) - f.StreamingAcquisition(ctx, c, to) + + err := f.Configure([]byte(test.config), subLogger, configuration.METRICS_NONE) + require.NoError(t, err) + + err = f.StreamingAcquisition(ctx, c, to) + require.NoError(t, err) + 
time.Sleep(time.Second) lines := test.expectedLines go func() { @@ -261,7 +274,8 @@ func TestOneShotAcquisition(t *testing.T) { }, } - exprhelpers.Init(nil) + err := exprhelpers.Init(nil) + require.NoError(t, err) for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -269,15 +283,13 @@ func TestOneShotAcquisition(t *testing.T) { to := &tomb.Tomb{} c := make(chan types.Event) f := WinEventLogSource{} - err := f.ConfigureByDSN(test.dsn, map[string]string{"type": "wineventlog"}, log.WithField("type", "windowseventlog"), "") + err := f.ConfigureByDSN(test.dsn, map[string]string{"type": "wineventlog"}, log.WithField("type", "windowseventlog"), "") + cstest.AssertErrorContains(t, err, test.expectedConfigureErr) if test.expectedConfigureErr != "" { - assert.Contains(t, err.Error(), test.expectedConfigureErr) return } - require.NoError(t, err) - go func() { for { select { diff --git a/pkg/acquisition/test_files/env.yaml b/pkg/acquisition/test_files/env.yaml new file mode 100644 index 00000000000..8abd4b16ca5 --- /dev/null +++ b/pkg/acquisition/test_files/env.yaml @@ -0,0 +1,6 @@ +labels: + test: foobar + non_existing: ${NON_EXISTING} +log_level: info +source: mock +toto: ${TEST_ENV} \ No newline at end of file diff --git a/pkg/acquisition/victorialogs.go b/pkg/acquisition/victorialogs.go new file mode 100644 index 00000000000..b097f0c8dfc --- /dev/null +++ b/pkg/acquisition/victorialogs.go @@ -0,0 +1,12 @@ +//go:build !no_datasource_victorialogs + +package acquisition + +import ( + "github.com/crowdsecurity/crowdsec/pkg/acquisition/modules/victorialogs" +) + +//nolint:gochecknoinits +func init() { + registerDataSource("victorialogs", func() DataSource { return &victorialogs.VLSource{} }) +} diff --git a/pkg/alertcontext/alertcontext.go b/pkg/alertcontext/alertcontext.go index 1b7d1e20018..0b38336a698 100644 --- a/pkg/alertcontext/alertcontext.go +++ b/pkg/alertcontext/alertcontext.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "net/http" + "reflect" 
"slices" "strconv" @@ -16,9 +17,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -const ( - MaxContextValueLen = 4000 -) +const MaxContextValueLen = 4000 var alertContext = Context{} @@ -34,7 +33,8 @@ func ValidateContextExpr(key string, expressions []string) error { _, err := expr.Compile(expression, exprhelpers.GetExprOptions(map[string]interface{}{ "evt": &types.Event{}, "match": &types.MatchedRule{}, - "req": &http.Request{}})...) + "req": &http.Request{}, + })...) if err != nil { return fmt.Errorf("compilation of '%s' failed: %w", expression, err) } @@ -79,7 +79,8 @@ func NewAlertContext(contextToSend map[string][]string, valueLength int) error { valueCompiled, err := expr.Compile(value, exprhelpers.GetExprOptions(map[string]interface{}{ "evt": &types.Event{}, "match": &types.MatchedRule{}, - "req": &http.Request{}})...) + "req": &http.Request{}, + })...) if err != nil { return fmt.Errorf("compilation of '%s' context value failed: %w", value, err) } @@ -114,6 +115,7 @@ func TruncateContextMap(contextMap map[string][]string, contextValueLen int) ([] } metas = append(metas, &meta) } + return metas, errors } @@ -150,20 +152,19 @@ func TruncateContext(values []string, contextValueLen int) (string, error) { } func EvalAlertContextRules(evt types.Event, match *types.MatchedRule, request *http.Request, tmpContext map[string][]string) []error { - var errors []error - //if we're evaluating context for appsec event, match and request will be present. - //otherwise, only evt will be. + // if we're evaluating context for appsec event, match and request will be present. + // otherwise, only evt will be. 
if match == nil { match = types.NewMatchedRule() } + if request == nil { request = &http.Request{} } for key, values := range alertContext.ContextToSendCompiled { - if _, ok := tmpContext[key]; !ok { tmpContext[key] = make([]string, 0) } @@ -176,6 +177,7 @@ func EvalAlertContextRules(evt types.Event, match *types.MatchedRule, request *h errors = append(errors, fmt.Errorf("failed to get value for %s: %w", key, err)) continue } + switch out := output.(type) { case string: val = out @@ -201,6 +203,10 @@ func EvalAlertContextRules(evt types.Event, match *types.MatchedRule, request *h } } default: + r := reflect.ValueOf(output) + if r.IsZero() || r.IsNil() { + continue + } val := fmt.Sprintf("%v", output) if val != "" && !slices.Contains(tmpContext[key], val) { tmpContext[key] = append(tmpContext[key], val) @@ -208,6 +214,7 @@ func EvalAlertContextRules(evt types.Event, match *types.MatchedRule, request *h } } } + return errors } @@ -237,8 +244,8 @@ func EventToContext(events []types.Event) (models.Meta, []error) { tmpContext := make(map[string][]string) - for _, evt := range events { - tmpErrors := EvalAlertContextRules(evt, nil, nil, tmpContext) + for i := range events { + tmpErrors := EvalAlertContextRules(events[i], nil, nil, tmpContext) errors = append(errors, tmpErrors...) 
} diff --git a/pkg/alertcontext/alertcontext_test.go b/pkg/alertcontext/alertcontext_test.go index dc752ba8b09..9d9373bcd36 100644 --- a/pkg/alertcontext/alertcontext_test.go +++ b/pkg/alertcontext/alertcontext_test.go @@ -8,9 +8,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/crowdsecurity/go-cs-lib/ptr" + "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" - "github.com/crowdsecurity/go-cs-lib/ptr" ) func TestNewAlertContext(t *testing.T) { @@ -229,6 +230,7 @@ func TestValidateContextExpr(t *testing.T) { } for _, test := range tests { fmt.Printf("Running test '%s'\n", test.name) + err := ValidateContextExpr(test.key, test.exprs) if test.expectedErr == nil { require.NoError(t, err) @@ -239,7 +241,6 @@ func TestValidateContextExpr(t *testing.T) { } func TestAppsecEventToContext(t *testing.T) { - tests := []struct { name string contextToSend map[string][]string @@ -349,16 +350,62 @@ func TestAppsecEventToContext(t *testing.T) { } for _, test := range tests { - //reset cache + // reset cache alertContext = Context{} - //compile + // compile if err := NewAlertContext(test.contextToSend, 100); err != nil { t.Fatalf("failed to compile %s: %s", test.name, err) } - //run + // run metas, errors := AppsecEventToContext(test.match, test.req) assert.Len(t, errors, test.expectedErrLen) assert.ElementsMatch(t, test.expectedResult, metas) } } + +func TestEvalAlertContextRules(t *testing.T) { + tests := []struct { + name string + contextToSend map[string][]string + event types.Event + match types.MatchedRule + req *http.Request + expectedResult map[string][]string + expectedErrLen int + }{ + { + name: "no appsec match", + contextToSend: map[string][]string{ + "source_ip": {"evt.Parsed.source_ip"}, + "id": {"match.id"}, + }, + event: types.Event{ + Parsed: map[string]string{ + "source_ip": "1.2.3.4", + "source_machine": "mymachine", + "uri": "/test/test/test/../../../../../../../../", 
+ }, + }, + expectedResult: map[string][]string{ + "source_ip": {"1.2.3.4"}, + "id": {}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + contextDict := make(map[string][]string) + + alertContext = Context{} + if err := NewAlertContext(test.contextToSend, 100); err != nil { + t.Fatalf("failed to compile %s: %s", test.name, err) + } + + errs := EvalAlertContextRules(test.event, &test.match, test.req, contextDict) + assert.Len(t, errs, test.expectedErrLen) + assert.Equal(t, test.expectedResult, contextDict) + }) + } +} diff --git a/pkg/apiclient/alerts_service_test.go b/pkg/apiclient/alerts_service_test.go index 0d1ff41685f..9df633fa8be 100644 --- a/pkg/apiclient/alerts_service_test.go +++ b/pkg/apiclient/alerts_service_test.go @@ -23,7 +23,8 @@ func TestAlertsListAsMachine(t *testing.T) { mux, urlx, teardown := setup() mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) log.Printf("URL is %s", urlx) @@ -202,7 +203,8 @@ func TestAlertsGetAsMachine(t *testing.T) { mux, urlx, teardown := setup() mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) log.Printf("URL is %s", urlx) @@ -368,13 +370,15 @@ func TestAlertsCreateAsMachine(t *testing.T) { mux, urlx, teardown := setup() mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := 
w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "POST") w.WriteHeader(http.StatusOK) - w.Write([]byte(`["3"]`)) + _, err := w.Write([]byte(`["3"]`)) + assert.NoError(t, err) }) log.Printf("URL is %s", urlx) @@ -408,14 +412,16 @@ func TestAlertsDeleteAsMachine(t *testing.T) { mux, urlx, teardown := setup() mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "DELETE") assert.Equal(t, "ip=1.2.3.4", r.URL.RawQuery) w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"message":"0 deleted alerts"}`)) + _, err := w.Write([]byte(`{"message":"0 deleted alerts"}`)) + assert.NoError(t, err) }) log.Printf("URL is %s", urlx) diff --git a/pkg/apiclient/auth_jwt.go b/pkg/apiclient/auth_jwt.go index 193486ff065..c43e9fc291c 100644 --- a/pkg/apiclient/auth_jwt.go +++ b/pkg/apiclient/auth_jwt.go @@ -62,7 +62,6 @@ func (t *JWTTransport) refreshJwtToken() error { enc := json.NewEncoder(buf) enc.SetEscapeHTML(false) err = enc.Encode(auth) - if err != nil { return fmt.Errorf("could not encode jwt auth body: %w", err) } @@ -169,7 +168,6 @@ func (t *JWTTransport) prepareRequest(req *http.Request) (*http.Request, error) // RoundTrip implements the RoundTripper interface. 
func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { - var resp *http.Response attemptsCount := make(map[int]int) @@ -229,7 +227,6 @@ func (t *JWTTransport) RoundTrip(req *http.Request) (*http.Response, error) { } } return resp, nil - } func (t *JWTTransport) Client() *http.Client { diff --git a/pkg/apiclient/auth_key_test.go b/pkg/apiclient/auth_key_test.go index f686de6227a..b7cce3e15c9 100644 --- a/pkg/apiclient/auth_key_test.go +++ b/pkg/apiclient/auth_key_test.go @@ -24,10 +24,12 @@ func TestApiAuth(t *testing.T) { if r.Header.Get("X-Api-Key") == "ixu" { assert.Equal(t, "ip=1.2.3.4", r.URL.RawQuery) w.WriteHeader(http.StatusOK) - w.Write([]byte(`null`)) + _, err := w.Write([]byte(`null`)) + assert.NoError(t, err) } else { w.WriteHeader(http.StatusForbidden) - w.Write([]byte(`{"message":"access forbidden"}`)) + _, err := w.Write([]byte(`{"message":"access forbidden"}`)) + assert.NoError(t, err) } }) diff --git a/pkg/apiclient/client.go b/pkg/apiclient/client.go index 47d97a28344..ec473beca77 100644 --- a/pkg/apiclient/client.go +++ b/pkg/apiclient/client.go @@ -125,8 +125,8 @@ func NewClient(config *Config) (*ApiClient, error) { return c, nil } -func NewDefaultClient(URL *url.URL, prefix string, userAgent string, client *http.Client) (*ApiClient, error) { - transport, baseURL := createTransport(URL) +func NewDefaultClient(url *url.URL, prefix string, userAgent string, client *http.Client) (*ApiClient, error) { + transport, baseURL := createTransport(url) if client == nil { client = &http.Client{} diff --git a/pkg/apiclient/client_http.go b/pkg/apiclient/client_http.go index eeca929ea6e..c64404dc7ee 100644 --- a/pkg/apiclient/client_http.go +++ b/pkg/apiclient/client_http.go @@ -78,10 +78,11 @@ func (c *ApiClient) Do(ctx context.Context, req *http.Request, v interface{}) (* } // If the error type is *url.Error, sanitize its URL before returning. 
- if e, ok := err.(*url.Error); ok { - if url, err := url.Parse(e.URL); err == nil { - e.URL = url.String() - return newResponse(resp), e + var urlErr *url.Error + if errors.As(err, &urlErr) { + if parsedURL, parseErr := url.Parse(urlErr.URL); parseErr == nil { + urlErr.URL = parsedURL.String() + return newResponse(resp), urlErr } return newResponse(resp), err diff --git a/pkg/apiclient/client_http_test.go b/pkg/apiclient/client_http_test.go index 45cd8410a8e..0d6cf3d993e 100644 --- a/pkg/apiclient/client_http_test.go +++ b/pkg/apiclient/client_http_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/crowdsecurity/go-cs-lib/cstest" @@ -31,7 +32,8 @@ func TestNewRequestInvalid(t *testing.T) { /*mock login*/ mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte(`{"code": 401, "message" : "bad login/password"}`)) + _, err := w.Write([]byte(`{"code": 401, "message" : "bad login/password"}`)) + assert.NoError(t, err) }) mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { diff --git a/pkg/apiclient/client_test.go b/pkg/apiclient/client_test.go index d1f58f33ad2..c172849c21e 100644 --- a/pkg/apiclient/client_test.go +++ b/pkg/apiclient/client_test.go @@ -56,13 +56,11 @@ func toUNCPath(path string) (string, error) { return uncPath, nil } -func setupUnixSocketWithPrefix(socket string, urlPrefix string) (mux *http.ServeMux, serverURL string, teardown func()) { +func setupUnixSocketWithPrefix(t *testing.T, socket string, urlPrefix string) (mux *http.ServeMux, serverURL string, teardown func()) { var err error if runtime.GOOS == "windows" { socket, err = toUNCPath(socket) - if err != nil { - log.Fatalf("converting to UNC path: %s", err) - } + require.NoError(t, err, "converting to UNC path") } mux = http.NewServeMux() @@ -103,7 +101,8 @@ func TestNewClientOk(t *testing.T) { /*mock login*/ 
mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { @@ -120,7 +119,7 @@ func TestNewClientOk_UnixSocket(t *testing.T) { tmpDir := t.TempDir() socket := path.Join(tmpDir, "socket") - mux, urlx, teardown := setupUnixSocketWithPrefix(socket, "v1") + mux, urlx, teardown := setupUnixSocketWithPrefix(t, socket, "v1") defer teardown() apiURL, err := url.Parse(urlx) @@ -140,7 +139,8 @@ func TestNewClientOk_UnixSocket(t *testing.T) { /*mock login*/ mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { @@ -176,7 +176,8 @@ func TestNewClientKo(t *testing.T) { /*mock login*/ mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte(`{"code": 401, "message" : "bad login/password"}`)) + _, err := w.Write([]byte(`{"code": 401, "message" : "bad login/password"}`)) + assert.NoError(t, err) }) mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { @@ -202,7 +203,8 @@ func TestNewDefaultClient(t *testing.T) { mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte(`{"code": 401, "message" : "brr"}`)) + _, err := w.Write([]byte(`{"code": 401, "message" : "brr"}`)) + assert.NoError(t, err) }) _, _, err = client.Alerts.List(context.Background(), 
AlertsListOpts{}) @@ -215,7 +217,7 @@ func TestNewDefaultClient_UnixSocket(t *testing.T) { tmpDir := t.TempDir() socket := path.Join(tmpDir, "socket") - mux, urlx, teardown := setupUnixSocketWithPrefix(socket, "v1") + mux, urlx, teardown := setupUnixSocketWithPrefix(t, socket, "v1") defer teardown() apiURL, err := url.Parse(urlx) @@ -230,7 +232,8 @@ func TestNewDefaultClient_UnixSocket(t *testing.T) { mux.HandleFunc("/alerts", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte(`{"code": 401, "message" : "brr"}`)) + _, err := w.Write([]byte(`{"code": 401, "message" : "brr"}`)) + assert.NoError(t, err) }) _, _, err = client.Alerts.List(context.Background(), AlertsListOpts{}) @@ -268,7 +271,8 @@ func TestNewClientRegisterOK(t *testing.T) { mux.HandleFunc("/watchers", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "POST") w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) apiURL, err := url.Parse(urlx + "/") @@ -293,14 +297,15 @@ func TestNewClientRegisterOK_UnixSocket(t *testing.T) { tmpDir := t.TempDir() socket := path.Join(tmpDir, "socket") - mux, urlx, teardown := setupUnixSocketWithPrefix(socket, "v1") + mux, urlx, teardown := setupUnixSocketWithPrefix(t, socket, "v1") defer teardown() /*mock login*/ mux.HandleFunc("/watchers", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "POST") w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) apiURL, err := url.Parse(urlx) @@ -333,7 +338,8 @@ func TestNewClientBadAnswer(t *testing.T) { mux.HandleFunc("/watchers", func(w http.ResponseWriter, r *http.Request) { 
testMethod(t, r, "POST") w.WriteHeader(http.StatusUnauthorized) - w.Write([]byte(`bad`)) + _, err := w.Write([]byte(`bad`)) + assert.NoError(t, err) }) apiURL, err := url.Parse(urlx + "/") diff --git a/pkg/apiclient/decisions_service_test.go b/pkg/apiclient/decisions_service_test.go index 942d14689ff..b8bc327a7d7 100644 --- a/pkg/apiclient/decisions_service_test.go +++ b/pkg/apiclient/decisions_service_test.go @@ -31,11 +31,12 @@ func TestDecisionsList(t *testing.T) { assert.Equal(t, "ip=1.2.3.4", r.URL.RawQuery) assert.Equal(t, "ixu", r.Header.Get("X-Api-Key")) w.WriteHeader(http.StatusOK) - w.Write([]byte(`[{"duration":"3h59m55.756182786s","id":4,"origin":"cscli","scenario":"manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'","scope":"Ip","type":"ban","value":"1.2.3.4"}]`)) + _, err := w.Write([]byte(`[{"duration":"3h59m55.756182786s","id":4,"origin":"cscli","scenario":"manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'","scope":"Ip","type":"ban","value":"1.2.3.4"}]`)) + assert.NoError(t, err) } else { w.WriteHeader(http.StatusOK) - w.Write([]byte(`null`)) - // no results + _, err := w.Write([]byte(`null`)) + assert.NoError(t, err) } }) @@ -90,10 +91,12 @@ func TestDecisionsStream(t *testing.T) { if r.Method == http.MethodGet { if strings.Contains(r.URL.RawQuery, "startup=true") { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"deleted":null,"new":[{"duration":"3h59m55.756182786s","id":4,"origin":"cscli","scenario":"manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'","scope":"Ip","type":"ban","value":"1.2.3.4"}]}`)) + _, err := w.Write([]byte(`{"deleted":null,"new":[{"duration":"3h59m55.756182786s","id":4,"origin":"cscli","scenario":"manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'","scope":"Ip","type":"ban","value":"1.2.3.4"}]}`)) + assert.NoError(t, err) } else { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"deleted":null,"new":null}`)) + _, err := 
w.Write([]byte(`{"deleted":null,"new":null}`)) + assert.NoError(t, err) } } }) @@ -163,10 +166,12 @@ func TestDecisionsStreamV3Compatibility(t *testing.T) { if r.Method == http.MethodGet { if strings.Contains(r.URL.RawQuery, "startup=true") { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"deleted":[{"scope":"ip","decisions":["1.2.3.5"]}],"new":[{"scope":"ip", "scenario": "manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'", "decisions":[{"duration":"3h59m55.756182786s","value":"1.2.3.4"}]}]}`)) + _, err := w.Write([]byte(`{"deleted":[{"scope":"ip","decisions":["1.2.3.5"]}],"new":[{"scope":"ip", "scenario": "manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'", "decisions":[{"duration":"3h59m55.756182786s","value":"1.2.3.4"}]}]}`)) + assert.NoError(t, err) } else { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"deleted":null,"new":null}`)) + _, err := w.Write([]byte(`{"deleted":null,"new":null}`)) + assert.NoError(t, err) } } }) @@ -227,9 +232,10 @@ func TestDecisionsStreamV3(t *testing.T) { if r.Method == http.MethodGet { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"deleted":[{"scope":"ip","decisions":["1.2.3.5"]}], + _, err := w.Write([]byte(`{"deleted":[{"scope":"ip","decisions":["1.2.3.5"]}], "new":[{"scope":"ip", "scenario": "manual 'ban' from '82929df7ee394b73b81252fe3b4e50203yaT2u6nXiaN7Ix9'", "decisions":[{"duration":"3h59m55.756182786s","value":"1.2.3.4"}]}], "links": {"blocklists":[{"name":"blocklist1","url":"/v3/blocklist","scope":"ip","remediation":"ban","duration":"24h"}]}}`)) + assert.NoError(t, err) } }) @@ -303,7 +309,8 @@ func TestDecisionsFromBlocklist(t *testing.T) { if r.Method == http.MethodGet { w.WriteHeader(http.StatusOK) - w.Write([]byte("1.2.3.4\r\n1.2.3.5")) + _, err := w.Write([]byte("1.2.3.4\r\n1.2.3.5")) + assert.NoError(t, err) } }) @@ -388,14 +395,16 @@ func TestDeleteDecisions(t *testing.T) { mux, urlx, teardown := setup() mux.HandleFunc("/watchers/login", func(w http.ResponseWriter, r 
*http.Request) { w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + _, err := w.Write([]byte(`{"code": 200, "expire": "2030-01-02T15:04:05Z", "token": "oklol"}`)) + assert.NoError(t, err) }) mux.HandleFunc("/decisions", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "DELETE") assert.Equal(t, "ip=1.2.3.4", r.URL.RawQuery) w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"nbDeleted":"1"}`)) + _, err := w.Write([]byte(`{"nbDeleted":"1"}`)) + assert.NoError(t, err) // w.Write([]byte(`{"message":"0 deleted alerts"}`)) }) diff --git a/pkg/apiserver/alerts_test.go b/pkg/apiserver/alerts_test.go index d86234e4813..c4edb42d475 100644 --- a/pkg/apiserver/alerts_test.go +++ b/pkg/apiserver/alerts_test.go @@ -103,13 +103,13 @@ func TestSimulatedAlert(t *testing.T) { // exclude decision in simulation mode w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=false", alertContent, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over `) assert.NotContains(t, w.Body.String(), `"message":"Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over `) // include decision in simulation mode w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=true", alertContent, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.178 performed crowdsecurity/ssh-bf (6 events over `) assert.Contains(t, w.Body.String(), `"message":"Ip 91.121.79.179 performed crowdsecurity/ssh-bf (6 events over `) } @@ -120,21 +120,21 @@ func TestCreateAlert(t *testing.T) { // Create Alert with invalid format w := lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/alerts", strings.NewReader("test"), "password") - assert.Equal(t, 400, w.Code) - assert.Equal(t, `{"message":"invalid character 
'e' in literal true (expecting 'r')"}`, w.Body.String()) + assert.Equal(t, http.StatusBadRequest, w.Code) + assert.JSONEq(t, `{"message":"invalid character 'e' in literal true (expecting 'r')"}`, w.Body.String()) // Create Alert with invalid input alertContent := GetAlertReaderFromFile(t, "./tests/invalidAlert_sample.json") w = lapi.RecordResponse(t, ctx, http.MethodPost, "/v1/alerts", alertContent, "password") - assert.Equal(t, 500, w.Code) - assert.Equal(t, + assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.JSONEq(t, `{"message":"validation failure list:\n0.scenario in body is required\n0.scenario_hash in body is required\n0.scenario_version in body is required\n0.simulated in body is required\n0.source in body is required"}`, w.Body.String()) // Create Valid Alert w = lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") - assert.Equal(t, 201, w.Code) + assert.Equal(t, http.StatusCreated, w.Code) assert.Equal(t, `["1"]`, w.Body.String()) } @@ -142,7 +142,8 @@ func TestCreateAlertChannels(t *testing.T) { ctx := context.Background() apiServer, config := NewAPIServer(t, ctx) apiServer.controller.PluginChannel = make(chan csplugin.ProfileAlert) - apiServer.InitController() + err := apiServer.InitController() + require.NoError(t, err) loginResp := LoginToTestAPI(t, ctx, apiServer.router, config) lapi := LAPI{router: apiServer.router, loginResp: loginResp} @@ -175,13 +176,13 @@ func TestAlertListFilters(t *testing.T) { // bad filter w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?test=test", alertContent, "password") - assert.Equal(t, 500, w.Code) - assert.Equal(t, `{"message":"Filter parameter 'test' is unknown (=test): invalid filter"}`, w.Body.String()) + assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.JSONEq(t, `{"message":"Filter parameter 'test' is unknown (=test): invalid filter"}`, w.Body.String()) // get without filters w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts", emptyBody, "password") - assert.Equal(t, 
200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) // check alert and decision assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) @@ -189,150 +190,150 @@ func TestAlertListFilters(t *testing.T) { // test decision_type filter (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?decision_type=ban", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test decision_type filter (bad value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?decision_type=ratata", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", w.Body.String()) // test scope (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?scope=Ip", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test scope (bad value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?scope=rarara", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", w.Body.String()) // test scenario (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?scenario=crowdsecurity/ssh-bf", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), 
`scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test scenario (bad value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?scenario=crowdsecurity/nope", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", w.Body.String()) // test ip (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?ip=91.121.79.195", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test ip (bad value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?ip=99.122.77.195", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", w.Body.String()) // test ip (invalid value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?ip=gruueq", emptyBody, "password") - assert.Equal(t, 500, w.Code) - assert.Equal(t, `{"message":"unable to convert 'gruueq' to int: invalid address: invalid ip address / range"}`, w.Body.String()) + assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.JSONEq(t, `{"message":"invalid ip address 'gruueq'"}`, w.Body.String()) // test range (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?range=91.121.79.0/24&contains=false", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test range w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?range=99.122.77.0/24&contains=false", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", 
w.Body.String()) // test range (invalid value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?range=ratata", emptyBody, "password") - assert.Equal(t, 500, w.Code) - assert.Equal(t, `{"message":"unable to convert 'ratata' to int: invalid address: invalid ip address / range"}`, w.Body.String()) + assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.JSONEq(t, `{"message":"invalid ip address 'ratata'"}`, w.Body.String()) // test since (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?since=1h", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test since (ok but yields no results) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?since=1ns", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", w.Body.String()) // test since (invalid value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?since=1zuzu", emptyBody, "password") - assert.Equal(t, 500, w.Code) + assert.Equal(t, http.StatusInternalServerError, w.Code) assert.Contains(t, w.Body.String(), `{"message":"while parsing duration: time: unknown unit`) // test until (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?until=1ns", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test until (ok but no return) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?until=1m", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", w.Body.String()) // test until 
(invalid value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?until=1zuzu", emptyBody, "password") - assert.Equal(t, 500, w.Code) + assert.Equal(t, http.StatusInternalServerError, w.Code) assert.Contains(t, w.Body.String(), `{"message":"while parsing duration: time: unknown unit`) // test simulated (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=true", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test simulated (ok) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?simulated=false", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test has active decision w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?has_active_decision=true", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "Ip 91.121.79.195 performed 'crowdsecurity/ssh-bf' (6 events over ") assert.Contains(t, w.Body.String(), `scope":"Ip","simulated":false,"type":"ban","value":"91.121.79.195"`) // test has active decision w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?has_active_decision=false", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Equal(t, "null", w.Body.String()) // test has active decision (invalid value) w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?has_active_decision=ratatqata", emptyBody, "password") - assert.Equal(t, 500, w.Code) - assert.Equal(t, `{"message":"'ratatqata' is not a boolean: strconv.ParseBool: 
parsing \"ratatqata\": invalid syntax: unable to parse type"}`, w.Body.String()) + assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.JSONEq(t, `{"message":"'ratatqata' is not a boolean: strconv.ParseBool: parsing \"ratatqata\": invalid syntax: unable to parse type"}`, w.Body.String()) } func TestAlertBulkInsert(t *testing.T) { @@ -343,7 +344,7 @@ func TestAlertBulkInsert(t *testing.T) { alertContent := GetAlertReaderFromFile(t, "./tests/alert_bulk.json") w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts", alertContent, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) } func TestListAlert(t *testing.T) { @@ -353,13 +354,13 @@ func TestListAlert(t *testing.T) { // List Alert with invalid filter w := lapi.RecordResponse(t, ctx, "GET", "/v1/alerts?test=test", emptyBody, "password") - assert.Equal(t, 500, w.Code) - assert.Equal(t, `{"message":"Filter parameter 'test' is unknown (=test): invalid filter"}`, w.Body.String()) + assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.JSONEq(t, `{"message":"Filter parameter 'test' is unknown (=test): invalid filter"}`, w.Body.String()) // List Alert w = lapi.RecordResponse(t, ctx, "GET", "/v1/alerts", emptyBody, "password") - assert.Equal(t, 200, w.Code) + assert.Equal(t, http.StatusOK, w.Code) assert.Contains(t, w.Body.String(), "crowdsecurity/test") } @@ -374,7 +375,7 @@ func TestCreateAlertErrors(t *testing.T) { req.Header.Add("User-Agent", UserAgent) req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", "ratata")) lapi.router.ServeHTTP(w, req) - assert.Equal(t, 401, w.Code) + assert.Equal(t, http.StatusUnauthorized, w.Code) // test invalid bearer w = httptest.NewRecorder() @@ -382,7 +383,7 @@ func TestCreateAlertErrors(t *testing.T) { req.Header.Add("User-Agent", UserAgent) req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", lapi.loginResp.Token+"s")) lapi.router.ServeHTTP(w, req) - assert.Equal(t, 401, w.Code) + assert.Equal(t, http.StatusUnauthorized, 
w.Code) } func TestDeleteAlert(t *testing.T) { @@ -396,8 +397,8 @@ func TestDeleteAlert(t *testing.T) { AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.2:4242" lapi.router.ServeHTTP(w, req) - assert.Equal(t, 403, w.Code) - assert.Equal(t, `{"message":"access forbidden from this IP (127.0.0.2)"}`, w.Body.String()) + assert.Equal(t, http.StatusForbidden, w.Code) + assert.JSONEq(t, `{"message":"access forbidden from this IP (127.0.0.2)"}`, w.Body.String()) // Delete Alert w = httptest.NewRecorder() @@ -405,8 +406,8 @@ func TestDeleteAlert(t *testing.T) { AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.1:4242" lapi.router.ServeHTTP(w, req) - assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) + assert.Equal(t, http.StatusOK, w.Code) + assert.JSONEq(t, `{"nbDeleted":"1"}`, w.Body.String()) } func TestDeleteAlertByID(t *testing.T) { @@ -420,8 +421,8 @@ func TestDeleteAlertByID(t *testing.T) { AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.2:4242" lapi.router.ServeHTTP(w, req) - assert.Equal(t, 403, w.Code) - assert.Equal(t, `{"message":"access forbidden from this IP (127.0.0.2)"}`, w.Body.String()) + assert.Equal(t, http.StatusForbidden, w.Code) + assert.JSONEq(t, `{"message":"access forbidden from this IP (127.0.0.2)"}`, w.Body.String()) // Delete Alert w = httptest.NewRecorder() @@ -429,8 +430,8 @@ func TestDeleteAlertByID(t *testing.T) { AddAuthHeaders(req, lapi.loginResp) req.RemoteAddr = "127.0.0.1:4242" lapi.router.ServeHTTP(w, req) - assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) + assert.Equal(t, http.StatusOK, w.Code) + assert.JSONEq(t, `{"nbDeleted":"1"}`, w.Body.String()) } func TestDeleteAlertTrustedIPS(t *testing.T) { @@ -463,7 +464,7 @@ func TestDeleteAlertTrustedIPS(t *testing.T) { req.RemoteAddr = ip + ":1234" router.ServeHTTP(w, req) - assert.Equal(t, 403, w.Code) + assert.Equal(t, http.StatusForbidden, w.Code) assert.Contains(t, 
w.Body.String(), fmt.Sprintf(`{"message":"access forbidden from this IP (%s)"}`, ip)) } @@ -474,8 +475,8 @@ func TestDeleteAlertTrustedIPS(t *testing.T) { req.RemoteAddr = ip + ":1234" router.ServeHTTP(w, req) - assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) + assert.Equal(t, http.StatusOK, w.Code) + assert.JSONEq(t, `{"nbDeleted":"1"}`, w.Body.String()) } lapi.InsertAlertFromFile(t, ctx, "./tests/alert_sample.json") diff --git a/pkg/apiserver/apic.go b/pkg/apiserver/apic.go index 51a85b1ea23..32847f7489a 100644 --- a/pkg/apiserver/apic.go +++ b/pkg/apiserver/apic.go @@ -332,7 +332,6 @@ func getScenarioTrustOfAlert(alert *models.Alert) string { } func shouldShareAlert(alert *models.Alert, consoleConfig *csconfig.ConsoleConfig, shareSignals bool) bool { - if !shareSignals { log.Debugf("sharing signals is disabled") return false diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 05f9150b037..88f1bd21dc4 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -46,10 +46,18 @@ type APIServer struct { consoleConfig *csconfig.ConsoleConfig } -func isBrokenConnection(err any) bool { - if ne, ok := err.(*net.OpError); ok { - if se, ok := ne.Err.(*os.SyscallError); ok { - if strings.Contains(strings.ToLower(se.Error()), "broken pipe") || strings.Contains(strings.ToLower(se.Error()), "connection reset by peer") { +func isBrokenConnection(maybeError any) bool { + err, ok := maybeError.(error) + if !ok { + return false + } + + var netOpError *net.OpError + if errors.As(err, &netOpError) { + var syscallError *os.SyscallError + if errors.As(netOpError.Err, &syscallError) { + if strings.Contains(strings.ToLower(syscallError.Error()), "broken pipe") || + strings.Contains(strings.ToLower(syscallError.Error()), "connection reset by peer") { return true } } @@ -57,21 +65,19 @@ func isBrokenConnection(err any) bool { // because of 
https://github.com/golang/net/blob/39120d07d75e76f0079fe5d27480bcb965a21e4c/http2/server.go // and because it seems gin doesn't handle those neither, we need to "hand define" some errors to properly catch them - if strErr, ok := err.(error); ok { - // stolen from http2/server.go in x/net - var ( - errClientDisconnected = errors.New("client disconnected") - errClosedBody = errors.New("body closed by handler") - errHandlerComplete = errors.New("http2: request body closed due to handler exiting") - errStreamClosed = errors.New("http2: stream closed") - ) + // stolen from http2/server.go in x/net + var ( + errClientDisconnected = errors.New("client disconnected") + errClosedBody = errors.New("body closed by handler") + errHandlerComplete = errors.New("http2: request body closed due to handler exiting") + errStreamClosed = errors.New("http2: stream closed") + ) - if errors.Is(strErr, errClientDisconnected) || - errors.Is(strErr, errClosedBody) || - errors.Is(strErr, errHandlerComplete) || - errors.Is(strErr, errStreamClosed) { - return true - } + if errors.Is(err, errClientDisconnected) || + errors.Is(err, errClosedBody) || + errors.Is(err, errHandlerComplete) || + errors.Is(err, errStreamClosed) { + return true } return false @@ -209,7 +215,7 @@ func NewServer(ctx context.Context, config *csconfig.LocalApiServerCfg) (*APISer gin.DefaultWriter = clog.Writer() router.Use(gin.LoggerWithFormatter(func(param gin.LogFormatterParams) string { - return fmt.Sprintf("%s - [%s] \"%s %s %s %d %s \"%s\" %s\"\n", + return fmt.Sprintf("%s - [%s] \"%s %s %s %d %s %q %s\"\n", param.ClientIP, param.TimeStamp.Format(time.RFC1123), param.Method, diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index cf4c91dedda..d8f24add75e 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -387,7 +387,7 @@ func TestLoggingDebugToFileConfig(t *testing.T) { cfg.LogLevel = ptr.Of(log.DebugLevel) // Configure logging - err := 
types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, *cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.CompressLogs, false) + err := types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, *cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.LogFormat, cfg.CompressLogs, false) require.NoError(t, err) api, err := NewServer(ctx, &cfg) @@ -439,7 +439,7 @@ func TestLoggingErrorToFileConfig(t *testing.T) { cfg.LogLevel = ptr.Of(log.ErrorLevel) // Configure logging - err := types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, *cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.CompressLogs, false) + err := types.SetDefaultLoggerConfig(cfg.LogMedia, cfg.LogDir, *cfg.LogLevel, cfg.LogMaxSize, cfg.LogMaxFiles, cfg.LogMaxAge, cfg.LogFormat, cfg.CompressLogs, false) require.NoError(t, err) api, err := NewServer(ctx, &cfg) diff --git a/pkg/apiserver/controllers/v1/decisions.go b/pkg/apiserver/controllers/v1/decisions.go index ffefffc226b..6a316d8a2e4 100644 --- a/pkg/apiserver/controllers/v1/decisions.go +++ b/pkg/apiserver/controllers/v1/decisions.go @@ -394,8 +394,6 @@ func (c *Controller) StreamDecisionNonChunked(gctx *gin.Context, bouncerInfo *en func (c *Controller) StreamDecision(gctx *gin.Context) { var err error - ctx := gctx.Request.Context() - streamStartTime := time.Now().UTC() bouncerInfo, err := getBouncerFromContext(gctx) @@ -426,7 +424,8 @@ func (c *Controller) StreamDecision(gctx *gin.Context) { if err == nil { // Only update the last pull time if no error occurred when sending the decisions to avoid missing decisions - if err := c.DBClient.UpdateBouncerLastPull(ctx, streamStartTime, bouncerInfo.ID); err != nil { + // Do not reuse the context provided by gin because we already have sent the response to the client, so there's a chance for it to already be canceled + if err := c.DBClient.UpdateBouncerLastPull(context.Background(), streamStartTime, bouncerInfo.ID); err != nil { log.Errorf("unable to update bouncer 
'%s' pull: %v", bouncerInfo.Name, err) } } diff --git a/pkg/apiserver/controllers/v1/errors.go b/pkg/apiserver/controllers/v1/errors.go index d661de44b0e..d7b60c1a1b8 100644 --- a/pkg/apiserver/controllers/v1/errors.go +++ b/pkg/apiserver/controllers/v1/errors.go @@ -21,18 +21,6 @@ func (c *Controller) HandleDBErrors(gctx *gin.Context, err error) { case errors.Is(err, database.HashError): gctx.JSON(http.StatusBadRequest, gin.H{"message": err.Error()}) return - case errors.Is(err, database.InsertFail): - gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) - return - case errors.Is(err, database.QueryFail): - gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) - return - case errors.Is(err, database.ParseTimeFail): - gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) - return - case errors.Is(err, database.ParseDurationFail): - gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) - return default: gctx.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()}) return diff --git a/pkg/apiserver/decisions_test.go b/pkg/apiserver/decisions_test.go index a0af6956443..cb5d2e1c4f1 100644 --- a/pkg/apiserver/decisions_test.go +++ b/pkg/apiserver/decisions_test.go @@ -22,19 +22,19 @@ func TestDeleteDecisionRange(t *testing.T) { // delete by ip wrong w := lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?range=1.2.3.0/24", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"0"}`, w.Body.String()) // delete by range w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?range=91.121.79.0/24&contains=false", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"2"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"2"}`, w.Body.String()) // delete by range : ensure it was already deleted w = lapi.RecordResponse(t, ctx, "DELETE", 
"/v1/decisions?range=91.121.79.0/24", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"0"}`, w.Body.String()) } func TestDeleteDecisionFilter(t *testing.T) { @@ -48,19 +48,19 @@ func TestDeleteDecisionFilter(t *testing.T) { w := lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?ip=1.2.3.4", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"0"}`, w.Body.String()) // delete by ip good w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?ip=91.121.79.179", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"1"}`, w.Body.String()) // delete by scope/value w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?scopes=Ip&value=91.121.79.178", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"1"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"1"}`, w.Body.String()) } func TestDeleteDecisionFilterByScenario(t *testing.T) { @@ -74,13 +74,13 @@ func TestDeleteDecisionFilterByScenario(t *testing.T) { w := lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?scenario=crowdsecurity/ssh-bff", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"0"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"0"}`, w.Body.String()) // delete by scenario good w = lapi.RecordResponse(t, ctx, "DELETE", "/v1/decisions?scenario=crowdsecurity/ssh-bf", emptyBody, PASSWORD) assert.Equal(t, 200, w.Code) - assert.Equal(t, `{"nbDeleted":"2"}`, w.Body.String()) + assert.JSONEq(t, `{"nbDeleted":"2"}`, w.Body.String()) } func TestGetDecisionFilters(t *testing.T) { diff --git a/pkg/apiserver/jwt_test.go b/pkg/apiserver/jwt_test.go index f6f51763975..72ae0302ae4 100644 --- a/pkg/apiserver/jwt_test.go +++ b/pkg/apiserver/jwt_test.go @@ -23,7 +23,7 @@ func 
TestLogin(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, 401, w.Code) - assert.Equal(t, `{"code":401,"message":"machine test not validated"}`, w.Body.String()) + assert.JSONEq(t, `{"code":401,"message":"machine test not validated"}`, w.Body.String()) // Login with machine not exist w = httptest.NewRecorder() @@ -32,7 +32,7 @@ func TestLogin(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, 401, w.Code) - assert.Equal(t, `{"code":401,"message":"ent: machine not found"}`, w.Body.String()) + assert.JSONEq(t, `{"code":401,"message":"ent: machine not found"}`, w.Body.String()) // Login with invalid body w = httptest.NewRecorder() @@ -41,7 +41,7 @@ func TestLogin(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, 401, w.Code) - assert.Equal(t, `{"code":401,"message":"missing: invalid character 'e' in literal true (expecting 'r')"}`, w.Body.String()) + assert.JSONEq(t, `{"code":401,"message":"missing: invalid character 'e' in literal true (expecting 'r')"}`, w.Body.String()) // Login with invalid format w = httptest.NewRecorder() @@ -50,7 +50,7 @@ func TestLogin(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, 401, w.Code) - assert.Equal(t, `{"code":401,"message":"validation failure list:\npassword in body is required"}`, w.Body.String()) + assert.JSONEq(t, `{"code":401,"message":"validation failure list:\npassword in body is required"}`, w.Body.String()) // Validate machine ValidateMachine(t, ctx, "test", config.API.Server.DbConfig) @@ -62,7 +62,7 @@ func TestLogin(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, 401, w.Code) - assert.Equal(t, `{"code":401,"message":"incorrect Username or Password"}`, w.Body.String()) + assert.JSONEq(t, `{"code":401,"message":"incorrect Username or Password"}`, w.Body.String()) // Login with valid machine w = httptest.NewRecorder() diff --git a/pkg/apiserver/machines_test.go b/pkg/apiserver/machines_test.go index 969f75707d6..57b96f54ddd 100644 --- a/pkg/apiserver/machines_test.go +++ 
b/pkg/apiserver/machines_test.go @@ -25,7 +25,7 @@ func TestCreateMachine(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, http.StatusBadRequest, w.Code) - assert.Equal(t, `{"message":"invalid character 'e' in literal true (expecting 'r')"}`, w.Body.String()) + assert.JSONEq(t, `{"message":"invalid character 'e' in literal true (expecting 'r')"}`, w.Body.String()) // Create machine with invalid input w = httptest.NewRecorder() @@ -34,7 +34,7 @@ func TestCreateMachine(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, http.StatusUnprocessableEntity, w.Code) - assert.Equal(t, `{"message":"validation failure list:\nmachine_id in body is required\npassword in body is required"}`, w.Body.String()) + assert.JSONEq(t, `{"message":"validation failure list:\nmachine_id in body is required\npassword in body is required"}`, w.Body.String()) // Create machine b, err := json.Marshal(MachineTest) @@ -144,7 +144,7 @@ func TestCreateMachineAlreadyExist(t *testing.T) { router.ServeHTTP(w, req) assert.Equal(t, http.StatusForbidden, w.Code) - assert.Equal(t, `{"message":"user 'test': user already exist"}`, w.Body.String()) + assert.JSONEq(t, `{"message":"user 'test': user already exist"}`, w.Body.String()) } func TestAutoRegistration(t *testing.T) { diff --git a/pkg/apiserver/middlewares/v1/api_key.go b/pkg/apiserver/middlewares/v1/api_key.go index 3c154be4fab..df2f68930d6 100644 --- a/pkg/apiserver/middlewares/v1/api_key.go +++ b/pkg/apiserver/middlewares/v1/api_key.go @@ -174,7 +174,6 @@ func (a *APIKey) authPlain(c *gin.Context, logger *log.Entry) *ent.Bouncer { logger.Infof("Creating bouncer %s", bouncerName) bouncer, err = a.DbClient.CreateBouncer(ctx, bouncerName, clientIP, hashStr, types.ApiKeyAuthType, true) - if err != nil { logger.Errorf("while creating bouncer db entry: %s", err) return nil diff --git a/pkg/appsec/appsec.go b/pkg/appsec/appsec.go index 553db205b5d..5f01f76d993 100644 --- a/pkg/appsec/appsec.go +++ b/pkg/appsec/appsec.go @@ -158,7 +158,6 @@ 
func (wc *AppsecConfig) SetUpLogger() { /* wc.Name is actually the datasource name.*/ wc.Logger = wc.Logger.Dup().WithField("name", wc.Name) wc.Logger.Logger.SetLevel(*wc.LogLevel) - } func (wc *AppsecConfig) LoadByPath(file string) error { diff --git a/pkg/appsec/appsec_rule/appsec_rule.go b/pkg/appsec/appsec_rule/appsec_rule.go index 136d8b11cb7..9d47c0eed5c 100644 --- a/pkg/appsec/appsec_rule/appsec_rule.go +++ b/pkg/appsec/appsec_rule/appsec_rule.go @@ -47,7 +47,6 @@ type CustomRule struct { } func (v *CustomRule) Convert(ruleType string, appsecRuleName string) (string, []uint32, error) { - if v.Zones == nil && v.And == nil && v.Or == nil { return "", nil, errors.New("no zones defined") } diff --git a/pkg/appsec/appsec_rule/modsec_rule_test.go b/pkg/appsec/appsec_rule/modsec_rule_test.go index ffb8a15ff1f..74e9b85426e 100644 --- a/pkg/appsec/appsec_rule/modsec_rule_test.go +++ b/pkg/appsec/appsec_rule/modsec_rule_test.go @@ -88,7 +88,6 @@ func TestVPatchRuleString(t *testing.T) { rule: CustomRule{ And: []CustomRule{ { - Zones: []string{"ARGS"}, Variables: []string{"foo"}, Match: Match{Type: "regex", Value: "[^a-zA-Z]"}, @@ -161,7 +160,6 @@ SecRule ARGS_GET:foo "@rx [^a-zA-Z]" "id:1519945803,phase:2,deny,log,msg:'OR AND for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { actual, _, err := tt.rule.Convert(ModsecurityRuleType, tt.name) - if err != nil { t.Errorf("Error converting rule: %s", err) } diff --git a/pkg/appsec/appsec_rules_collection.go b/pkg/appsec/appsec_rules_collection.go index d283f95cb19..33e442e7f5b 100644 --- a/pkg/appsec/appsec_rules_collection.go +++ b/pkg/appsec/appsec_rules_collection.go @@ -15,6 +15,7 @@ import ( type AppsecCollection struct { collectionName string Rules []string + NativeRules []string } var APPSEC_RULE = "appsec-rule" @@ -88,14 +89,14 @@ func LoadCollection(pattern string, logger *log.Entry) ([]AppsecCollection, erro if strings.TrimSpace(line) == "" { continue } - appsecCol.Rules = append(appsecCol.Rules, line) 
+ appsecCol.NativeRules = append(appsecCol.NativeRules, line) } } } if appsecRule.SecLangRules != nil { logger.Tracef("Adding inline rules %+v", appsecRule.SecLangRules) - appsecCol.Rules = append(appsecCol.Rules, appsecRule.SecLangRules...) + appsecCol.NativeRules = append(appsecCol.NativeRules, appsecRule.SecLangRules...) } if appsecRule.Rules != nil { diff --git a/pkg/appsec/coraza_logger.go b/pkg/appsec/coraza_logger.go index d2c1612cbd7..93e31be5876 100644 --- a/pkg/appsec/coraza_logger.go +++ b/pkg/appsec/coraza_logger.go @@ -124,7 +124,7 @@ func (e *crzLogEvent) Stringer(key string, val fmt.Stringer) dbg.Event { return e } -func (e crzLogEvent) IsEnabled() bool { +func (e *crzLogEvent) IsEnabled() bool { return !e.muted } diff --git a/pkg/appsec/request_test.go b/pkg/appsec/request_test.go index f8333e4e5f9..8b457e24dab 100644 --- a/pkg/appsec/request_test.go +++ b/pkg/appsec/request_test.go @@ -3,7 +3,6 @@ package appsec import "testing" func TestBodyDumper(t *testing.T) { - tests := []struct { name string req *ParsedRequest @@ -159,7 +158,6 @@ func TestBodyDumper(t *testing.T) { } for idx, test := range tests { - t.Run(test.name, func(t *testing.T) { orig_dr := test.req.DumpRequest() result := test.filter(orig_dr).GetFilteredRequest() @@ -177,5 +175,4 @@ func TestBodyDumper(t *testing.T) { } }) } - } diff --git a/pkg/cache/cache_test.go b/pkg/cache/cache_test.go index a4e0bd0127a..4da9fd5bf7b 100644 --- a/pkg/cache/cache_test.go +++ b/pkg/cache/cache_test.go @@ -5,26 +5,27 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestCreateSetGet(t *testing.T) { err := CacheInit(CacheCfg{Name: "test", Size: 100, TTL: 1 * time.Second}) - assert.Empty(t, err) + require.NoError(t, err) //set & get err = SetKey("test", "testkey0", "testvalue1", nil) - assert.Empty(t, err) + require.NoError(t, err) ret, err := GetKey("test", "testkey0") assert.Equal(t, "testvalue1", ret) - assert.Empty(t, err) + require.NoError(t, 
err) //re-set err = SetKey("test", "testkey0", "testvalue2", nil) - assert.Empty(t, err) + require.NoError(t, err) assert.Equal(t, "testvalue1", ret) - assert.Empty(t, err) + require.NoError(t, err) //expire time.Sleep(1500 * time.Millisecond) ret, err = GetKey("test", "testkey0") assert.Equal(t, "", ret) - assert.Empty(t, err) + require.NoError(t, err) } diff --git a/pkg/csconfig/api.go b/pkg/csconfig/api.go index 5f2f8f9248b..d94d90aaf19 100644 --- a/pkg/csconfig/api.go +++ b/pkg/csconfig/api.go @@ -271,6 +271,7 @@ type LocalApiServerCfg struct { LogMaxSize int `yaml:"-"` LogMaxAge int `yaml:"-"` LogMaxFiles int `yaml:"-"` + LogFormat string `yaml:"-"` TrustedIPs []string `yaml:"trusted_ips,omitempty"` PapiLogLevel *log.Level `yaml:"papi_log_level"` DisableRemoteLapiRegistration bool `yaml:"disable_remote_lapi_registration,omitempty"` @@ -351,7 +352,7 @@ func (c *Config) LoadAPIServer(inCli bool) error { log.Printf("push and pull to Central API disabled") } - //Set default values for CAPI push/pull + // Set default values for CAPI push/pull if c.API.Server.OnlineClient != nil { if c.API.Server.OnlineClient.PullConfig.Community == nil { c.API.Server.OnlineClient.PullConfig.Community = ptr.Of(true) @@ -391,6 +392,7 @@ func (c *Config) LoadAPIServer(inCli bool) error { c.API.Server.CompressLogs = c.Common.CompressLogs c.API.Server.LogMaxSize = c.Common.LogMaxSize c.API.Server.LogMaxAge = c.Common.LogMaxAge + c.API.Server.LogFormat = c.Common.LogFormat c.API.Server.LogMaxFiles = c.Common.LogMaxFiles if c.API.Server.UseForwardedForHeaders && c.API.Server.TrustedProxies == nil { diff --git a/pkg/csconfig/common.go b/pkg/csconfig/common.go index 7e1ef6e5c98..e312756ce20 100644 --- a/pkg/csconfig/common.go +++ b/pkg/csconfig/common.go @@ -12,11 +12,12 @@ type CommonCfg struct { Daemonize bool PidDir string `yaml:"pid_dir,omitempty"` // TODO: This is just for backward compat. 
Remove this later LogMedia string `yaml:"log_media"` - LogDir string `yaml:"log_dir,omitempty"` //if LogMedia = file + LogDir string `yaml:"log_dir,omitempty"` // if LogMedia = file LogLevel *log.Level `yaml:"log_level"` WorkingDir string `yaml:"working_dir,omitempty"` // TODO: This is just for backward compat. Remove this later CompressLogs *bool `yaml:"compress_logs,omitempty"` LogMaxSize int `yaml:"log_max_size,omitempty"` + LogFormat string `yaml:"log_format,omitempty"` LogMaxAge int `yaml:"log_max_age,omitempty"` LogMaxFiles int `yaml:"log_max_files,omitempty"` ForceColorLogs bool `yaml:"force_color_logs,omitempty"` @@ -24,6 +25,7 @@ type CommonCfg struct { func (c *Config) loadCommon() error { var err error + if c.Common == nil { c.Common = &CommonCfg{} } @@ -32,13 +34,15 @@ func (c *Config) loadCommon() error { c.Common.LogMedia = "stdout" } - var CommonCleanup = []*string{ + CommonCleanup := []*string{ &c.Common.LogDir, } + for _, k := range CommonCleanup { if *k == "" { continue } + *k, err = filepath.Abs(*k) if err != nil { return fmt.Errorf("failed to get absolute path of '%s': %w", *k, err) diff --git a/pkg/csconfig/config.go b/pkg/csconfig/config.go index 3bbdf607187..b0784e5e6f3 100644 --- a/pkg/csconfig/config.go +++ b/pkg/csconfig/config.go @@ -30,7 +30,7 @@ var globalConfig = Config{} // Config contains top-level defaults -> overridden by configuration file -> overridden by CLI flags type Config struct { // just a path to ourselves :p - FilePath *string `yaml:"-"` + FilePath string `yaml:"-"` Self []byte `yaml:"-"` Common *CommonCfg `yaml:"common,omitempty"` Prometheus *PrometheusCfg `yaml:"prometheus,omitempty"` @@ -45,9 +45,10 @@ type Config struct { Hub *LocalHubCfg `yaml:"-"` } -func NewConfig(configFile string, disableAgent bool, disableAPI bool, inCli bool) (*Config, string, error) { +// NewConfig +func NewConfig(configFile string, disableAgent bool, disableAPI bool, quiet bool) (*Config, string, error) { patcher := 
yamlpatch.NewPatcher(configFile, ".local") - patcher.SetQuiet(inCli) + patcher.SetQuiet(quiet) fcontent, err := patcher.MergedPatchContent() if err != nil { @@ -56,7 +57,7 @@ func NewConfig(configFile string, disableAgent bool, disableAPI bool, inCli bool configData := csstring.StrictExpand(string(fcontent), os.LookupEnv) cfg := Config{ - FilePath: &configFile, + FilePath: configFile, DisableAgent: disableAgent, DisableAPI: disableAPI, } diff --git a/pkg/csconfig/cscli.go b/pkg/csconfig/cscli.go index 9393156c0ed..ad119dc9e13 100644 --- a/pkg/csconfig/cscli.go +++ b/pkg/csconfig/cscli.go @@ -10,6 +10,7 @@ type CscliCfg struct { Color string `yaml:"color,omitempty"` HubBranch string `yaml:"hub_branch"` HubURLTemplate string `yaml:"__hub_url_template__,omitempty"` + HubWithContent bool `yaml:"hub_with_content,omitempty"` SimulationConfig *SimulationConfig `yaml:"-"` DbConfig *DatabaseCfg `yaml:"-"` diff --git a/pkg/csconfig/fflag.go b/pkg/csconfig/fflag.go index c86686889eb..ec1282c5a04 100644 --- a/pkg/csconfig/fflag.go +++ b/pkg/csconfig/fflag.go @@ -38,7 +38,7 @@ func LoadFeatureFlagsFile(configPath string, logger *log.Logger) error { func ListFeatureFlags() string { enabledFeatures := fflag.Crowdsec.GetEnabledFeatures() - msg := "" + msg := "none" if len(enabledFeatures) > 0 { msg = strings.Join(enabledFeatures, ", ") } diff --git a/pkg/csplugin/broker.go b/pkg/csplugin/broker.go index e996fa9b68c..f53c831e186 100644 --- a/pkg/csplugin/broker.go +++ b/pkg/csplugin/broker.go @@ -91,7 +91,6 @@ func (pb *PluginBroker) Init(ctx context.Context, pluginCfg *csconfig.PluginCfg, pb.watcher = PluginWatcher{} pb.watcher.Init(pb.pluginConfigByName, pb.alertsByPluginName) return nil - } func (pb *PluginBroker) Kill() { @@ -166,6 +165,7 @@ func (pb *PluginBroker) addProfileAlert(profileAlert ProfileAlert) { pb.watcher.Inserts <- pluginName } } + func (pb *PluginBroker) profilesContainPlugin(pluginName string) bool { for _, profileCfg := range pb.profileConfigs { for _, name 
:= range profileCfg.Notifications { @@ -176,6 +176,7 @@ func (pb *PluginBroker) profilesContainPlugin(pluginName string) bool { } return false } + func (pb *PluginBroker) loadConfig(path string) error { files, err := listFilesAtPath(path) if err != nil { @@ -277,7 +278,6 @@ func (pb *PluginBroker) loadPlugins(ctx context.Context, path string) error { } func (pb *PluginBroker) loadNotificationPlugin(name string, binaryPath string) (protobufs.NotifierServer, error) { - handshake, err := getHandshake() if err != nil { return nil, err diff --git a/pkg/csplugin/listfiles_test.go b/pkg/csplugin/listfiles_test.go index c476d7a4e4a..32269f3f5f1 100644 --- a/pkg/csplugin/listfiles_test.go +++ b/pkg/csplugin/listfiles_test.go @@ -12,19 +12,22 @@ import ( ) func TestListFilesAtPath(t *testing.T) { - dir, err := os.MkdirTemp("", "test-listfiles") - require.NoError(t, err) - t.Cleanup(func() { - os.RemoveAll(dir) - }) - _, err = os.Create(filepath.Join(dir, "notification-gitter")) + dir := t.TempDir() + + f, err := os.Create(filepath.Join(dir, "notification-gitter")) require.NoError(t, err) - _, err = os.Create(filepath.Join(dir, "slack")) + require.NoError(t, f.Close()) + + f, err = os.Create(filepath.Join(dir, "slack")) require.NoError(t, err) + require.NoError(t, f.Close()) + err = os.Mkdir(filepath.Join(dir, "somedir"), 0o755) require.NoError(t, err) - _, err = os.Create(filepath.Join(dir, "somedir", "inner")) + + f, err = os.Create(filepath.Join(dir, "somedir", "inner")) require.NoError(t, err) + require.NoError(t, f.Close()) tests := []struct { name string diff --git a/pkg/csplugin/watcher_test.go b/pkg/csplugin/watcher_test.go index 84e63ec6493..9868b8433c3 100644 --- a/pkg/csplugin/watcher_test.go +++ b/pkg/csplugin/watcher_test.go @@ -15,13 +15,12 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/models" ) -func resetTestTomb(testTomb *tomb.Tomb, pw *PluginWatcher) { +func resetTestTomb(t *testing.T, testTomb *tomb.Tomb, pw *PluginWatcher) { testTomb.Kill(nil) 
<-pw.PluginEvents - if err := testTomb.Wait(); err != nil { - log.Fatal(err) - } + err := testTomb.Wait() + require.NoError(t, err) } func resetWatcherAlertCounter(pw *PluginWatcher) { @@ -72,7 +71,7 @@ func TestPluginWatcherInterval(t *testing.T) { err := listenChannelWithTimeout(ct, pw.PluginEvents) cstest.RequireErrorContains(t, err, "context deadline exceeded") - resetTestTomb(&testTomb, &pw) + resetTestTomb(t, &testTomb, &pw) testTomb = tomb.Tomb{} pw.Start(&testTomb) @@ -81,7 +80,7 @@ func TestPluginWatcherInterval(t *testing.T) { err = listenChannelWithTimeout(ct, pw.PluginEvents) require.NoError(t, err) - resetTestTomb(&testTomb, &pw) + resetTestTomb(t, &testTomb, &pw) // This is to avoid the int complaining } @@ -130,5 +129,5 @@ func TestPluginAlertCountWatcher(t *testing.T) { err = listenChannelWithTimeout(ct, pw.PluginEvents) require.NoError(t, err) - resetTestTomb(&testTomb, &pw) + resetTestTomb(t, &testTomb, &pw) } diff --git a/pkg/csprofiles/csprofiles.go b/pkg/csprofiles/csprofiles.go index 52cda1ed2e1..c509fb448e3 100644 --- a/pkg/csprofiles/csprofiles.go +++ b/pkg/csprofiles/csprofiles.go @@ -96,17 +96,17 @@ func NewProfile(profilesCfg []*csconfig.ProfileCfg) ([]*Runtime, error) { return profilesRuntime, nil } -func (Profile *Runtime) GenerateDecisionFromProfile(Alert *models.Alert) ([]*models.Decision, error) { +func (profile *Runtime) GenerateDecisionFromProfile(alert *models.Alert) ([]*models.Decision, error) { var decisions []*models.Decision - for _, refDecision := range Profile.Cfg.Decisions { + for _, refDecision := range profile.Cfg.Decisions { decision := models.Decision{} /*the reference decision from profile is in simulated mode */ if refDecision.Simulated != nil && *refDecision.Simulated { decision.Simulated = new(bool) *decision.Simulated = true /*the event is already in simulation mode */ - } else if Alert.Simulated != nil && *Alert.Simulated { + } else if alert.Simulated != nil && *alert.Simulated { decision.Simulated = new(bool) 
*decision.Simulated = true } @@ -116,7 +116,7 @@ func (Profile *Runtime) GenerateDecisionFromProfile(Alert *models.Alert) ([]*mod if refDecision.Scope != nil && *refDecision.Scope != "" { *decision.Scope = *refDecision.Scope } else { - *decision.Scope = *Alert.Source.Scope + *decision.Scope = *alert.Source.Scope } /*some fields are populated from the reference object : duration, scope, type*/ @@ -125,19 +125,19 @@ func (Profile *Runtime) GenerateDecisionFromProfile(Alert *models.Alert) ([]*mod *decision.Duration = *refDecision.Duration } - if Profile.Cfg.DurationExpr != "" && Profile.RuntimeDurationExpr != nil { + if profile.Cfg.DurationExpr != "" && profile.RuntimeDurationExpr != nil { profileDebug := false - if Profile.Cfg.Debug != nil && *Profile.Cfg.Debug { + if profile.Cfg.Debug != nil && *profile.Cfg.Debug { profileDebug = true } - duration, err := exprhelpers.Run(Profile.RuntimeDurationExpr, map[string]interface{}{"Alert": Alert}, Profile.Logger, profileDebug) + duration, err := exprhelpers.Run(profile.RuntimeDurationExpr, map[string]interface{}{"Alert": alert}, profile.Logger, profileDebug) if err != nil { - Profile.Logger.Warningf("Failed to run duration_expr : %v", err) + profile.Logger.Warningf("Failed to run duration_expr : %v", err) } else { durationStr := fmt.Sprint(duration) if _, err := time.ParseDuration(durationStr); err != nil { - Profile.Logger.Warningf("Failed to parse expr duration result '%s'", duration) + profile.Logger.Warningf("Failed to parse expr duration result '%s'", duration) } else { *decision.Duration = durationStr } @@ -149,7 +149,7 @@ func (Profile *Runtime) GenerateDecisionFromProfile(Alert *models.Alert) ([]*mod /*for the others, let's populate it from the alert and its source*/ decision.Value = new(string) - *decision.Value = *Alert.Source.Value + *decision.Value = *alert.Source.Value decision.Origin = new(string) *decision.Origin = types.CrowdSecOrigin @@ -158,7 +158,7 @@ func (Profile *Runtime) 
GenerateDecisionFromProfile(Alert *models.Alert) ([]*mod } decision.Scenario = new(string) - *decision.Scenario = *Alert.Scenario + *decision.Scenario = *alert.Scenario decisions = append(decisions, &decision) } @@ -166,21 +166,21 @@ func (Profile *Runtime) GenerateDecisionFromProfile(Alert *models.Alert) ([]*mod } // EvaluateProfile is going to evaluate an Alert against a profile to generate Decisions -func (Profile *Runtime) EvaluateProfile(Alert *models.Alert) ([]*models.Decision, bool, error) { +func (profile *Runtime) EvaluateProfile(alert *models.Alert) ([]*models.Decision, bool, error) { var decisions []*models.Decision matched := false - for eIdx, expression := range Profile.RuntimeFilters { + for eIdx, expression := range profile.RuntimeFilters { debugProfile := false - if Profile.Cfg.Debug != nil && *Profile.Cfg.Debug { + if profile.Cfg.Debug != nil && *profile.Cfg.Debug { debugProfile = true } - output, err := exprhelpers.Run(expression, map[string]interface{}{"Alert": Alert}, Profile.Logger, debugProfile) + output, err := exprhelpers.Run(expression, map[string]interface{}{"Alert": alert}, profile.Logger, debugProfile) if err != nil { - Profile.Logger.Warningf("failed to run profile expr for %s: %v", Profile.Cfg.Name, err) - return nil, matched, fmt.Errorf("while running expression %s: %w", Profile.Cfg.Filters[eIdx], err) + profile.Logger.Warningf("failed to run profile expr for %s: %v", profile.Cfg.Name, err) + return nil, matched, fmt.Errorf("while running expression %s: %w", profile.Cfg.Filters[eIdx], err) } switch out := output.(type) { @@ -188,22 +188,22 @@ func (Profile *Runtime) EvaluateProfile(Alert *models.Alert) ([]*models.Decision if out { matched = true /*the expression matched, create the associated decision*/ - subdecisions, err := Profile.GenerateDecisionFromProfile(Alert) + subdecisions, err := profile.GenerateDecisionFromProfile(alert) if err != nil { - return nil, matched, fmt.Errorf("while generating decision from profile %s: %w", 
Profile.Cfg.Name, err) + return nil, matched, fmt.Errorf("while generating decision from profile %s: %w", profile.Cfg.Name, err) } decisions = append(decisions, subdecisions...) } else { - Profile.Logger.Debugf("Profile %s filter is unsuccessful", Profile.Cfg.Name) + profile.Logger.Debugf("Profile %s filter is unsuccessful", profile.Cfg.Name) - if Profile.Cfg.OnFailure == "break" { + if profile.Cfg.OnFailure == "break" { break } } default: - return nil, matched, fmt.Errorf("unexpected type %t (%v) while running '%s'", output, output, Profile.Cfg.Filters[eIdx]) + return nil, matched, fmt.Errorf("unexpected type %t (%v) while running '%s'", output, output, profile.Cfg.Filters[eIdx]) } } diff --git a/pkg/csprofiles/csprofiles_test.go b/pkg/csprofiles/csprofiles_test.go index 0247243ddd3..dc3239fe5c1 100644 --- a/pkg/csprofiles/csprofiles_test.go +++ b/pkg/csprofiles/csprofiles_test.go @@ -119,7 +119,8 @@ func TestEvaluateProfile(t *testing.T) { Alert *models.Alert } - exprhelpers.Init(nil) + err := exprhelpers.Init(nil) + require.NoError(t, err) tests := []struct { name string @@ -132,7 +133,7 @@ func TestEvaluateProfile(t *testing.T) { name: "simple pass single expr", args: args{ profileCfg: &csconfig.ProfileCfg{ - Filters: []string{fmt.Sprintf("Alert.GetScenario() == \"%s\"", scenario)}, + Filters: []string{fmt.Sprintf("Alert.GetScenario() == %q", scenario)}, Debug: &boolFalse, }, Alert: &models.Alert{Remediation: true, Scenario: &scenario}, @@ -199,17 +200,22 @@ func TestEvaluateProfile(t *testing.T) { profilesCfg := []*csconfig.ProfileCfg{ tt.args.profileCfg, } + profile, err := NewProfile(profilesCfg) if err != nil { t.Errorf("failed to get newProfile : %+v", err) } + got, got1, _ := profile[0].EvaluateProfile(tt.args.Alert) + if !reflect.DeepEqual(len(got), tt.expectedDecisionCount) { t.Errorf("EvaluateProfile() got = %+v, want %+v", got, tt.expectedDecisionCount) } + if got1 != tt.expectedMatchStatus { t.Errorf("EvaluateProfile() got1 = %v, want %v", got1, 
tt.expectedMatchStatus) } + if tt.expectedDuration != "" { require.Equal(t, tt.expectedDuration, *got[0].Duration, "The two durations should be the same") } diff --git a/pkg/cticlient/example/fire.go b/pkg/cticlient/example/fire.go index e52922571ef..598175ce02c 100644 --- a/pkg/cticlient/example/fire.go +++ b/pkg/cticlient/example/fire.go @@ -57,6 +57,12 @@ func main() { }) } } - csvWriter.Write(csvHeader) - csvWriter.WriteAll(allItems) + + if err = csvWriter.Write(csvHeader); err != nil { + panic(err) + } + + if err = csvWriter.WriteAll(allItems); err != nil { + panic(err) + } } diff --git a/pkg/cticlient/types.go b/pkg/cticlient/types.go index 2ad0a6eb34e..5ea29d6c5b0 100644 --- a/pkg/cticlient/types.go +++ b/pkg/cticlient/types.go @@ -64,6 +64,9 @@ type CTIReferences struct { type SmokeItem struct { IpRangeScore int `json:"ip_range_score"` Ip string `json:"ip"` + Reputation string `json:"reputation"` + BackgroundNoise string `json:"background_noise"` + Confidence string `json:"confidence"` IpRange *string `json:"ip_range"` AsName *string `json:"as_name"` AsNum *int `json:"as_num"` @@ -77,6 +80,7 @@ type SmokeItem struct { BackgroundNoiseScore *int `json:"background_noise_score"` Scores CTIScores `json:"scores"` References []CTIReferences `json:"references"` + CVEs []string `json:"cves"` IsOk bool `json:"-"` } @@ -120,6 +124,10 @@ type FireItem struct { BackgroundNoiseScore *int `json:"background_noise_score"` Scores CTIScores `json:"scores"` References []CTIReferences `json:"references"` + CVEs []string `json:"cves"` + Reputation string `json:"reputation"` + BackgroundNoise string `json:"background_noise"` + Confidence string `json:"confidence"` State string `json:"state"` Expiration CustomTime `json:"expiration"` } @@ -209,8 +217,19 @@ func (c *SmokeItem) GetFalsePositives() []string { return ret } -func (c *SmokeItem) IsFalsePositive() bool { +func (c *SmokeItem) GetClassifications() []string { + ret := make([]string, 0) + if c.Classifications.Classifications 
!= nil { + for _, b := range c.Classifications.Classifications { + ret = append(ret, b.Name) + } + } + + return ret +} + +func (c *SmokeItem) IsFalsePositive() bool { if c.Classifications.FalsePositives != nil { if len(c.Classifications.FalsePositives) > 0 { return true @@ -283,8 +302,19 @@ func (c *FireItem) GetFalsePositives() []string { return ret } -func (c *FireItem) IsFalsePositive() bool { +func (c *FireItem) GetClassifications() []string { + ret := make([]string, 0) + if c.Classifications.Classifications != nil { + for _, b := range c.Classifications.Classifications { + ret = append(ret, b.Name) + } + } + + return ret +} + +func (c *FireItem) IsFalsePositive() bool { if c.Classifications.FalsePositives != nil { if len(c.Classifications.FalsePositives) > 0 { return true diff --git a/pkg/cticlient/types_test.go b/pkg/cticlient/types_test.go index a7308af35e0..9c7840de324 100644 --- a/pkg/cticlient/types_test.go +++ b/pkg/cticlient/types_test.go @@ -40,8 +40,14 @@ func getSampleSmokeItem() SmokeItem { DaysAge: 1, }, Classifications: CTIClassifications{ - FalsePositives: []CTIClassification{}, - Classifications: []CTIClassification{}, + FalsePositives: []CTIClassification{}, + Classifications: []CTIClassification{ + { + Name: "profile:likely_botnet", + Label: "Likely Botnet", + Description: "IP appears to be a botnet.", + }, + }, }, AttackDetails: []*CTIAttackDetails{ { @@ -101,6 +107,7 @@ func TestBasicSmokeItem(t *testing.T) { assert.Equal(t, 3, item.GetBackgroundNoiseScore()) assert.Equal(t, []string{}, item.GetFalsePositives()) assert.False(t, item.IsFalsePositive()) + assert.Equal(t, []string{"profile:likely_botnet"}, item.GetClassifications()) } func TestEmptySmokeItem(t *testing.T) { @@ -112,4 +119,5 @@ func TestEmptySmokeItem(t *testing.T) { assert.Equal(t, 0, item.GetBackgroundNoiseScore()) assert.Equal(t, []string{}, item.GetFalsePositives()) assert.False(t, item.IsFalsePositive()) + assert.Equal(t, []string{}, item.GetClassifications()) } diff --git 
a/pkg/cwhub/cwhub.go b/pkg/cwhub/cwhub.go index 683f1853b43..b41d1d16312 100644 --- a/pkg/cwhub/cwhub.go +++ b/pkg/cwhub/cwhub.go @@ -20,14 +20,14 @@ func (t *hubTransport) RoundTrip(req *http.Request) (*http.Response, error) { return t.RoundTripper.RoundTrip(req) } -// hubClient is the HTTP client used to communicate with the CrowdSec Hub. -var hubClient = &http.Client{ +// HubClient is the HTTP client used to communicate with the CrowdSec Hub. +var HubClient = &http.Client{ Timeout: 120 * time.Second, Transport: &hubTransport{http.DefaultTransport}, } -// safePath returns a joined path and ensures that it does not escape the base directory. -func safePath(dir, filePath string) (string, error) { +// SafePath returns a joined path and ensures that it does not escape the base directory. +func SafePath(dir, filePath string) (string, error) { absBaseDir, err := filepath.Abs(filepath.Clean(dir)) if err != nil { return "", err diff --git a/pkg/cwhub/cwhub_test.go b/pkg/cwhub/cwhub_test.go index 17e7a0dc723..befd279ff65 100644 --- a/pkg/cwhub/cwhub_test.go +++ b/pkg/cwhub/cwhub_test.go @@ -29,10 +29,9 @@ const mockURLTemplate = "https://cdn-hub.crowdsec.net/crowdsecurity/%s/%s" var responseByPath map[string]string -// testHub initializes a temporary hub with an empty json file, optionally updating it. -func testHub(t *testing.T, update bool) *Hub { - tmpDir, err := os.MkdirTemp("", "testhub") - require.NoError(t, err) +// testHubOld initializes a temporary hub with an empty json file, optionally updating it. 
+func testHubOld(t *testing.T, update bool) *Hub { + tmpDir := t.TempDir() local := &csconfig.LocalHubCfg{ HubDir: filepath.Join(tmpDir, "crowdsec", "hub"), @@ -41,7 +40,7 @@ func testHub(t *testing.T, update bool) *Hub { InstallDataDir: filepath.Join(tmpDir, "installed-data"), } - err = os.MkdirAll(local.HubDir, 0o700) + err := os.MkdirAll(local.HubDir, 0o700) require.NoError(t, err) err = os.MkdirAll(local.InstallDir, 0o700) @@ -53,22 +52,17 @@ func testHub(t *testing.T, update bool) *Hub { err = os.WriteFile(local.HubIndexFile, []byte("{}"), 0o644) require.NoError(t, err) - t.Cleanup(func() { - os.RemoveAll(tmpDir) - }) - - remote := &RemoteHubCfg{ - Branch: "master", - URLTemplate: mockURLTemplate, - IndexPath: ".index.json", - } - - hub, err := NewHub(local, remote, log.StandardLogger()) + hub, err := NewHub(local, log.StandardLogger()) require.NoError(t, err) if update { + indexProvider := &Downloader{ + Branch: "master", + URLTemplate: mockURLTemplate, + } + ctx := context.Background() - err := hub.Update(ctx) + err = hub.Update(ctx, indexProvider, false) require.NoError(t, err) } @@ -83,16 +77,16 @@ func envSetup(t *testing.T) *Hub { setResponseByPath() log.SetLevel(log.DebugLevel) - defaultTransport := hubClient.Transport + defaultTransport := HubClient.Transport t.Cleanup(func() { - hubClient.Transport = defaultTransport + HubClient.Transport = defaultTransport }) // Mock the http client - hubClient.Transport = newMockTransport() + HubClient.Transport = newMockTransport() - hub := testHub(t, true) + hub := testHubOld(t, true) return hub } diff --git a/pkg/cwhub/dataset.go b/pkg/cwhub/dataset.go deleted file mode 100644 index 90bc9e057f9..00000000000 --- a/pkg/cwhub/dataset.go +++ /dev/null @@ -1,72 +0,0 @@ -package cwhub - -import ( - "context" - "errors" - "fmt" - "io" - "time" - - "github.com/sirupsen/logrus" - "gopkg.in/yaml.v3" - - "github.com/crowdsecurity/go-cs-lib/downloader" - - "github.com/crowdsecurity/crowdsec/pkg/types" -) - -// The DataSet is 
a list of data sources required by an item (built from the data: section in the yaml). -type DataSet struct { - Data []types.DataSource `yaml:"data,omitempty"` -} - -// downloadDataSet downloads all the data files for an item. -func downloadDataSet(ctx context.Context, dataFolder string, force bool, reader io.Reader, logger *logrus.Logger) error { - dec := yaml.NewDecoder(reader) - - for { - data := &DataSet{} - - if err := dec.Decode(data); err != nil { - if errors.Is(err, io.EOF) { - break - } - - return fmt.Errorf("while reading file: %w", err) - } - - for _, dataS := range data.Data { - destPath, err := safePath(dataFolder, dataS.DestPath) - if err != nil { - return err - } - - d := downloader. - New(). - WithHTTPClient(hubClient). - ToFile(destPath). - CompareContent(). - WithLogger(logrus.WithField("url", dataS.SourceURL)) - - if !force { - d = d.WithLastModified(). - WithShelfLife(7 * 24 * time.Hour) - } - - downloaded, err := d.Download(ctx, dataS.SourceURL) - if err != nil { - return fmt.Errorf("while getting data: %w", err) - } - - if downloaded { - logger.Infof("Downloaded %s", destPath) - // a check on stdout is used while scripting to know if the hub has been upgraded - // and a configuration reload is required - // TODO: use a better way to communicate this - fmt.Printf("updated %s\n", destPath) - } - } - } - - return nil -} diff --git a/pkg/cwhub/doc.go b/pkg/cwhub/doc.go index f86b95c6454..fb7209b77ae 100644 --- a/pkg/cwhub/doc.go +++ b/pkg/cwhub/doc.go @@ -1,4 +1,5 @@ -// Package cwhub is responsible for installing and upgrading the local hub files for CrowdSec. +// Package cwhub is responsible for providing the state of the local hub to the security engine and cscli command. +// Installation, upgrade and removal of items or data files has been moved to pkg/hubops. 
// // # Definitions // @@ -84,31 +85,11 @@ // return fmt.Errorf("collection not found") // } // -// You can also install items if they have already been downloaded: +// Some commands require an object to provide the hub index, or contents: // -// // install a parser -// force := false -// downloadOnly := false -// err := parser.Install(force, downloadOnly) -// if err != nil { -// return fmt.Errorf("unable to install parser: %w", err) -// } -// -// As soon as you try to install an item that is not downloaded or is not up-to-date (meaning its computed hash -// does not correspond to the latest version available in the index), a download will be attempted and you'll -// get the error "remote hub configuration is not provided". -// -// To provide the remote hub configuration, use the second parameter of NewHub(): -// -// remoteHub := cwhub.RemoteHubCfg{ +// indexProvider := cwhub.Downloader{ // URLTemplate: "https://cdn-hub.crowdsec.net/crowdsecurity/%s/%s", // Branch: "master", -// IndexPath: ".index.json", -// } -// -// hub, err := cwhub.NewHub(localHub, remoteHub, logger) -// if err != nil { -// return fmt.Errorf("unable to initialize hub: %w", err) // } // // The URLTemplate is a string that will be used to build the URL of the remote hub. 
It must contain two @@ -116,7 +97,7 @@ // // Before calling hub.Load(), you can update the index file by calling the Update() method: // -// err := hub.Update(context.Background()) +// err := hub.Update(context.Background(), indexProvider) // if err != nil { // return fmt.Errorf("unable to update hub index: %w", err) // } diff --git a/pkg/cwhub/download.go b/pkg/cwhub/download.go new file mode 100644 index 00000000000..fa92e9960de --- /dev/null +++ b/pkg/cwhub/download.go @@ -0,0 +1,126 @@ +package cwhub + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + + "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/go-cs-lib/downloader" +) + +// no need to import the lib package to use this +type NotFoundError = downloader.NotFoundError + +// Downloader is used to retrieve index and items from a remote hub, with cache control. +type Downloader struct { + Branch string + URLTemplate string +} + +// IndexProvider retrieves and writes .index.json +type IndexProvider interface { + FetchIndex(ctx context.Context, indexFile string, withContent bool, logger *logrus.Logger) (bool, error) +} + +// ContentProvider retrieves and writes the YAML files with the item content. +type ContentProvider interface { + FetchContent(ctx context.Context, remotePath, destPath, wantHash string, logger *logrus.Logger) (bool, string, error) +} + +// urlTo builds the URL to download a file from the remote hub. +func (d *Downloader) urlTo(remotePath string) (string, error) { + // the template must contain two string placeholders + if fmt.Sprintf(d.URLTemplate, "%s", "%s") != d.URLTemplate { + return "", fmt.Errorf("invalid URL template '%s'", d.URLTemplate) + } + + return fmt.Sprintf(d.URLTemplate, d.Branch, remotePath), nil +} + +// addURLParam adds a parameter with a value (ex. "with_content=true") to the URL if it's not already present. 
+func addURLParam(rawURL string, param string, value string) (string, error) { + parsedURL, err := url.Parse(rawURL) + if err != nil { + return "", fmt.Errorf("failed to parse URL: %w", err) + } + + query := parsedURL.Query() + + if _, exists := query[param]; !exists { + query.Add(param, value) + } + + parsedURL.RawQuery = query.Encode() + + return parsedURL.String(), nil +} + +// FetchIndex downloads the index from the hub and writes it to the filesystem. +// It uses a temporary file to avoid partial downloads, and won't overwrite the original +// if it has not changed. +func (d *Downloader) FetchIndex(ctx context.Context, destPath string, withContent bool, logger *logrus.Logger) (bool, error) { + url, err := d.urlTo(".index.json") + if err != nil { + return false, fmt.Errorf("failed to build hub index request: %w", err) + } + + if withContent { + url, err = addURLParam(url, "with_content", "true") + if err != nil { + return false, fmt.Errorf("failed to add 'with_content' parameter to URL: %w", err) + } + } + + downloaded, err := downloader. + New(). + WithHTTPClient(HubClient). + ToFile(destPath). + WithETagFn(downloader.SHA256). + CompareContent(). + WithLogger(logger.WithField("url", url)). + BeforeRequest(func(_ *http.Request) { + fmt.Println("Downloading " + destPath) + }). + Download(ctx, url) + if err != nil { + return false, err + } + + return downloaded, nil +} + +// FetchContent downloads the content to the specified path, through a temporary file +// to avoid partial downloads. +// If the hash does not match, it will not overwrite and log a warning. +func (d *Downloader) FetchContent(ctx context.Context, remotePath, destPath, wantHash string, logger *logrus.Logger) (bool, string, error) { + url, err := d.urlTo(remotePath) + if err != nil { + return false, "", fmt.Errorf("failed to build request: %w", err) + } + + downloaded, err := downloader. + New(). + WithHTTPClient(HubClient). + ToFile(destPath). + WithETagFn(downloader.SHA256). 
+ WithMakeDirs(true). + WithLogger(logger.WithField("url", url)). + CompareContent(). + VerifyHash("sha256", wantHash). + Download(ctx, url) + + var hasherr downloader.HashMismatchError + + switch { + case errors.As(err, &hasherr): + logger.Warnf("%s. The index file is outdated, please run 'cscli hub update' and try again", err.Error()) + case err != nil: + return false, "", err + } + + return downloaded, url, nil +} diff --git a/pkg/cwhub/download_test.go b/pkg/cwhub/download_test.go new file mode 100644 index 00000000000..7b0b99c28d8 --- /dev/null +++ b/pkg/cwhub/download_test.go @@ -0,0 +1,182 @@ +package cwhub + +import ( + "context" + "io" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "testing" + + "github.com/sirupsen/logrus" + logtest "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/crowdsecurity/go-cs-lib/cstest" +) + +func TestFetchIndex(t *testing.T) { + ctx := context.Background() + + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/main/.index.json" { + w.WriteHeader(http.StatusNotFound) + } + + if r.URL.Query().Get("with_content") == "true" { + _, err := w.Write([]byte(`Hi I'm an index with content`)) + assert.NoError(t, err) + } else { + _, err := w.Write([]byte(`Hi I'm a minified index`)) + assert.NoError(t, err) + } + })) + defer mockServer.Close() + + discard := logrus.New() + discard.Out = io.Discard + + downloader := &Downloader{ + URLTemplate: mockServer.URL + "/%s/%s", + } + + destPath := filepath.Join(t.TempDir(), "index-here") + withContent := true + + var notFoundError NotFoundError + + // bad branch + + downloader.Branch = "dev" + + downloaded, err := downloader.FetchIndex(ctx, destPath, withContent, discard) + require.ErrorAs(t, err, ¬FoundError) + assert.False(t, downloaded) + + // ok + + downloader.Branch = "main" + + downloaded, err = downloader.FetchIndex(ctx, 
destPath, withContent, discard) + require.NoError(t, err) + assert.True(t, downloaded) + + content, err := os.ReadFile(destPath) + require.NoError(t, err) + assert.Equal(t, "Hi I'm an index with content", string(content)) + + // not "downloading" a second time + // since we don't have cache control in the mockServer, + // the file is downloaded to a temporary location but not replaced + + downloaded, err = downloader.FetchIndex(ctx, destPath, withContent, discard) + require.NoError(t, err) + assert.False(t, downloaded) + + // download without item content + + downloaded, err = downloader.FetchIndex(ctx, destPath, !withContent, discard) + require.NoError(t, err) + assert.True(t, downloaded) + + content, err = os.ReadFile(destPath) + require.NoError(t, err) + assert.Equal(t, "Hi I'm a minified index", string(content)) + + // bad domain name + + downloader.URLTemplate = "x/%s/%s" + downloaded, err = downloader.FetchIndex(ctx, destPath, !withContent, discard) + cstest.AssertErrorContains(t, err, `Get "x/main/.index.json": unsupported protocol scheme ""`) + assert.False(t, downloaded) + + downloader.URLTemplate = "http://x/%s/%s" + downloaded, err = downloader.FetchIndex(ctx, destPath, !withContent, discard) + // can be no such host, server misbehaving, etc + cstest.AssertErrorContains(t, err, `Get "http://x/main/.index.json": dial tcp: lookup x`) + assert.False(t, downloaded) +} + +func TestFetchContent(t *testing.T) { + ctx := context.Background() + + wantContent := "{'description':'linux'}" + wantHash := "e557cb9e1cb051bc3b6a695e4396c5f8e0eff4b7b0d2cc09f7684e1d52ea2224" + remotePath := "collections/crowdsecurity/linux.yaml" + + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/main/"+remotePath { + w.WriteHeader(http.StatusNotFound) + } + + _, err := w.Write([]byte(wantContent)) + assert.NoError(t, err) + })) + defer mockServer.Close() + + wantURL := mockServer.URL + 
"/main/collections/crowdsecurity/linux.yaml" + + // bad branch + + hubDownloader := &Downloader{ + URLTemplate: mockServer.URL + "/%s/%s", + } + + discard := logrus.New() + discard.Out = io.Discard + + destPath := filepath.Join(t.TempDir(), "content-here") + + var notFoundError NotFoundError + + // bad branch + + hubDownloader.Branch = "dev" + + downloaded, url, err := hubDownloader.FetchContent(ctx, remotePath, destPath, wantHash, discard) + assert.Empty(t, url) + require.ErrorAs(t, err, ¬FoundError) + assert.False(t, downloaded) + + // bad path + + hubDownloader.Branch = "main" + + downloaded, url, err = hubDownloader.FetchContent(ctx, "collections/linux.yaml", destPath, wantHash, discard) + assert.Empty(t, url) + require.ErrorAs(t, err, ¬FoundError) + assert.False(t, downloaded) + + // hash mismatch: the file is not reported as downloaded because it's not replaced + + capture, hook := logtest.NewNullLogger() + capture.SetLevel(logrus.WarnLevel) + + downloaded, url, err = hubDownloader.FetchContent(ctx, remotePath, destPath, "1234", capture) + assert.Equal(t, wantURL, url) + require.NoError(t, err) + assert.False(t, downloaded) + cstest.RequireLogContains(t, hook, "hash mismatch: expected 1234, got "+wantHash) + + // ok + + downloaded, url, err = hubDownloader.FetchContent(ctx, remotePath, destPath, wantHash, discard) + assert.Equal(t, wantURL, url) + require.NoError(t, err) + assert.True(t, downloaded) + + content, err := os.ReadFile(destPath) + require.NoError(t, err) + assert.Equal(t, wantContent, string(content)) + + // not "downloading" a second time + // since we don't have cache control in the mockServer, + // the file is downloaded to a temporary location but not replaced + + downloaded, url, err = hubDownloader.FetchContent(ctx, remotePath, destPath, wantHash, discard) + assert.Equal(t, wantURL, url) + require.NoError(t, err) + assert.False(t, downloaded) +} diff --git a/pkg/cwhub/errors.go b/pkg/cwhub/errors.go deleted file mode 100644 index 
b0be444fcba..00000000000 --- a/pkg/cwhub/errors.go +++ /dev/null @@ -1,19 +0,0 @@ -package cwhub - -import ( - "errors" - "fmt" -) - -// ErrNilRemoteHub is returned when trying to download with a local-only configuration. -var ErrNilRemoteHub = errors.New("remote hub configuration is not provided. Please report this issue to the developers") - -// IndexNotFoundError is returned when the remote hub index is not found. -type IndexNotFoundError struct { - URL string - Branch string -} - -func (e IndexNotFoundError) Error() string { - return fmt.Sprintf("index not found at %s, branch '%s'. Please check the .cscli.hub_branch value if you specified it in config.yaml, or use 'master' if not sure", e.URL, e.Branch) -} diff --git a/pkg/cwhub/fetch.go b/pkg/cwhub/fetch.go new file mode 100644 index 00000000000..e8dacad4a6d --- /dev/null +++ b/pkg/cwhub/fetch.go @@ -0,0 +1,70 @@ +package cwhub + +import ( + "context" + "crypto" + "encoding/base64" + "encoding/hex" + "fmt" + "os" + "path/filepath" +) + +// writeEmbeddedContentTo writes the embedded content to the specified path and checks the hash. +// If the content is base64 encoded, it will be decoded before writing. Call this method only +// if item.Content is not empty. +func (i *Item) writeEmbeddedContentTo(destPath, wantHash string) error { + if i.Content == "" { + return fmt.Errorf("no embedded content for %s", i.Name) + } + + content, err := base64.StdEncoding.DecodeString(i.Content) + if err != nil { + content = []byte(i.Content) + } + + dir := filepath.Dir(destPath) + + if err := os.MkdirAll(dir, 0o755); err != nil { + return fmt.Errorf("while creating %s: %w", dir, err) + } + + // check sha256 + hash := crypto.SHA256.New() + if _, err := hash.Write(content); err != nil { + return fmt.Errorf("while hashing %s: %w", i.Name, err) + } + + gotHash := hex.EncodeToString(hash.Sum(nil)) + if gotHash != wantHash { + return fmt.Errorf("hash mismatch: expected %s, got %s. 
The index file is invalid, please run 'cscli hub update' and try again", wantHash, gotHash) + } + + if err := os.WriteFile(destPath, content, 0o600); err != nil { + return fmt.Errorf("while writing %s: %w", destPath, err) + } + + return nil +} + +// FetchContentTo writes the last version of the item's YAML file to the specified path. +// If the file is embedded in the index file, it will be written directly without downloads. +// Returns whether the file was downloaded (to inform if the security engine needs reloading) +// and the remote url for feedback purposes. +func (i *Item) FetchContentTo(ctx context.Context, contentProvider ContentProvider, destPath string) (bool, string, error) { + wantHash := i.latestHash() + if wantHash == "" { + return false, "", fmt.Errorf("%s: latest hash missing from index. The index file is invalid, please run 'cscli hub update' and try again", i.FQName()) + } + + // Use the embedded content if available + if i.Content != "" { + if err := i.writeEmbeddedContentTo(destPath, wantHash); err != nil { + return false, "", err + } + + return true, fmt.Sprintf("(embedded in %s)", i.hub.local.HubIndexFile), nil + } + + return contentProvider.FetchContent(ctx, i.RemotePath, destPath, wantHash, i.hub.logger) +} diff --git a/pkg/cwhub/hub.go b/pkg/cwhub/hub.go index f74a794a512..aeccb3268f7 100644 --- a/pkg/cwhub/hub.go +++ b/pkg/cwhub/hub.go @@ -22,7 +22,6 @@ type Hub struct { items HubItems // Items read from HubDir and InstallDir pathIndex map[string]*Item local *csconfig.LocalHubCfg - remote *RemoteHubCfg logger *logrus.Logger Warnings []string // Warnings encountered during sync } @@ -35,10 +34,9 @@ func (h *Hub) GetDataDir() string { // NewHub returns a new Hub instance with local and (optionally) remote configuration. // The hub is not synced automatically. Load() must be called to read the index, sync the local state, // and check for unmanaged items. 
-// All download operations (including updateIndex) return ErrNilRemoteHub if the remote configuration is not set. -func NewHub(local *csconfig.LocalHubCfg, remote *RemoteHubCfg, logger *logrus.Logger) (*Hub, error) { +func NewHub(local *csconfig.LocalHubCfg, logger *logrus.Logger) (*Hub, error) { if local == nil { - return nil, errors.New("no hub configuration found") + return nil, errors.New("no hub configuration provided") } if logger == nil { @@ -48,7 +46,6 @@ func NewHub(local *csconfig.LocalHubCfg, remote *RemoteHubCfg, logger *logrus.Lo hub := &Hub{ local: local, - remote: remote, logger: logger, pathIndex: make(map[string]*Item, 0), } @@ -61,14 +58,10 @@ func (h *Hub) Load() error { h.logger.Debugf("loading hub idx %s", h.local.HubIndexFile) if err := h.parseIndex(); err != nil { - return fmt.Errorf("failed to load hub index: %w", err) + return fmt.Errorf("invalid hub index: %w. Run 'sudo cscli hub update' to download the index again", err) } - if err := h.localSync(); err != nil { - return fmt.Errorf("failed to sync hub items: %w", err) - } - - return nil + return h.localSync() } // parseIndex takes the content of an index file and fills the map of associated parsers/scenarios/collections. 
@@ -82,21 +75,25 @@ func (h *Hub) parseIndex() error { return fmt.Errorf("failed to parse index: %w", err) } - h.logger.Debugf("%d item types in hub index", len(ItemTypes)) - // Iterate over the different types to complete the struct for _, itemType := range ItemTypes { - h.logger.Tracef("%s: %d items", itemType, len(h.GetItemMap(itemType))) - for name, item := range h.GetItemMap(itemType) { - item.hub = h - item.Name = name + if item == nil { + // likely defined as empty object or null in the index file + return fmt.Errorf("%s:%s has no index metadata", itemType, name) + } - // if the item has no (redundant) author, take it from the json key - if item.Author == "" && strings.Contains(name, "/") { - item.Author = strings.Split(name, "/")[0] + if item.RemotePath == "" { + return fmt.Errorf("%s:%s has no download path", itemType, name) } + if (itemType == PARSERS || itemType == POSTOVERFLOWS) && item.Stage == "" { + return fmt.Errorf("%s:%s has no stage", itemType, name) + } + + item.hub = h + item.Name = name + item.Type = itemType item.FileName = path.Base(item.RemotePath) @@ -152,28 +149,29 @@ func (h *Hub) ItemStats() []string { return ret } -// Update downloads the latest version of the index and writes it to disk if it changed. It cannot be called after Load() -// unless the hub is completely empty. -func (h *Hub) Update(ctx context.Context) error { - if len(h.pathIndex) > 0 { +var ErrUpdateAfterSync = errors.New("cannot update hub index after load/sync") + +// Update downloads the latest version of the index and writes it to disk if it changed. +// It cannot be called after Load() unless the index was completely empty. +func (h *Hub) Update(ctx context.Context, indexProvider IndexProvider, withContent bool) error { + if len(h.items) > 0 { // if this happens, it's a bug. 
- return errors.New("cannot update hub after items have been loaded") + return ErrUpdateAfterSync } - downloaded, err := h.remote.fetchIndex(ctx, h.local.HubIndexFile) + downloaded, err := indexProvider.FetchIndex(ctx, h.local.HubIndexFile, withContent, h.logger) if err != nil { return err } - if downloaded { - h.logger.Infof("Wrote index to %s", h.local.HubIndexFile) - } else { - h.logger.Info("hub index is up to date") + if !downloaded { + fmt.Println("Nothing to do, the hub index is up to date.") } return nil } +// addItem adds an item to the hub. It silently replaces an existing item with the same type and name. func (h *Hub) addItem(item *Item) { if h.items[item.Type] == nil { h.items[item.Type] = make(map[string]*Item) @@ -236,6 +234,7 @@ func (h *Hub) GetItemsByType(itemType string, sorted bool) []*Item { } idx := 0 + for _, item := range items { ret[idx] = item idx += 1 @@ -267,6 +266,7 @@ func (h *Hub) GetInstalledListForAPI() []string { ret := make([]string, len(scenarios)+len(appsecRules)) idx := 0 + for _, item := range scenarios { ret[idx] = item.Name idx += 1 diff --git a/pkg/cwhub/hub_test.go b/pkg/cwhub/hub_test.go index 1c2c9ccceca..461b59de78b 100644 --- a/pkg/cwhub/hub_test.go +++ b/pkg/cwhub/hub_test.go @@ -2,90 +2,261 @@ package cwhub import ( "context" - "fmt" + "net/http" + "net/http/httptest" "os" + "path/filepath" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/crowdsecurity/go-cs-lib/cstest" + + "github.com/crowdsecurity/crowdsec/pkg/csconfig" ) -func TestInitHubUpdate(t *testing.T) { - hub := envSetup(t) - remote := &RemoteHubCfg{ - URLTemplate: mockURLTemplate, - Branch: "master", - IndexPath: ".index.json", +// testHubCfg creates an empty hub structure in a temporary directory +// and returns its configuration object. +// +// This allows the reuse of the hub content for multiple instances +// of the Hub object. 
+func testHubCfg(t *testing.T) *csconfig.LocalHubCfg { + tempDir := t.TempDir() + + local := csconfig.LocalHubCfg{ + HubDir: filepath.Join(tempDir, "crowdsec", "hub"), + HubIndexFile: filepath.Join(tempDir, "crowdsec", "hub", ".index.json"), + InstallDir: filepath.Join(tempDir, "crowdsec"), + InstallDataDir: filepath.Join(tempDir, "installed-data"), } - _, err := NewHub(hub.local, remote, nil) + err := os.MkdirAll(local.HubDir, 0o755) require.NoError(t, err) - ctx := context.Background() + err = os.MkdirAll(local.InstallDir, 0o755) + require.NoError(t, err) - err = hub.Update(ctx) + err = os.MkdirAll(local.InstallDataDir, 0o755) require.NoError(t, err) + return &local +} + +func testHub(t *testing.T, localCfg *csconfig.LocalHubCfg, indexJson string) (*Hub, error) { + if localCfg == nil { + localCfg = testHubCfg(t) + } + + err := os.WriteFile(localCfg.HubIndexFile, []byte(indexJson), 0o644) + require.NoError(t, err) + + hub, err := NewHub(localCfg, nil) + require.NoError(t, err) err = hub.Load() + + return hub, err +} + +func TestIndexEmpty(t *testing.T) { + // an empty hub is valid, and should not have warnings + hub, err := testHub(t, nil, "{}") require.NoError(t, err) + assert.Empty(t, hub.Warnings) } -func TestUpdateIndex(t *testing.T) { - // bad url template - fmt.Println("Test 'bad URL'") +func TestIndexJSON(t *testing.T) { + // but it can't be an empty string + hub, err := testHub(t, nil, "") + cstest.RequireErrorContains(t, err, "invalid hub index: failed to parse index: unexpected end of JSON input") + assert.Empty(t, hub.Warnings) + + // it must be valid json + hub, err = testHub(t, nil, "def not json") + cstest.RequireErrorContains(t, err, "invalid hub index: failed to parse index: invalid character 'd' looking for beginning of value. 
Run 'sudo cscli hub update' to download the index again") + assert.Empty(t, hub.Warnings) + + hub, err = testHub(t, nil, "{") + cstest.RequireErrorContains(t, err, "invalid hub index: failed to parse index: unexpected end of JSON input") + assert.Empty(t, hub.Warnings) - tmpIndex, err := os.CreateTemp("", "index.json") + // and by json we mean an object + hub, err = testHub(t, nil, "[]") + cstest.RequireErrorContains(t, err, "invalid hub index: failed to parse index: json: cannot unmarshal array into Go value of type cwhub.HubItems") + assert.Empty(t, hub.Warnings) +} + +func TestIndexUnknownItemType(t *testing.T) { + // Allow unknown fields in the top level object, likely new item types + hub, err := testHub(t, nil, `{"goodies": {}}`) require.NoError(t, err) + assert.Empty(t, hub.Warnings) +} - // close the file to avoid preventing the rename on windows - err = tmpIndex.Close() +func TestHubUpdate(t *testing.T) { + // update an empty hub with a index containing a parser. + hub, err := testHub(t, nil, "{}") require.NoError(t, err) - t.Cleanup(func() { - os.Remove(tmpIndex.Name()) - }) + index1 := ` +{ + "parsers": { + "author/pars1": { + "path": "parsers/s01-parse/pars1.yaml", + "stage": "s01-parse", + "version": "0.0", + "versions": { + "0.0": { + "digest": "44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a" + } + }, + "content": "{}" + } + } +}` - hub := envSetup(t) + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/main/.index.json" { + w.WriteHeader(http.StatusNotFound) + } - hub.remote = &RemoteHubCfg{ - URLTemplate: "x", - Branch: "", - IndexPath: "", + _, err = w.Write([]byte(index1)) + assert.NoError(t, err) + })) + defer mockServer.Close() + + ctx := context.Background() + + downloader := &Downloader{ + Branch: "main", + URLTemplate: mockServer.URL + "/%s/%s", } - hub.local.HubIndexFile = tmpIndex.Name() + err = hub.Update(ctx, downloader, true) + require.NoError(t, err) - 
ctx := context.Background() + err = hub.Load() + require.NoError(t, err) + + item := hub.GetItem("parsers", "author/pars1") + assert.NotEmpty(t, item) + assert.Equal(t, "author/pars1", item.Name) +} - err = hub.Update(ctx) - cstest.RequireErrorContains(t, err, "failed to build hub index request: invalid URL template 'x'") +func TestHubUpdateInvalidTemplate(t *testing.T) { + hub, err := testHub(t, nil, "{}") + require.NoError(t, err) - // bad domain - fmt.Println("Test 'bad domain'") + ctx := context.Background() - hub.remote = &RemoteHubCfg{ - URLTemplate: "https://baddomain/crowdsecurity/%s/%s", - Branch: "master", - IndexPath: ".index.json", + downloader := &Downloader{ + Branch: "main", + URLTemplate: "x", } - err = hub.Update(ctx) + err = hub.Update(ctx, downloader, true) + cstest.RequireErrorMessage(t, err, "failed to build hub index request: invalid URL template 'x'") +} + +func TestHubUpdateCannotWrite(t *testing.T) { + hub, err := testHub(t, nil, "{}") require.NoError(t, err) - // XXX: this is not failing - // cstest.RequireErrorContains(t, err, "failed http request for hub index: Get") - // bad target path - fmt.Println("Test 'bad target path'") + index1 := ` +{ + "parsers": { + "author/pars1": { + "path": "parsers/s01-parse/pars1.yaml", + "stage": "s01-parse", + "version": "0.0", + "versions": { + "0.0": { + "digest": "44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a" + } + }, + "content": "{}" + } + } +}` + + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/main/.index.json" { + w.WriteHeader(http.StatusNotFound) + } + + _, err = w.Write([]byte(index1)) + assert.NoError(t, err) + })) + defer mockServer.Close() + + ctx := context.Background() - hub.remote = &RemoteHubCfg{ - URLTemplate: mockURLTemplate, - Branch: "master", - IndexPath: ".index.json", + downloader := &Downloader{ + Branch: "main", + URLTemplate: mockServer.URL + "/%s/%s", } - hub.local.HubIndexFile = 
"/does/not/exist/index.json" + hub.local.HubIndexFile = "/proc/foo/bar/baz/.index.json" + + err = hub.Update(ctx, downloader, true) + cstest.RequireErrorContains(t, err, "failed to create temporary download file for /proc/foo/bar/baz/.index.json") +} + +func TestHubUpdateAfterLoad(t *testing.T) { + // Update() can't be called after Load() if the hub is not completely empty. + index1 := ` +{ + "parsers": { + "author/pars1": { + "path": "parsers/s01-parse/pars1.yaml", + "stage": "s01-parse", + "version": "0.0", + "versions": { + "0.0": { + "digest": "44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a" + } + }, + "content": "{}" + } + } +}` + hub, err := testHub(t, nil, index1) + require.NoError(t, err) + + index2 := ` +{ + "parsers": { + "author/pars2": { + "path": "parsers/s01-parse/pars2.yaml", + "stage": "s01-parse", + "version": "0.0", + "versions": { + "0.0": { + "digest": "44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a" + } + }, + "content": "{}" + } + } +}` + + mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/main/.index.json" { + w.WriteHeader(http.StatusNotFound) + } + + _, err = w.Write([]byte(index2)) + assert.NoError(t, err) + })) + defer mockServer.Close() + + ctx := context.Background() + + downloader := &Downloader{ + Branch: "main", + URLTemplate: mockServer.URL + "/%s/%s", + } - err = hub.Update(ctx) - cstest.RequireErrorContains(t, err, "failed to create temporary download file for /does/not/exist/index.json:") + err = hub.Update(ctx, downloader, true) + require.ErrorIs(t, err, ErrUpdateAfterSync) } diff --git a/pkg/cwhub/item.go b/pkg/cwhub/item.go index 32d1acf94ff..38385d9399d 100644 --- a/pkg/cwhub/item.go +++ b/pkg/cwhub/item.go @@ -2,13 +2,15 @@ package cwhub import ( "encoding/json" + "errors" "fmt" + "io/fs" + "os" "path/filepath" "slices" "github.com/Masterminds/semver/v3" - - "github.com/crowdsecurity/crowdsec/pkg/emoji" + yaml 
"gopkg.in/yaml.v3" ) const ( @@ -42,59 +44,49 @@ type ItemVersion struct { Deprecated bool `json:"deprecated,omitempty" yaml:"deprecated,omitempty"` } -// ItemState is used to keep the local state (i.e. at runtime) of an item. -// This data is not stored in the index, but is displayed with "cscli ... inspect". -type ItemState struct { - LocalPath string `json:"local_path,omitempty" yaml:"local_path,omitempty"` - LocalVersion string `json:"local_version,omitempty" yaml:"local_version,omitempty"` - LocalHash string `json:"local_hash,omitempty" yaml:"local_hash,omitempty"` - Installed bool `json:"installed"` - Downloaded bool `json:"downloaded"` - UpToDate bool `json:"up_to_date"` - Tainted bool `json:"tainted"` - TaintedBy []string `json:"tainted_by,omitempty" yaml:"tainted_by,omitempty"` - BelongsToCollections []string `json:"belongs_to_collections,omitempty" yaml:"belongs_to_collections,omitempty"` +type Dependencies struct { + Parsers []string `json:"parsers,omitempty" yaml:"parsers,omitempty"` + PostOverflows []string `json:"postoverflows,omitempty" yaml:"postoverflows,omitempty"` + Scenarios []string `json:"scenarios,omitempty" yaml:"scenarios,omitempty"` + Collections []string `json:"collections,omitempty" yaml:"collections,omitempty"` + Contexts []string `json:"contexts,omitempty" yaml:"contexts,omitempty"` + AppsecConfigs []string `json:"appsec-configs,omitempty" yaml:"appsec-configs,omitempty"` + AppsecRules []string `json:"appsec-rules,omitempty" yaml:"appsec-rules,omitempty"` } -// IsLocal returns true if the item has been create by a user (not downloaded from the hub). -func (s *ItemState) IsLocal() bool { - return s.Installed && !s.Downloaded +// a group of items of the same type +type itemgroup struct { + typeName string + itemNames []string } -// Text returns the status of the item as a string (eg. "enabled,update-available"). 
-func (s *ItemState) Text() string { - ret := "disabled" - - if s.Installed { - ret = "enabled" +func (d Dependencies) byType() []itemgroup { + return []itemgroup{ + {PARSERS, d.Parsers}, + {POSTOVERFLOWS, d.PostOverflows}, + {SCENARIOS, d.Scenarios}, + {CONTEXTS, d.Contexts}, + {APPSEC_CONFIGS, d.AppsecConfigs}, + {APPSEC_RULES, d.AppsecRules}, + {COLLECTIONS, d.Collections}, } - - if s.IsLocal() { - ret += ",local" - } - - if s.Tainted { - ret += ",tainted" - } else if !s.UpToDate && !s.IsLocal() { - ret += ",update-available" - } - - return ret } -// Emoji returns the status of the item as an emoji (eg. emoji.Warning). -func (s *ItemState) Emoji() string { - switch { - case s.IsLocal(): - return emoji.House - case !s.Installed: - return emoji.Prohibited - case s.Tainted || (!s.UpToDate && !s.IsLocal()): - return emoji.Warning - case s.Installed: - return emoji.CheckMark - default: - return emoji.QuestionMark +// SubItems iterates over the sub-items in the struct, excluding the ones that were not found in the hub. +func (d Dependencies) SubItems(hub *Hub) func(func(*Item) bool) { + return func(yield func(*Item) bool) { + for _, typeGroup := range d.byType() { + for _, name := range typeGroup.itemNames { + s := hub.GetItem(typeGroup.typeName, name) + if s == nil { + continue + } + + if !yield(s) { + return + } + } + } } } @@ -104,46 +96,40 @@ type Item struct { State ItemState `json:"-" yaml:"-"` // local state, not stored in the index - Type string `json:"type,omitempty" yaml:"type,omitempty"` // one of the ItemTypes - Stage string `json:"stage,omitempty" yaml:"stage,omitempty"` // Stage for parser|postoverflow: s00-raw/s01-... - Name string `json:"name,omitempty" yaml:"name,omitempty"` // usually "author/name" - FileName string `json:"file_name,omitempty" yaml:"file_name,omitempty"` // eg. 
apache2-logs.yaml + Type string `json:"type,omitempty" yaml:"type,omitempty"` + Stage string `json:"stage,omitempty" yaml:"stage,omitempty"` // Stage for parser|postoverflow: s00-raw/s01-... + Name string `json:"name,omitempty" yaml:"name,omitempty"` // usually "author/name" + FileName string `json:"file_name,omitempty" yaml:"file_name,omitempty"` // eg. apache2-logs.yaml Description string `json:"description,omitempty" yaml:"description,omitempty"` - Content string `json:"content,omitempty" yaml:"-"` - Author string `json:"author,omitempty" yaml:"author,omitempty"` - References []string `json:"references,omitempty" yaml:"references,omitempty"` + Content string `json:"content,omitempty" yaml:"-"` + References []string `json:"references,omitempty" yaml:"references,omitempty"` + // NOTE: RemotePath could be derived from the other fields RemotePath string `json:"path,omitempty" yaml:"path,omitempty"` // path relative to the base URL eg. /parsers/stage/author/file.yaml Version string `json:"version,omitempty" yaml:"version,omitempty"` // the last available version Versions map[string]ItemVersion `json:"versions,omitempty" yaml:"-"` // all the known versions - // if it's a collection, it can have sub items - Parsers []string `json:"parsers,omitempty" yaml:"parsers,omitempty"` - PostOverflows []string `json:"postoverflows,omitempty" yaml:"postoverflows,omitempty"` - Scenarios []string `json:"scenarios,omitempty" yaml:"scenarios,omitempty"` - Collections []string `json:"collections,omitempty" yaml:"collections,omitempty"` - Contexts []string `json:"contexts,omitempty" yaml:"contexts,omitempty"` - AppsecConfigs []string `json:"appsec-configs,omitempty" yaml:"appsec-configs,omitempty"` - AppsecRules []string `json:"appsec-rules,omitempty" yaml:"appsec-rules,omitempty"` + // The index contains the dependencies of the "latest" version (collections only) + Dependencies } -// installPath returns the location of the symlink to the item in the hub, or the path of the item itself 
if it's local +// InstallPath returns the location of the symlink to the item in the hub, or the path of the item itself if it's local // (eg. /etc/crowdsec/collections/xyz.yaml). // Raises an error if the path goes outside of the install dir. -func (i *Item) installPath() (string, error) { +func (i *Item) InstallPath() (string, error) { p := i.Type if i.Stage != "" { p = filepath.Join(p, i.Stage) } - return safePath(i.hub.local.InstallDir, filepath.Join(p, i.FileName)) + return SafePath(i.hub.local.InstallDir, filepath.Join(p, i.FileName)) } -// downloadPath returns the location of the actual config file in the hub +// DownloadPath returns the location of the actual config file in the hub // (eg. /etc/crowdsec/hub/collections/author/xyz.yaml). // Raises an error if the path goes outside of the hub dir. -func (i *Item) downloadPath() (string, error) { - ret, err := safePath(i.hub.local.HubDir, i.RemotePath) +func (i *Item) DownloadPath() (string, error) { + ret, err := SafePath(i.hub.local.HubDir, i.RemotePath) if err != nil { return "", err } @@ -203,141 +189,128 @@ func (i Item) MarshalYAML() (interface{}, error) { }, nil } -// SubItems returns a slice of sub-items, excluding the ones that were not found. -func (i *Item) SubItems() []*Item { - sub := make([]*Item, 0) - - for _, name := range i.Parsers { - s := i.hub.GetItem(PARSERS, name) - if s == nil { - continue - } +// LatestDependencies returns a slice of sub-items of the "latest" available version of the item, as opposed to the version that is actually installed. The information comes from the index. +func (i *Item) LatestDependencies() Dependencies { + return i.Dependencies +} - sub = append(sub, s) +// CurrentSubItems returns a slice of sub-items of the installed version, excluding the ones that were not found. +// The list comes from the content file if parseable, otherwise from the index (same as LatestDependencies). 
+func (i *Item) CurrentDependencies() Dependencies { + if !i.HasSubItems() { + return Dependencies{} } - for _, name := range i.PostOverflows { - s := i.hub.GetItem(POSTOVERFLOWS, name) - if s == nil { - continue - } - - sub = append(sub, s) + if i.State.UpToDate { + return i.Dependencies } - for _, name := range i.Scenarios { - s := i.hub.GetItem(SCENARIOS, name) - if s == nil { - continue - } - - sub = append(sub, s) + contentPath, err := i.InstallPath() + if err != nil { + i.hub.logger.Warningf("can't access dependencies for %s, using index", i.FQName()) + return i.Dependencies } - for _, name := range i.Contexts { - s := i.hub.GetItem(CONTEXTS, name) - if s == nil { - continue - } + currentContent, err := os.ReadFile(contentPath) + if errors.Is(err, fs.ErrNotExist) { + return i.Dependencies + } - sub = append(sub, s) + if err != nil { + // a file might be corrupted, or in development + i.hub.logger.Warningf("can't read dependencies for %s, using index", i.FQName()) + return i.Dependencies } - for _, name := range i.AppsecConfigs { - s := i.hub.GetItem(APPSEC_CONFIGS, name) - if s == nil { - continue - } + var d Dependencies - sub = append(sub, s) + // XXX: assume collection content never has multiple documents + if err := yaml.Unmarshal(currentContent, &d); err != nil { + i.hub.logger.Warningf("can't parse dependencies for %s, using index", i.FQName()) + return i.Dependencies } - for _, name := range i.AppsecRules { - s := i.hub.GetItem(APPSEC_RULES, name) - if s == nil { - continue - } + return d +} - sub = append(sub, s) +func (i *Item) logMissingSubItems() { + for _, sub := range i.CurrentDependencies().byType() { + for _, subName := range sub.itemNames { + if i.hub.GetItem(sub.typeName, subName) == nil { + i.hub.logger.Errorf("can't find %s:%s, required by %s", sub.typeName, subName, i.Name) + } + } } +} + +// Ancestors returns a slice of items (typically collections) that have this item as a direct or indirect dependency. 
+func (i *Item) Ancestors() []*Item { + ret := make([]*Item, 0) - for _, name := range i.Collections { - s := i.hub.GetItem(COLLECTIONS, name) - if s == nil { + for _, parentName := range i.State.BelongsToCollections { + parent := i.hub.GetItem(COLLECTIONS, parentName) + if parent == nil { continue } - sub = append(sub, s) + ret = append(ret, parent) } - return sub + return ret } -func (i *Item) logMissingSubItems() { - if !i.HasSubItems() { - return - } +// SafeToRemoveDeps returns a slice of dependencies that can be safely removed when this item is removed. +// The returned slice can contain items that are not installed, or not downloaded. +func (i *Item) SafeToRemoveDeps() ([]*Item, error) { + ret := make([]*Item, 0) - for _, subName := range i.Parsers { - if i.hub.GetItem(PARSERS, subName) == nil { - i.hub.logger.Errorf("can't find %s in %s, required by %s", subName, PARSERS, i.Name) - } + // can return err for circular dependencies + descendants, err := i.descendants() + if err != nil { + return nil, err } - for _, subName := range i.Scenarios { - if i.hub.GetItem(SCENARIOS, subName) == nil { - i.hub.logger.Errorf("can't find %s in %s, required by %s", subName, SCENARIOS, i.Name) - } - } + ancestors := i.Ancestors() - for _, subName := range i.PostOverflows { - if i.hub.GetItem(POSTOVERFLOWS, subName) == nil { - i.hub.logger.Errorf("can't find %s in %s, required by %s", subName, POSTOVERFLOWS, i.Name) - } - } + for sub := range i.CurrentDependencies().SubItems(i.hub) { + safe := true - for _, subName := range i.Contexts { - if i.hub.GetItem(CONTEXTS, subName) == nil { - i.hub.logger.Errorf("can't find %s in %s, required by %s", subName, CONTEXTS, i.Name) - } - } + // if the sub depends on a collection that is not a direct or indirect dependency + // of the current item, it is not removed + for _, subParent := range sub.Ancestors() { + if !subParent.State.Installed { + continue + } - for _, subName := range i.AppsecConfigs { - if i.hub.GetItem(APPSEC_CONFIGS, 
subName) == nil { - i.hub.logger.Errorf("can't find %s in %s, required by %s", subName, APPSEC_CONFIGS, i.Name) - } - } + // the ancestor that would block the removal of the sub item is also an ancestor + // of the item we are removing, so we don't want false warnings + // (e.g. crowdsecurity/sshd-logs was not removed because it also belongs to crowdsecurity/linux, + // while we are removing crowdsecurity/sshd) + if slices.Contains(ancestors, subParent) { + continue + } - for _, subName := range i.AppsecRules { - if i.hub.GetItem(APPSEC_RULES, subName) == nil { - i.hub.logger.Errorf("can't find %s in %s, required by %s", subName, APPSEC_RULES, i.Name) - } - } + // the sub-item belongs to the item we are removing, but we already knew that + if subParent == i { + continue + } - for _, subName := range i.Collections { - if i.hub.GetItem(COLLECTIONS, subName) == nil { - i.hub.logger.Errorf("can't find %s in %s, required by %s", subName, COLLECTIONS, i.Name) + if !slices.Contains(descendants, subParent) { + // not removing %s because it also belongs to %s", sub.FQName(), subParent.FQName()) + safe = false + break + } } - } -} - -// Ancestors returns a slice of items (typically collections) that have this item as a direct or indirect dependency. -func (i *Item) Ancestors() []*Item { - ret := make([]*Item, 0) - for _, parentName := range i.State.BelongsToCollections { - parent := i.hub.GetItem(COLLECTIONS, parentName) - if parent == nil { - continue + if safe { + ret = append(ret, sub) } - - ret = append(ret, parent) } - return ret + return ret, nil } -// descendants returns a list of all (direct or indirect) dependencies of the item. +// descendants returns a list of all (direct or indirect) dependencies of the item's current version. 
func (i *Item) descendants() ([]*Item, error) { var collectSubItems func(item *Item, visited map[*Item]bool, result *[]*Item) error @@ -352,7 +325,7 @@ func (i *Item) descendants() ([]*Item, error) { visited[item] = true - for _, subItem := range item.SubItems() { + for subItem := range item.CurrentDependencies().SubItems(item.hub) { if subItem == i { return fmt.Errorf("circular dependency detected: %s depends on %s", item.Name, i.Name) } diff --git a/pkg/cwhub/item_test.go b/pkg/cwhub/item_test.go index 703bbb5cb90..350861ff85e 100644 --- a/pkg/cwhub/item_test.go +++ b/pkg/cwhub/item_test.go @@ -6,39 +6,16 @@ import ( "github.com/stretchr/testify/require" ) -func TestItemStatus(t *testing.T) { +func TestItemStats(t *testing.T) { hub := envSetup(t) // get existing map x := hub.GetItemMap(COLLECTIONS) require.NotEmpty(t, x) - // Get item: good and bad - for k := range x { - item := hub.GetItem(COLLECTIONS, k) - require.NotNil(t, item) - - item.State.Installed = true - item.State.UpToDate = false - item.State.Tainted = false - item.State.Downloaded = true - - txt := item.State.Text() - require.Equal(t, "enabled,update-available", txt) - - item.State.Installed = true - item.State.UpToDate = false - item.State.Tainted = false - item.State.Downloaded = false - - txt = item.State.Text() - require.Equal(t, "enabled,local", txt) - } - stats := hub.ItemStats() require.Equal(t, []string{ "Loaded: 2 parsers, 1 scenarios, 3 collections", - "Unmanaged items: 3 local, 0 tainted", }, stats) } diff --git a/pkg/cwhub/iteminstall.go b/pkg/cwhub/iteminstall.go deleted file mode 100644 index 912897d0d7e..00000000000 --- a/pkg/cwhub/iteminstall.go +++ /dev/null @@ -1,73 +0,0 @@ -package cwhub - -import ( - "context" - "fmt" -) - -// enable enables the item by creating a symlink to the downloaded content, and also enables sub-items. 
-func (i *Item) enable() error { - if i.State.Installed { - if i.State.Tainted { - return fmt.Errorf("%s is tainted, won't overwrite unless --force", i.Name) - } - - if i.State.IsLocal() { - return fmt.Errorf("%s is local, won't overwrite", i.Name) - } - - // if it's a collection, check sub-items even if the collection file itself is up-to-date - if i.State.UpToDate && !i.HasSubItems() { - i.hub.logger.Tracef("%s is installed and up-to-date, skip.", i.Name) - return nil - } - } - - for _, sub := range i.SubItems() { - if err := sub.enable(); err != nil { - return fmt.Errorf("while installing %s: %w", sub.Name, err) - } - } - - if err := i.createInstallLink(); err != nil { - return err - } - - i.hub.logger.Infof("Enabled %s: %s", i.Type, i.Name) - i.State.Installed = true - - return nil -} - -// Install installs the item from the hub, downloading it if needed. -func (i *Item) Install(ctx context.Context, force bool, downloadOnly bool) error { - if downloadOnly && i.State.Downloaded && i.State.UpToDate { - i.hub.logger.Infof("%s is already downloaded and up-to-date", i.Name) - - if !force { - return nil - } - } - - downloaded, err := i.downloadLatest(ctx, force, true) - if err != nil { - return err - } - - if downloadOnly && downloaded { - return nil - } - - if err := i.enable(); err != nil { - return fmt.Errorf("while enabling %s: %w", i.Name, err) - } - - // a check on stdout is used while scripting to know if the hub has been upgraded - // and a configuration reload is required - // TODO: use a better way to communicate this - fmt.Printf("installed %s\n", i.Name) - - i.hub.logger.Infof("Enabled %s", i.Name) - - return nil -} diff --git a/pkg/cwhub/iteminstall_test.go b/pkg/cwhub/iteminstall_test.go index 5bfc7e8148e..ba47f2f4b4a 100644 --- a/pkg/cwhub/iteminstall_test.go +++ b/pkg/cwhub/iteminstall_test.go @@ -1,5 +1,9 @@ package cwhub +// XXX: these tests are obsolete + +/* + import ( "context" "os" @@ -103,7 +107,7 @@ func TestInstallParser(t *testing.T) { - 
force update it - check its status - remove it - */ + * hub := envSetup(t) // map iteration is random by itself @@ -126,7 +130,7 @@ func TestInstallCollection(t *testing.T) { - force update it - check its status - remove it - */ + * hub := envSetup(t) // map iteration is random by itself @@ -139,3 +143,5 @@ func TestInstallCollection(t *testing.T) { break } } + +*/ diff --git a/pkg/cwhub/itemlink.go b/pkg/cwhub/itemlink.go deleted file mode 100644 index 8a78d6805b7..00000000000 --- a/pkg/cwhub/itemlink.go +++ /dev/null @@ -1,78 +0,0 @@ -package cwhub - -import ( - "fmt" - "os" - "path/filepath" -) - -// createInstallLink creates a symlink between the actual config file at hub.HubDir and hub.ConfigDir. -func (i *Item) createInstallLink() error { - dest, err := i.installPath() - if err != nil { - return err - } - - destDir := filepath.Dir(dest) - if err = os.MkdirAll(destDir, os.ModePerm); err != nil { - return fmt.Errorf("while creating %s: %w", destDir, err) - } - - if _, err = os.Lstat(dest); !os.IsNotExist(err) { - i.hub.logger.Infof("%s already exists.", dest) - return nil - } - - src, err := i.downloadPath() - if err != nil { - return err - } - - if err = os.Symlink(src, dest); err != nil { - return fmt.Errorf("while creating symlink from %s to %s: %w", src, dest, err) - } - - return nil -} - -// removeInstallLink removes the symlink to the downloaded content. -func (i *Item) removeInstallLink() error { - syml, err := i.installPath() - if err != nil { - return err - } - - stat, err := os.Lstat(syml) - if err != nil { - return err - } - - // if it's managed by hub, it's a symlink to csconfig.GConfig.hub.HubDir / ... 
- if stat.Mode()&os.ModeSymlink == 0 { - i.hub.logger.Warningf("%s (%s) isn't a symlink, can't disable", i.Name, syml) - return fmt.Errorf("%s isn't managed by hub", i.Name) - } - - hubpath, err := os.Readlink(syml) - if err != nil { - return fmt.Errorf("while reading symlink: %w", err) - } - - src, err := i.downloadPath() - if err != nil { - return err - } - - if hubpath != src { - i.hub.logger.Warningf("%s (%s) isn't a symlink to %s", i.Name, syml, src) - return fmt.Errorf("%s isn't managed by hub", i.Name) - } - - if err := os.Remove(syml); err != nil { - return fmt.Errorf("while removing symlink: %w", err) - } - - i.hub.logger.Infof("Removed symlink [%s]: %s", i.Name, syml) - - return nil -} diff --git a/pkg/cwhub/itemremove.go b/pkg/cwhub/itemremove.go deleted file mode 100644 index eca0c856237..00000000000 --- a/pkg/cwhub/itemremove.go +++ /dev/null @@ -1,138 +0,0 @@ -package cwhub - -import ( - "fmt" - "os" - "slices" -) - -// purge removes the actual config file that was downloaded. -func (i *Item) purge() (bool, error) { - if !i.State.Downloaded { - i.hub.logger.Debugf("removing %s: not downloaded -- no need to remove", i.Name) - return false, nil - } - - src, err := i.downloadPath() - if err != nil { - return false, err - } - - if err := os.Remove(src); err != nil { - if os.IsNotExist(err) { - i.hub.logger.Debugf("%s doesn't exist, no need to remove", src) - return false, nil - } - - return false, fmt.Errorf("while removing file: %w", err) - } - - i.State.Downloaded = false - i.hub.logger.Infof("Removed source file [%s]: %s", i.Name, src) - - return true, nil -} - -// disable removes the install link, and optionally the downloaded content. 
-func (i *Item) disable(purge bool, force bool) (bool, error) { - didRemove := true - - err := i.removeInstallLink() - if os.IsNotExist(err) { - if !purge && !force { - link, _ := i.installPath() - return false, fmt.Errorf("link %s does not exist (override with --force or --purge)", link) - } - - didRemove = false - } else if err != nil { - return false, err - } - - i.State.Installed = false - didPurge := false - - if purge { - if didPurge, err = i.purge(); err != nil { - return didRemove, err - } - } - - ret := didRemove || didPurge - - return ret, nil -} - -// Remove disables the item, optionally removing the downloaded content. -func (i *Item) Remove(purge bool, force bool) (bool, error) { - if i.State.IsLocal() { - i.hub.logger.Warningf("%s is a local item, please delete manually", i.Name) - return false, nil - } - - if i.State.Tainted && !force { - return false, fmt.Errorf("%s is tainted, use '--force' to remove", i.Name) - } - - if !i.State.Installed && !purge { - i.hub.logger.Infof("removing %s: not installed -- no need to remove", i.Name) - return false, nil - } - - removed := false - - descendants, err := i.descendants() - if err != nil { - return false, err - } - - ancestors := i.Ancestors() - - for _, sub := range i.SubItems() { - if !sub.State.Installed { - continue - } - - // if the sub depends on a collection that is not a direct or indirect dependency - // of the current item, it is not removed - for _, subParent := range sub.Ancestors() { - if !purge && !subParent.State.Installed { - continue - } - - // the ancestor that would block the removal of the sub item is also an ancestor - // of the item we are removing, so we don't want false warnings - // (e.g. 
crowdsecurity/sshd-logs was not removed because it also belongs to crowdsecurity/linux, - // while we are removing crowdsecurity/sshd) - if slices.Contains(ancestors, subParent) { - continue - } - - // the sub-item belongs to the item we are removing, but we already knew that - if subParent == i { - continue - } - - if !slices.Contains(descendants, subParent) { - i.hub.logger.Infof("%s was not removed because it also belongs to %s", sub.Name, subParent.Name) - continue - } - } - - subRemoved, err := sub.Remove(purge, force) - if err != nil { - return false, fmt.Errorf("unable to disable %s: %w", i.Name, err) - } - - removed = removed || subRemoved - } - - didDisable, err := i.disable(purge, force) - if err != nil { - return false, fmt.Errorf("while removing %s: %w", i.Name, err) - } - - removed = removed || didDisable - - return removed, nil -} diff --git a/pkg/cwhub/itemupgrade.go b/pkg/cwhub/itemupgrade.go deleted file mode 100644 index 105e5ebec31..00000000000 --- a/pkg/cwhub/itemupgrade.go +++ /dev/null @@ -1,254 +0,0 @@ -package cwhub - -// Install, upgrade and remove items from the hub to the local configuration - -import ( - "context" - "crypto" - "encoding/base64" - "encoding/hex" - "errors" - "fmt" - "os" - "path/filepath" - - "github.com/sirupsen/logrus" - - "github.com/crowdsecurity/go-cs-lib/downloader" - - "github.com/crowdsecurity/crowdsec/pkg/emoji" -) - -// Upgrade downloads and applies the last version of the item from the hub. 
-func (i *Item) Upgrade(ctx context.Context, force bool) (bool, error) { - if i.State.IsLocal() { - i.hub.logger.Infof("not upgrading %s: local item", i.Name) - return false, nil - } - - if !i.State.Downloaded { - return false, fmt.Errorf("can't upgrade %s: not installed", i.Name) - } - - if !i.State.Installed { - return false, fmt.Errorf("can't upgrade %s: downloaded but not installed", i.Name) - } - - if i.State.UpToDate { - i.hub.logger.Infof("%s: up-to-date", i.Name) - - if err := i.DownloadDataIfNeeded(ctx, force); err != nil { - return false, fmt.Errorf("%s: download failed: %w", i.Name, err) - } - - if !force { - // no upgrade needed - return false, nil - } - } - - if _, err := i.downloadLatest(ctx, force, true); err != nil { - return false, fmt.Errorf("%s: download failed: %w", i.Name, err) - } - - if !i.State.UpToDate { - if i.State.Tainted { - i.hub.logger.Warningf("%v %s is tainted, --force to overwrite", emoji.Warning, i.Name) - } - - return false, nil - } - - // a check on stdout is used while scripting to know if the hub has been upgraded - // and a configuration reload is required - // TODO: use a better way to communicate this - fmt.Printf("updated %s\n", i.Name) - i.hub.logger.Infof("%v %s: updated", emoji.Package, i.Name) - - return true, nil -} - -// downloadLatest downloads the latest version of the item to the hub directory. 
-func (i *Item) downloadLatest(ctx context.Context, overwrite bool, updateOnly bool) (bool, error) { - i.hub.logger.Debugf("Downloading %s %s", i.Type, i.Name) - - for _, sub := range i.SubItems() { - if !sub.State.Installed && updateOnly && sub.State.Downloaded { - i.hub.logger.Debugf("skipping upgrade of %s: not installed", i.Name) - continue - } - - i.hub.logger.Debugf("Download %s sub-item: %s %s (%t -> %t)", i.Name, sub.Type, sub.Name, i.State.Installed, updateOnly) - - // recurse as it's a collection - if sub.HasSubItems() { - i.hub.logger.Tracef("collection, recurse") - - if _, err := sub.downloadLatest(ctx, overwrite, updateOnly); err != nil { - return false, err - } - } - - downloaded := sub.State.Downloaded - - if _, err := sub.download(ctx, overwrite); err != nil { - return false, err - } - - // We need to enable an item when it has been added to a collection since latest release of the collection. - // We check if sub.Downloaded is false because maybe the item has been disabled by the user. - if !sub.State.Installed && !downloaded { - if err := sub.enable(); err != nil { - return false, fmt.Errorf("enabling '%s': %w", sub.Name, err) - } - } - } - - if !i.State.Installed && updateOnly && i.State.Downloaded && !overwrite { - i.hub.logger.Debugf("skipping upgrade of %s: not installed", i.Name) - return false, nil - } - - return i.download(ctx, overwrite) -} - -// FetchContentTo downloads the last version of the item's YAML file to the specified path. -func (i *Item) FetchContentTo(ctx context.Context, destPath string) (bool, string, error) { - wantHash := i.latestHash() - if wantHash == "" { - return false, "", errors.New("latest hash missing from index. 
The index file is invalid, please run 'cscli hub update' and try again") - } - - // Use the embedded content if available - if i.Content != "" { - // the content was historically base64 encoded - content, err := base64.StdEncoding.DecodeString(i.Content) - if err != nil { - content = []byte(i.Content) - } - - dir := filepath.Dir(destPath) - - if err := os.MkdirAll(dir, 0o755); err != nil { - return false, "", fmt.Errorf("while creating %s: %w", dir, err) - } - - // check sha256 - hash := crypto.SHA256.New() - if _, err := hash.Write(content); err != nil { - return false, "", fmt.Errorf("while hashing %s: %w", i.Name, err) - } - - gotHash := hex.EncodeToString(hash.Sum(nil)) - if gotHash != wantHash { - return false, "", fmt.Errorf("hash mismatch: expected %s, got %s. The index file is invalid, please run 'cscli hub update' and try again", wantHash, gotHash) - } - - if err := os.WriteFile(destPath, content, 0o600); err != nil { - return false, "", fmt.Errorf("while writing %s: %w", destPath, err) - } - - i.hub.logger.Debugf("Wrote %s content from .index.json to %s", i.Name, destPath) - - return true, fmt.Sprintf("(embedded in %s)", i.hub.local.HubIndexFile), nil - } - - url, err := i.hub.remote.urlTo(i.RemotePath) - if err != nil { - return false, "", fmt.Errorf("failed to build request: %w", err) - } - - d := downloader. - New(). - WithHTTPClient(hubClient). - ToFile(destPath). - WithETagFn(downloader.SHA256). - WithMakeDirs(true). - WithLogger(logrus.WithField("url", url)). - CompareContent(). - VerifyHash("sha256", wantHash) - - // TODO: recommend hub update if hash does not match - - downloaded, err := d.Download(ctx, url) - if err != nil { - return false, "", err - } - - return downloaded, url, nil -} - -// download downloads the item from the hub and writes it to the hub directory. 
-func (i *Item) download(ctx context.Context, overwrite bool) (bool, error) { - // ensure that target file is within target dir - finalPath, err := i.downloadPath() - if err != nil { - return false, err - } - - if i.State.IsLocal() { - i.hub.logger.Warningf("%s is local, can't download", i.Name) - return false, nil - } - - // if user didn't --force, don't overwrite local, tainted, up-to-date files - if !overwrite { - if i.State.Tainted { - i.hub.logger.Debugf("%s: tainted, not updated", i.Name) - return false, nil - } - - if i.State.UpToDate { - // We still have to check if data files are present - i.hub.logger.Debugf("%s: up-to-date, not updated", i.Name) - } - } - - downloaded, _, err := i.FetchContentTo(ctx, finalPath) - if err != nil { - return false, err - } - - if downloaded { - i.hub.logger.Infof("Downloaded %s", i.Name) - } - - i.State.Downloaded = true - i.State.Tainted = false - i.State.UpToDate = true - - // read content to get the list of data files - reader, err := os.Open(finalPath) - if err != nil { - return false, fmt.Errorf("while opening %s: %w", finalPath, err) - } - - defer reader.Close() - - if err = downloadDataSet(ctx, i.hub.local.InstallDataDir, overwrite, reader, i.hub.logger); err != nil { - return false, fmt.Errorf("while downloading data for %s: %w", i.FileName, err) - } - - return true, nil -} - -// DownloadDataIfNeeded downloads the data set for the item. 
-func (i *Item) DownloadDataIfNeeded(ctx context.Context, force bool) error { - itemFilePath, err := i.installPath() - if err != nil { - return err - } - - itemFile, err := os.Open(itemFilePath) - if err != nil { - return fmt.Errorf("while opening %s: %w", itemFilePath, err) - } - - defer itemFile.Close() - - if err = downloadDataSet(ctx, i.hub.local.InstallDataDir, force, itemFile, i.hub.logger); err != nil { - return fmt.Errorf("while downloading data for %s: %w", itemFilePath, err) - } - - return nil -} diff --git a/pkg/cwhub/itemupgrade_test.go b/pkg/cwhub/itemupgrade_test.go index 5f9e4d1944e..3225d2f013b 100644 --- a/pkg/cwhub/itemupgrade_test.go +++ b/pkg/cwhub/itemupgrade_test.go @@ -1,5 +1,7 @@ package cwhub +/* + import ( "context" "testing" @@ -36,10 +38,9 @@ func TestUpgradeItemNewScenarioInCollection(t *testing.T) { // collection receives an update. It now adds new scenario "crowdsecurity/barfoo_scenario" pushUpdateToCollectionInHub() - remote := &RemoteHubCfg{ + remote := &Downloader{ URLTemplate: mockURLTemplate, Branch: "master", - IndexPath: ".index.json", } hub, err := NewHub(hub.local, remote, nil) @@ -96,10 +97,9 @@ func TestUpgradeItemInDisabledScenarioShouldNotBeInstalled(t *testing.T) { require.NoError(t, err) require.True(t, didRemove) - remote := &RemoteHubCfg{ + remote := &Downloader{ URLTemplate: mockURLTemplate, Branch: "master", - IndexPath: ".index.json", } hub = getHubOrFail(t, hub.local, remote) @@ -130,7 +130,7 @@ func TestUpgradeItemInDisabledScenarioShouldNotBeInstalled(t *testing.T) { } // getHubOrFail refreshes the hub state (load index, sync) and returns the singleton, or fails the test. 
-func getHubOrFail(t *testing.T, local *csconfig.LocalHubCfg, remote *RemoteHubCfg) *Hub { +func getHubOrFail(t *testing.T, local *csconfig.LocalHubCfg, remote *Downloader) *Hub { hub, err := NewHub(local, remote, nil) require.NoError(t, err) @@ -168,10 +168,9 @@ func TestUpgradeItemNewScenarioIsInstalledWhenReferencedScenarioIsDisabled(t *te require.NoError(t, err) require.True(t, didRemove) - remote := &RemoteHubCfg{ + remote := &Downloader{ URLTemplate: mockURLTemplate, Branch: "master", - IndexPath: ".index.json", } hub = getHubOrFail(t, hub.local, remote) @@ -221,3 +220,5 @@ func pushUpdateToCollectionInHub() { responseByPath["/crowdsecurity/master/.index.json"] = fileToStringX("./testdata/index2.json") responseByPath["/crowdsecurity/master/collections/crowdsecurity/test_collection.yaml"] = fileToStringX("./testdata/collection_v2.yaml") } + +*/ diff --git a/pkg/cwhub/remote.go b/pkg/cwhub/remote.go deleted file mode 100644 index 8d2dc2dbb94..00000000000 --- a/pkg/cwhub/remote.go +++ /dev/null @@ -1,84 +0,0 @@ -package cwhub - -import ( - "context" - "fmt" - "net/url" - - "github.com/sirupsen/logrus" - - "github.com/crowdsecurity/go-cs-lib/downloader" -) - -// RemoteHubCfg is used to retrieve index and items from the remote hub. -type RemoteHubCfg struct { - Branch string - URLTemplate string - IndexPath string - EmbedItemContent bool -} - -// urlTo builds the URL to download a file from the remote hub. -func (r *RemoteHubCfg) urlTo(remotePath string) (string, error) { - if r == nil { - return "", ErrNilRemoteHub - } - - // the template must contain two string placeholders - if fmt.Sprintf(r.URLTemplate, "%s", "%s") != r.URLTemplate { - return "", fmt.Errorf("invalid URL template '%s'", r.URLTemplate) - } - - return fmt.Sprintf(r.URLTemplate, r.Branch, remotePath), nil -} - -// addURLParam adds the "with_content=true" parameter to the URL if it's not already present. 
-func addURLParam(rawURL string, param string, value string) (string, error) { - parsedURL, err := url.Parse(rawURL) - if err != nil { - return "", fmt.Errorf("failed to parse URL: %w", err) - } - - query := parsedURL.Query() - - if _, exists := query[param]; !exists { - query.Add(param, value) - } - - parsedURL.RawQuery = query.Encode() - - return parsedURL.String(), nil -} - -// fetchIndex downloads the index from the hub and returns the content. -func (r *RemoteHubCfg) fetchIndex(ctx context.Context, destPath string) (bool, error) { - if r == nil { - return false, ErrNilRemoteHub - } - - url, err := r.urlTo(r.IndexPath) - if err != nil { - return false, fmt.Errorf("failed to build hub index request: %w", err) - } - - if r.EmbedItemContent { - url, err = addURLParam(url, "with_content", "true") - if err != nil { - return false, fmt.Errorf("failed to add 'with_content' parameter to URL: %w", err) - } - } - - downloaded, err := downloader. - New(). - WithHTTPClient(hubClient). - ToFile(destPath). - WithETagFn(downloader.SHA256). - CompareContent(). - WithLogger(logrus.WithField("url", url)). - Download(ctx, url) - if err != nil { - return false, err - } - - return downloaded, nil -} diff --git a/pkg/cwhub/state.go b/pkg/cwhub/state.go new file mode 100644 index 00000000000..63a433151cd --- /dev/null +++ b/pkg/cwhub/state.go @@ -0,0 +1,62 @@ +package cwhub + +import ( + "github.com/crowdsecurity/crowdsec/pkg/emoji" +) + +// ItemState is used to keep the local state (i.e. at runtime) of an item. +// This data is not stored in the index, but is displayed with "cscli ... inspect". 
+type ItemState struct {
+	LocalPath            string   `json:"local_path,omitempty" yaml:"local_path,omitempty"`
+	LocalVersion         string   `json:"local_version,omitempty" yaml:"local_version,omitempty"`
+	LocalHash            string   `json:"local_hash,omitempty" yaml:"local_hash,omitempty"`
+	Installed            bool     `json:"installed"`
+	local                bool
+	Downloaded           bool     `json:"downloaded"`
+	UpToDate             bool     `json:"up_to_date"`
+	Tainted              bool     `json:"tainted"`
+	TaintedBy            []string `json:"tainted_by,omitempty" yaml:"tainted_by,omitempty"`
+	BelongsToCollections []string `json:"belongs_to_collections,omitempty" yaml:"belongs_to_collections,omitempty"`
+}
+
+// IsLocal returns true if the item has been created by a user (not downloaded from the hub).
+func (s *ItemState) IsLocal() bool {
+	return s.local
+}
+
+// Text returns the status of the item as a string (eg. "enabled,update-available").
+func (s *ItemState) Text() string {
+	ret := "disabled"
+
+	if s.Installed {
+		ret = "enabled"
+	}
+
+	if s.IsLocal() {
+		ret += ",local"
+	}
+
+	if s.Tainted {
+		ret += ",tainted"
+	} else if !s.UpToDate && !s.IsLocal() {
+		ret += ",update-available"
+	}
+
+	return ret
+}
+
+// Emoji returns the status of the item as an emoji (eg. emoji.Warning).
+func (s *ItemState) Emoji() string {
+	switch {
+	case s.IsLocal():
+		return emoji.House
+	case !s.Installed:
+		return emoji.Prohibited
+	case s.Tainted || (!s.UpToDate && !s.IsLocal()):
+		return emoji.Warning
+	case s.Installed:
+		return emoji.CheckMark
+	default:
+		return emoji.QuestionMark
+	}
+}
diff --git a/pkg/cwhub/state_test.go b/pkg/cwhub/state_test.go
new file mode 100644
index 00000000000..20741809ae2
--- /dev/null
+++ b/pkg/cwhub/state_test.go
@@ -0,0 +1,77 @@
+package cwhub
+
+import (
+	"strconv"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/crowdsecurity/crowdsec/pkg/emoji"
+)
+
+func TestItemStateText(t *testing.T) {
+	// Test the text representation of an item state.
+ type test struct { + state ItemState + want string + wantIcon string + } + + tests := []test{ + { + ItemState{ + Installed: true, + UpToDate: false, + Tainted: false, + Downloaded: true, + }, + "enabled,update-available", + emoji.Warning, + }, { + ItemState{ + Installed: true, + UpToDate: true, + Tainted: false, + Downloaded: true, + }, + "enabled", + emoji.CheckMark, + }, { + ItemState{ + Installed: true, + UpToDate: false, + local: true, + Tainted: false, + Downloaded: false, + }, + "enabled,local", + emoji.House, + }, { + ItemState{ + Installed: false, + UpToDate: false, + Tainted: false, + Downloaded: true, + }, + "disabled,update-available", + emoji.Prohibited, + }, { + ItemState{ + Installed: true, + UpToDate: false, + Tainted: true, + Downloaded: true, + }, + "enabled,tainted", + emoji.Warning, + }, + } + + for idx, tc := range tests { + t.Run("Test "+strconv.Itoa(idx), func(t *testing.T) { + got := tc.state.Text() + assert.Equal(t, tc.want, got) + assert.Equal(t, tc.wantIcon, tc.state.Emoji()) + }) + } +} diff --git a/pkg/cwhub/sync.go b/pkg/cwhub/sync.go index c82822e64ef..ed99f4806d5 100644 --- a/pkg/cwhub/sync.go +++ b/pkg/cwhub/sync.go @@ -50,9 +50,8 @@ func resolveSymlink(path string) (string, error) { } // isPathInside checks if a path is inside the given directory -// it can return false negatives if the filesystem is case insensitive func isPathInside(path, dir string) (bool, error) { - absFilePath, err := filepath.Abs(path) + absFile, err := filepath.Abs(path) if err != nil { return false, err } @@ -62,99 +61,145 @@ func isPathInside(path, dir string) (bool, error) { return false, err } - return strings.HasPrefix(absFilePath, absDir), nil -} + rel, err := filepath.Rel(absDir, absFile) + if err != nil { + return false, err + } -// information used to create a new Item, from a file path. 
-type itemFileInfo struct { - fname string - stage string - ftype string - fauthor string - inhub bool + return !strings.HasPrefix(rel, ".."), nil } -func (h *Hub) getItemFileInfo(path string, logger *logrus.Logger) (*itemFileInfo, error) { - var ret *itemFileInfo +// itemSpec contains some information needed to complete the items +// after they have been parsed from the index. itemSpecs are created by +// scanning the hub (/etc/crowdsec/hub/*) and install (/etc/crowdsec/*) directories. +// Only directories for the known types are scanned. +type itemSpec struct { + path string // full path to the file (or link) + fname string // name of the item: + // for local item, taken from the file content or defaults to the filename (including extension) + // for non-local items, always {author}/{name} + stage string // stage for parsers and overflows + ftype string // type, plural (collections, contexts etc.) + fauthor string // author - empty for local items + inhub bool // true if the spec comes from the hub dir + target string // the target of path if it's a link, otherwise == path + local bool // is this a spec for a local item? 
+} - hubDir := h.local.HubDir - installDir := h.local.InstallDir +func newHubItemSpec(path string, subs []string, logger *logrus.Logger) (*itemSpec, error) { + // .../hub/parsers/s00-raw/crowdsecurity/skip-pretag.yaml + // .../hub/scenarios/crowdsecurity/ssh_bf.yaml + // .../hub/profiles/crowdsecurity/linux.yaml + if len(subs) < 3 { + return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subs)) + } - subsHub := relativePathComponents(path, hubDir) - subsInstall := relativePathComponents(path, installDir) + ftype := subs[0] + if !slices.Contains(ItemTypes, ftype) { + // this doesn't really happen anymore, because we only scan the {hubtype} directories + return nil, fmt.Errorf("unknown configuration type '%s'", ftype) + } - switch { - case len(subsHub) > 0: - logger.Tracef("in hub dir") + stage := "" + fauthor := subs[1] + fname := subs[2] - // .../hub/parsers/s00-raw/crowdsecurity/skip-pretag.yaml - // .../hub/scenarios/crowdsecurity/ssh_bf.yaml - // .../hub/profiles/crowdsecurity/linux.yaml - if len(subsHub) < 3 { - return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subsHub)) + if ftype == PARSERS || ftype == POSTOVERFLOWS { + if len(subs) < 4 { + return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subs)) } - ftype := subsHub[0] - if !slices.Contains(ItemTypes, ftype) { - // this doesn't really happen anymore, because we only scan the {hubtype} directories - return nil, fmt.Errorf("unknown configuration type '%s'", ftype) - } + stage = subs[1] + fauthor = subs[2] + fname = subs[3] + } - stage := "" - fauthor := subsHub[1] - fname := subsHub[2] + spec := itemSpec{ + path: path, + inhub: true, + ftype: ftype, + stage: stage, + fauthor: fauthor, + fname: fname, + } - if ftype == PARSERS || ftype == POSTOVERFLOWS { - stage = subsHub[1] - fauthor = subsHub[2] - fname = subsHub[3] - } + return &spec, nil +} - ret = &itemFileInfo{ - inhub: true, - ftype: ftype, - stage: stage, - fauthor: fauthor, - fname: fname, - } +func 
newInstallItemSpec(path string, subs []string, logger *logrus.Logger) (*itemSpec, error) { + logger.Tracef("%s in install dir", path) - case len(subsInstall) > 0: - logger.Tracef("in install dir") + // .../config/parser/stage/file.yaml + // .../config/postoverflow/stage/file.yaml + // .../config/scenarios/scenar.yaml + // .../config/collections/linux.yaml //file is empty - // .../config/parser/stage/file.yaml - // .../config/postoverflow/stage/file.yaml - // .../config/scenarios/scenar.yaml - // .../config/collections/linux.yaml //file is empty + if len(subs) < 2 { + return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subs)) + } - if len(subsInstall) < 2 { - return nil, fmt.Errorf("path is too short: %s (%d)", path, len(subsInstall)) - } + // this can be in any number of subdirs, we join them to compose the item name - // this can be in any number of subdirs, we join them to compose the item name + ftype := subs[0] + stage := "" + fname := strings.Join(subs[1:], "/") - ftype := subsInstall[0] - stage := "" - fname := strings.Join(subsInstall[1:], "/") + if ftype == PARSERS || ftype == POSTOVERFLOWS { + stage = subs[1] + fname = strings.Join(subs[2:], "/") + } - if ftype == PARSERS || ftype == POSTOVERFLOWS { - stage = subsInstall[1] - fname = strings.Join(subsInstall[2:], "/") - } + spec := itemSpec{ + path: path, + inhub: false, + ftype: ftype, + stage: stage, + fauthor: "", + fname: fname, + } - ret = &itemFileInfo{ - inhub: false, - ftype: ftype, - stage: stage, - fauthor: "", - fname: fname, + return &spec, nil +} + +func newItemSpec(path, hubDir, installDir string, logger *logrus.Logger) (*itemSpec, error) { + var ( + spec *itemSpec + err error + ) + + if subs := relativePathComponents(path, hubDir); len(subs) > 0 { + spec, err = newHubItemSpec(path, subs, logger) + if err != nil { + return nil, err } - default: + } else if subs := relativePathComponents(path, installDir); len(subs) > 0 { + spec, err = newInstallItemSpec(path, subs, logger) + if err 
!= nil { + return nil, err + } + } + + if spec == nil { return nil, fmt.Errorf("file '%s' is not from hub '%s' nor from the configuration directory '%s'", path, hubDir, installDir) } - logger.Tracef("CORRECTED [%s] by [%s] in stage [%s] of type [%s]", ret.fname, ret.fauthor, ret.stage, ret.ftype) + // follow the link to see if it falls in the hub directory + // if it's not a link, target == path + spec.target, err = resolveSymlink(spec.path) + if err != nil { + // target does not exist, the user might have removed the file + // or switched to a hub branch without it; or symlink loop + return nil, err + } - return ret, nil + targetInHub, err := isPathInside(spec.target, hubDir) + if err != nil { + return nil, ErrSkipPath + } + + spec.local = !targetInHub + + return spec, nil } // sortedVersions returns the input data, sorted in reverse order (new, old) by semver. @@ -164,6 +209,7 @@ func sortedVersions(raw []string) ([]string, error) { for idx, r := range raw { v, err := semver.NewVersion(r) if err != nil { + // TODO: should catch this during index parsing return nil, fmt.Errorf("%s: %w", r, err) } @@ -180,7 +226,7 @@ func sortedVersions(raw []string) ([]string, error) { return ret, nil } -func newLocalItem(h *Hub, path string, info *itemFileInfo) (*Item, error) { +func newLocalItem(h *Hub, path string, spec *itemSpec) (*Item, error) { type localItemName struct { Name string `yaml:"name"` } @@ -189,12 +235,13 @@ func newLocalItem(h *Hub, path string, info *itemFileInfo) (*Item, error) { item := &Item{ hub: h, - Name: info.fname, - Stage: info.stage, - Type: info.ftype, + Name: spec.fname, + Stage: spec.stage, + Type: spec.ftype, FileName: fileName, State: ItemState{ LocalPath: path, + local: true, Installed: true, UpToDate: true, }, @@ -220,22 +267,25 @@ func newLocalItem(h *Hub, path string, info *itemFileInfo) (*Item, error) { return item, nil } -func (h *Hub) itemVisit(path string, f os.DirEntry, err error) error { +// A sentinel to skip regular files because 
"nil, nil" is ambiguous. Returning SkipDir with files would skip the rest of the directory. +var ErrSkipPath = errors.New("sentinel") + +func (h *Hub) itemVisit(path string, f os.DirEntry, err error) (*itemSpec, error) { if err != nil { h.logger.Debugf("while syncing hub dir: %s", err) // there is a path error, we ignore the file - return nil + return nil, ErrSkipPath + } + + // permission errors, files removed while reading, etc. + if f == nil { + return nil, ErrSkipPath } // only happens if the current working directory was removed (!) path, err = filepath.Abs(path) if err != nil { - return err - } - - // permission errors, files removed while reading, etc. - if f == nil { - return nil + return nil, err } if f.IsDir() { @@ -244,106 +294,125 @@ func (h *Hub) itemVisit(path string, f os.DirEntry, err error) error { // - double dot prefix is used by k8s to mount config maps if strings.HasPrefix(f.Name(), ".") { h.logger.Tracef("skipping hidden directory %s", path) - return filepath.SkipDir + return nil, filepath.SkipDir } // keep traversing - return nil + return nil, nil } // we only care about YAML files if !isYAMLFileName(f.Name()) { - return nil + return nil, ErrSkipPath } - info, err := h.getItemFileInfo(path, h.logger) + spec, err := newItemSpec(path, h.local.HubDir, h.local.InstallDir, h.logger) if err != nil { h.logger.Warningf("Ignoring file %s: %s", path, err) - return nil + return nil, ErrSkipPath } - // follow the link to see if it falls in the hub directory - // if it's not a link, target == path - target, err := resolveSymlink(path) - if err != nil { - // target does not exist, the user might have removed the file - // or switched to a hub branch without it; or symlink loop - h.logger.Warningf("Ignoring file %s: %s", path, err) - return nil - } - - targetInHub, err := isPathInside(target, h.local.HubDir) - if err != nil { - h.logger.Warningf("Ignoring file %s: %s", path, err) - return nil - } - - // local (custom) item if the file or link target is not 
inside the hub dir - if !targetInHub { - h.logger.Tracef("%s is a local file, skip", path) - - item, err := newLocalItem(h, path, info) - if err != nil { - return err - } - - h.addItem(item) + return spec, nil +} - return nil +func updateNonLocalItem(h *Hub, path string, spec *itemSpec, symlinkTarget string) (*Item, error) { + // look for the matching index entry + tot := 0 + for range h.GetItemMap(spec.ftype) { + tot++ } - hubpath := target - - // try to find which configuration item it is - h.logger.Tracef("check [%s] of %s", info.fname, info.ftype) - - for _, item := range h.GetItemMap(info.ftype) { - if info.fname != item.FileName { + for _, item := range h.GetItemMap(spec.ftype) { + if spec.fname != item.FileName { continue } - if item.Stage != info.stage { + if item.Stage != spec.stage { continue } - // if we are walking hub dir, just mark present files as downloaded - if info.inhub { - // wrong author - if info.fauthor != item.Author { - continue - } - + // Downloaded item, in the hub dir. + if spec.inhub { // not the item we're looking for - if !item.validPath(info.fauthor, info.fname) { + if !item.validPath(spec.fauthor, spec.fname) { continue } - src, err := item.downloadPath() + src, err := item.DownloadPath() if err != nil { - return err + return nil, err } - if path == src { + if spec.path == src { h.logger.Tracef("marking %s as downloaded", item.Name) item.State.Downloaded = true } - } else if !hasPathSuffix(hubpath, item.RemotePath) { + } else if !hasPathSuffix(symlinkTarget, item.RemotePath) { // wrong file // ///.yaml continue } - err := item.setVersionState(path, info.inhub) + err := item.setVersionState(spec.path, spec.inhub) + if err != nil { + return nil, err + } + + return item, nil + } + + return nil, nil +} + +// addItemFromSpec adds an item to the hub based on the spec, or updates it if already present. 
+// +// When the item is: +// +// Local - an itemSpec instance is created while scanning the install directory +// and an Item instance will be added to the hub.items map. +// +// Not downloaded, not installed - an Item instance is already on hub.items (decoded from index) and left untouched. +// +// Downloaded, not installed - an Item instance is on hub.items (decoded from index) and an itemSpec instance is created +// to complete it (i.e. set version and state flags). +// +// Downloaded, installed - an Item instance is on hub.items and is complemented with two itemSpecs: one from the file +// on the hub directory, one from the link in the install directory. +func (h *Hub) addItemFromSpec(spec *itemSpec) error { + var ( + item *Item + err error + ) + + // Local item: links outside the hub directory. + // We add it, or overwrite the existing one if it happened to have the same name. + if spec.local { + item, err = newLocalItem(h, spec.path, spec) if err != nil { return err } - h.pathIndex[path] = item + // we now have the name declared in the file (for local), + // see if there's another installed item of the same name + theOtherItem := h.GetItem(spec.ftype, item.Name) + if theOtherItem != nil { + if theOtherItem.State.Installed { + h.logger.Warnf("multiple %s named %s: ignoring %s", spec.ftype, item.Name, theOtherItem.State.LocalPath) + } + } + } else { + item, err = updateNonLocalItem(h, spec.path, spec, spec.target) + if err != nil { + return err + } + } + if item == nil { + h.logger.Infof("Ignoring file %s of type %s", spec.path, spec.ftype) return nil } - h.logger.Infof("Ignoring file %s of type %s", path, info.ftype) + h.addItem(item) return nil } @@ -364,7 +433,7 @@ func (i *Item) checkSubItemVersions() []string { // ensure all the sub-items are installed, or tag the parent as tainted i.hub.logger.Tracef("checking submembers of %s installed:%t", i.Name, i.State.Installed) - for _, sub := range i.SubItems() { + for sub := range 
i.CurrentDependencies().SubItems(i.hub) { i.hub.logger.Tracef("check %s installed:%t", sub.Name, sub.State.Installed) if !i.State.Installed { @@ -398,7 +467,7 @@ func (i *Item) checkSubItemVersions() []string { if !sub.State.UpToDate { i.State.UpToDate = false - warn = append(warn, fmt.Sprintf("%s is tainted by outdated %s", i.Name, sub.FQName())) + warn = append(warn, fmt.Sprintf("%s is outdated because of %s", i.Name, sub.FQName())) continue } @@ -411,6 +480,8 @@ func (i *Item) checkSubItemVersions() []string { // syncDir scans a directory for items, and updates the Hub state accordingly. func (h *Hub) syncDir(dir string) error { + specs := []*itemSpec{} + // For each, scan PARSERS, POSTOVERFLOWS... and COLLECTIONS last for _, scan := range ItemTypes { // cpath: top-level item directory, either downloaded or installed items. @@ -423,11 +494,46 @@ func (h *Hub) syncDir(dir string) error { // explicit check for non existing directory, avoid spamming log.Debug if _, err = os.Stat(cpath); os.IsNotExist(err) { - h.logger.Tracef("directory %s doesn't exist, skipping", cpath) continue } - if err = filepath.WalkDir(cpath, h.itemVisit); err != nil { + // wrap itemVisit to collect spec results + specCollector := func(path string, f os.DirEntry, err error) error { + spec, err := h.itemVisit(path, f, err) + if err == nil && spec != nil { + specs = append(specs, spec) + } + + if errors.Is(err, ErrSkipPath) { + return nil + } + + return err + } + + if err = filepath.WalkDir(cpath, specCollector); err != nil { + return err + } + } + + // add non-local items first, so they can find the place in the index + // before it's overridden by local items in case of name collision + for _, spec := range specs { + if spec.local { + continue + } + + if err := h.addItemFromSpec(spec); err != nil { + return err + } + } + + for _, spec := range specs { + if !spec.local { + continue + } + + if err := h.addItemFromSpec(spec); err != nil { return err } } @@ -463,13 +569,14 @@ func 
removeDuplicates(sl []string) []string { // localSync updates the hub state with downloaded, installed and local items. func (h *Hub) localSync() error { - err := h.syncDir(h.local.InstallDir) - if err != nil { - return fmt.Errorf("failed to scan %s: %w", h.local.InstallDir, err) + // add downloaded files first, so they can find the place in the index + // before it's overridden by local items in case of name collision + if err := h.syncDir(h.local.HubDir); err != nil { + return fmt.Errorf("failed to sync %s: %w", h.local.HubDir, err) } - if err = h.syncDir(h.local.HubDir); err != nil { - return fmt.Errorf("failed to scan %s: %w", h.local.HubDir, err) + if err := h.syncDir(h.local.InstallDir); err != nil { + return fmt.Errorf("failed to sync %s: %w", h.local.InstallDir, err) } warnings := make([]string, 0) diff --git a/pkg/cwversion/component/component.go b/pkg/cwversion/component/component.go index 7ed596525e0..2c6374e4bb7 100644 --- a/pkg/cwversion/component/component.go +++ b/pkg/cwversion/component/component.go @@ -8,20 +8,21 @@ package component // Built is a map of all the known components, and whether they are built-in or not. 
// This is populated as soon as possible by the respective init() functions var Built = map[string]bool{ - "datasource_appsec": false, - "datasource_cloudwatch": false, - "datasource_docker": false, - "datasource_file": false, - "datasource_journalctl": false, - "datasource_k8s-audit": false, - "datasource_kafka": false, - "datasource_kinesis": false, - "datasource_loki": false, - "datasource_s3": false, - "datasource_syslog": false, - "datasource_wineventlog": false, - "datasource_http": false, - "cscli_setup": false, + "datasource_appsec": false, + "datasource_cloudwatch": false, + "datasource_docker": false, + "datasource_file": false, + "datasource_journalctl": false, + "datasource_k8s-audit": false, + "datasource_kafka": false, + "datasource_kinesis": false, + "datasource_loki": false, + "datasource_s3": false, + "datasource_syslog": false, + "datasource_wineventlog": false, + "datasource_victorialogs": false, + "datasource_http": false, + "cscli_setup": false, } func Register(name string) { diff --git a/pkg/cwversion/version.go b/pkg/cwversion/version.go index 2cb7de13e18..87d855444e7 100644 --- a/pkg/cwversion/version.go +++ b/pkg/cwversion/version.go @@ -2,6 +2,7 @@ package cwversion import ( "fmt" + "regexp" "strings" "github.com/crowdsecurity/go-cs-lib/maptools" @@ -57,10 +58,19 @@ func FullString() string { return ret } -// VersionStrip remove the tag from the version string, used to match with a hub branch -func VersionStrip() string { - ret := strings.Split(version.Version, "~") - ret = strings.Split(ret[0], "-") +// StripTags removes any tag (-rc, ~foo3, .r1, etc) from a version string +func StripTags(version string) string { + reVersion := regexp.MustCompile(`^v(\d+)\.(\d+)\.(\d+)`) + ret := reVersion.FindStringSubmatch(version) + + if len(ret) == 0 { + return version + } return ret[0] } + +// BaseVersion returns the version number used to match a hub branch. 
+func BaseVersion() string { + return StripTags(version.Version) +} diff --git a/pkg/cwversion/version_test.go b/pkg/cwversion/version_test.go new file mode 100644 index 00000000000..13293d4a479 --- /dev/null +++ b/pkg/cwversion/version_test.go @@ -0,0 +1,68 @@ +package cwversion + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestStripTags(t *testing.T) { + tests := []struct { + name string + input string + want string + }{ + { + name: "no tag, valid version v1.2.3", + input: "v1.2.3", + want: "v1.2.3", + }, + { + name: "tag appended with dash", + input: "v1.2.3-rc1", + want: "v1.2.3", + }, + { + name: "tag appended with tilde", + input: "v1.2.3~foo3", + want: "v1.2.3", + }, + { + name: "tag appended with dot", + input: "v1.2.3.r1", + want: "v1.2.3", + }, + { + name: "tag appended directly", + input: "v1.2.3r1", + want: "v1.2.3", + }, + { + name: "multiple digits in version", + input: "v10.20.30-rc2", + want: "v10.20.30", + }, + { + name: "invalid version (no 'v' prefix)", + input: "1.2.3-tag", + want: "1.2.3-tag", + }, + { + name: "random string", + input: "some-random-string", + want: "some-random-string", + }, + { + name: "freebsd pre-release", + input: "v1.6.5.r1", + want: "v1.6.5", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := StripTags(tt.input) + require.Equal(t, tt.want, got) + }) + } +} diff --git a/pkg/database/alertfilter.go b/pkg/database/alertfilter.go new file mode 100644 index 00000000000..9e8cf53a450 --- /dev/null +++ b/pkg/database/alertfilter.go @@ -0,0 +1,258 @@ +package database + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + + "github.com/crowdsecurity/crowdsec/pkg/database/ent" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/alert" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" + "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" + 
"github.com/crowdsecurity/crowdsec/pkg/types" +) + +func handleSimulatedFilter(filter map[string][]string, predicates *[]predicate.Alert) { + /* the simulated filter is a bit different : if it's not present *or* set to false, specifically exclude records with simulated to true */ + if v, ok := filter["simulated"]; ok && v[0] == "false" { + *predicates = append(*predicates, alert.SimulatedEQ(false)) + } +} + +func handleOriginFilter(filter map[string][]string, predicates *[]predicate.Alert) { + if _, ok := filter["origin"]; ok { + filter["include_capi"] = []string{"true"} + } +} + +func handleScopeFilter(scope string, predicates *[]predicate.Alert) { + if strings.ToLower(scope) == "ip" { + scope = types.Ip + } else if strings.ToLower(scope) == "range" { + scope = types.Range + } + + *predicates = append(*predicates, alert.SourceScopeEQ(scope)) +} + +func handleTimeFilters(param, value string, predicates *[]predicate.Alert) error { + duration, err := ParseDuration(value) + if err != nil { + return fmt.Errorf("while parsing duration: %w", err) + } + + timePoint := time.Now().UTC().Add(-duration) + if timePoint.IsZero() { + return fmt.Errorf("empty time now() - %s", timePoint.String()) + } + + switch param { + case "since": + *predicates = append(*predicates, alert.StartedAtGTE(timePoint)) + case "created_before": + *predicates = append(*predicates, alert.CreatedAtLTE(timePoint)) + case "until": + *predicates = append(*predicates, alert.StartedAtLTE(timePoint)) + } + + return nil +} + +func handleIPv4Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) { + if contains { // decision contains {start_ip,end_ip} + *predicates = append(*predicates, alert.And( + alert.HasDecisionsWith(decision.StartIPLTE(start_ip)), + alert.HasDecisionsWith(decision.EndIPGTE(end_ip)), + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + )) + } else { // decision is contained within {start_ip,end_ip} + *predicates = 
append(*predicates, alert.And( + alert.HasDecisionsWith(decision.StartIPGTE(start_ip)), + alert.HasDecisionsWith(decision.EndIPLTE(end_ip)), + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + )) + } +} + +func handleIPv6Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) { + if contains { // decision contains {start_ip,end_ip} + *predicates = append(*predicates, alert.And( + // matching addr size + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + alert.Or( + // decision.start_ip < query.start_ip + alert.HasDecisionsWith(decision.StartIPLT(start_ip)), + alert.And( + // decision.start_ip == query.start_ip + alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), + // decision.start_suffix <= query.start_suffix + alert.HasDecisionsWith(decision.StartSuffixLTE(start_sfx)), + ), + ), + alert.Or( + // decision.end_ip > query.end_ip + alert.HasDecisionsWith(decision.EndIPGT(end_ip)), + alert.And( + // decision.end_ip == query.end_ip + alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), + // decision.end_suffix >= query.end_suffix + alert.HasDecisionsWith(decision.EndSuffixGTE(end_sfx)), + ), + ), + )) + } else { // decision is contained within {start_ip,end_ip} + *predicates = append(*predicates, alert.And( + // matching addr size + alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), + alert.Or( + // decision.start_ip > query.start_ip + alert.HasDecisionsWith(decision.StartIPGT(start_ip)), + alert.And( + // decision.start_ip == query.start_ip + alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), + // decision.start_suffix >= query.start_suffix + alert.HasDecisionsWith(decision.StartSuffixGTE(start_sfx)), + ), + ), + alert.Or( + // decision.end_ip < query.end_ip + alert.HasDecisionsWith(decision.EndIPLT(end_ip)), + alert.And( + // decision.end_ip == query.end_ip + alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), + // decision.end_suffix <= query.end_suffix + 
alert.HasDecisionsWith(decision.EndSuffixLTE(end_sfx)), + ), + ), + )) + } +} + +func handleIPPredicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) error { + if ip_sz == 4 { + handleIPv4Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) + } else if ip_sz == 16 { + handleIPv6Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) + } else if ip_sz != 0 { + return errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) + } + + return nil +} + +func handleIncludeCapiFilter(value string, predicates *[]predicate.Alert) error { + if value == "false" { + *predicates = append(*predicates, alert.And( + // do not show alerts with active decisions having origin CAPI or lists + alert.And( + alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.CAPIOrigin))), + alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.ListOrigin))), + ), + alert.Not( + alert.And( + // do not show neither alerts with no decisions if the Source Scope is lists: or CAPI + alert.Not(alert.HasDecisions()), + alert.Or( + alert.SourceScopeHasPrefix(types.ListOrigin+":"), + alert.SourceScopeEQ(types.CommunityBlocklistPullSourceScope), + ), + ), + ), + )) + } else if value != "true" { + log.Errorf("invalid bool '%s' for include_capi", value) + } + + return nil +} + +func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, error) { + predicates := make([]predicate.Alert, 0) + + var ( + err error + start_ip, start_sfx, end_ip, end_sfx int64 + hasActiveDecision bool + ip_sz int + ) + + contains := true + + /*if contains is true, return bans that *contains* the given value (value is the inner) + else, return bans that are *contained* by the given value (value is the outer)*/ + + handleSimulatedFilter(filter, &predicates) + handleOriginFilter(filter, &predicates) + + for param, value := range filter { + switch param { + case "contains": + contains, err = 
strconv.ParseBool(value[0]) + if err != nil { + return nil, errors.Wrapf(InvalidFilter, "invalid contains value : %s", err) + } + case "scope": + handleScopeFilter(value[0], &predicates) + case "value": + predicates = append(predicates, alert.SourceValueEQ(value[0])) + case "scenario": + predicates = append(predicates, alert.HasDecisionsWith(decision.ScenarioEQ(value[0]))) + case "ip", "range": + ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(value[0]) + if err != nil { + return nil, err + } + case "since", "created_before", "until": + if err := handleTimeFilters(param, value[0], &predicates); err != nil { + return nil, err + } + case "decision_type": + predicates = append(predicates, alert.HasDecisionsWith(decision.TypeEQ(value[0]))) + case "origin": + predicates = append(predicates, alert.HasDecisionsWith(decision.OriginEQ(value[0]))) + case "include_capi": // allows to exclude one or more specific origins + if err = handleIncludeCapiFilter(value[0], &predicates); err != nil { + return nil, err + } + case "has_active_decision": + if hasActiveDecision, err = strconv.ParseBool(value[0]); err != nil { + return nil, errors.Wrapf(ParseType, "'%s' is not a boolean: %s", value[0], err) + } + + if hasActiveDecision { + predicates = append(predicates, alert.HasDecisionsWith(decision.UntilGTE(time.Now().UTC()))) + } else { + predicates = append(predicates, alert.Not(alert.HasDecisions())) + } + case "limit": + continue + case "sort": + continue + case "simulated": + continue + case "with_decisions": + continue + default: + return nil, errors.Wrapf(InvalidFilter, "Filter parameter '%s' is unknown (=%s)", param, value[0]) + } + } + + if err := handleIPPredicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, &predicates); err != nil { + return nil, err + } + + return predicates, nil +} + +func BuildAlertRequestFromFilter(alerts *ent.AlertQuery, filter map[string][]string) (*ent.AlertQuery, error) { + preds, err := AlertPredicatesFromFilter(filter) 
+ if err != nil { + return nil, err + } + + return alerts.Where(preds...), nil +} diff --git a/pkg/database/alerts.go b/pkg/database/alerts.go index ede9c89fe9a..107abcbb1d0 100644 --- a/pkg/database/alerts.go +++ b/pkg/database/alerts.go @@ -20,7 +20,6 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/database/ent/decision" "github.com/crowdsecurity/crowdsec/pkg/database/ent/event" "github.com/crowdsecurity/crowdsec/pkg/database/ent/meta" - "github.com/crowdsecurity/crowdsec/pkg/database/ent/predicate" "github.com/crowdsecurity/crowdsec/pkg/models" "github.com/crowdsecurity/crowdsec/pkg/types" ) @@ -32,6 +31,14 @@ const ( maxLockRetries = 10 // how many times to retry a bulk operation when sqlite3.ErrBusy is encountered ) +func rollbackOnError(tx *ent.Tx, err error, msg string) error { + if rbErr := tx.Rollback(); rbErr != nil { + log.Errorf("rollback error: %v", rbErr) + } + + return fmt.Errorf("%s: %w", msg, err) +} + // CreateOrUpdateAlert is specific to PAPI : It checks if alert already exists, otherwise inserts it // if alert already exists, it checks it associated decisions already exists // if some associated decisions are missing (ie. 
previous insert ended up in error) it inserts them @@ -285,12 +292,7 @@ func (c *Client) UpdateCommunityBlocklist(ctx context.Context, alertItem *models duration, err := time.ParseDuration(*decisionItem.Duration) if err != nil { - rollbackErr := txClient.Rollback() - if rollbackErr != nil { - log.Errorf("rollback error: %s", rollbackErr) - } - - return 0, 0, 0, errors.Wrapf(ParseDurationFail, "decision duration '%+v' : %s", *decisionItem.Duration, err) + return 0, 0, 0, rollbackOnError(txClient, err, "parsing decision duration") } if decisionItem.Scope == nil { @@ -302,12 +304,7 @@ func (c *Client) UpdateCommunityBlocklist(ctx context.Context, alertItem *models if strings.ToLower(*decisionItem.Scope) == "ip" || strings.ToLower(*decisionItem.Scope) == "range" { sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(*decisionItem.Value) if err != nil { - rollbackErr := txClient.Rollback() - if rollbackErr != nil { - log.Errorf("rollback error: %s", rollbackErr) - } - - return 0, 0, 0, errors.Wrapf(InvalidIPOrRange, "invalid addr/range %s : %s", *decisionItem.Value, err) + return 0, 0, 0, rollbackOnError(txClient, err, "invalid ip addr/range") } } @@ -349,12 +346,7 @@ func (c *Client) UpdateCommunityBlocklist(ctx context.Context, alertItem *models decision.ValueIn(deleteChunk...), )).Exec(ctx) if err != nil { - rollbackErr := txClient.Rollback() - if rollbackErr != nil { - log.Errorf("rollback error: %s", rollbackErr) - } - - return 0, 0, 0, fmt.Errorf("while deleting older community blocklist decisions: %w", err) + return 0, 0, 0, rollbackOnError(txClient, err, "deleting older community blocklist decisions") } deleted += deletedDecisions @@ -365,12 +357,7 @@ func (c *Client) UpdateCommunityBlocklist(ctx context.Context, alertItem *models for _, builderChunk := range builderChunks { insertedDecisions, err := txClient.Decision.CreateBulk(builderChunk...).Save(ctx) if err != nil { - rollbackErr := txClient.Rollback() - if rollbackErr != nil { -
log.Errorf("rollback error: %s", rollbackErr) - } - - return 0, 0, 0, fmt.Errorf("while bulk creating decisions: %w", err) + return 0, 0, 0, rollbackOnError(txClient, err, "bulk creating decisions") } inserted += len(insertedDecisions) @@ -380,12 +367,7 @@ func (c *Client) UpdateCommunityBlocklist(ctx context.Context, alertItem *models err = txClient.Commit() if err != nil { - rollbackErr := txClient.Rollback() - if rollbackErr != nil { - log.Errorf("rollback error: %s", rollbackErr) - } - - return 0, 0, 0, fmt.Errorf("error committing transaction: %w", err) + return 0, 0, 0, rollbackOnError(txClient, err, "error committing transaction") } return alertRef.ID, inserted, deleted, nil @@ -660,7 +642,8 @@ func (c *Client) createAlertChunk(ctx context.Context, machineID string, owner * break } - if sqliteErr, ok := err.(sqlite3.Error); ok { + var sqliteErr sqlite3.Error + if errors.As(err, &sqliteErr) { if sqliteErr.Code == sqlite3.ErrBusy { // sqlite3.Error{ // Code: 5, @@ -727,247 +710,6 @@ func (c *Client) CreateAlert(ctx context.Context, machineID string, alertList [] return alertIDs, nil } -func handleSimulatedFilter(filter map[string][]string, predicates *[]predicate.Alert) { - /* the simulated filter is a bit different : if it's not present *or* set to false, specifically exclude records with simulated to true */ - if v, ok := filter["simulated"]; ok && v[0] == "false" { - *predicates = append(*predicates, alert.SimulatedEQ(false)) - } -} - -func handleOriginFilter(filter map[string][]string, predicates *[]predicate.Alert) { - if _, ok := filter["origin"]; ok { - filter["include_capi"] = []string{"true"} - } -} - -func handleScopeFilter(scope string, predicates *[]predicate.Alert) { - if strings.ToLower(scope) == "ip" { - scope = types.Ip - } else if strings.ToLower(scope) == "range" { - scope = types.Range - } - - *predicates = append(*predicates, alert.SourceScopeEQ(scope)) -} - -func handleTimeFilters(param, value string, predicates *[]predicate.Alert) error { 
- duration, err := ParseDuration(value) - if err != nil { - return fmt.Errorf("while parsing duration: %w", err) - } - - timePoint := time.Now().UTC().Add(-duration) - if timePoint.IsZero() { - return fmt.Errorf("empty time now() - %s", timePoint.String()) - } - - switch param { - case "since": - *predicates = append(*predicates, alert.StartedAtGTE(timePoint)) - case "created_before": - *predicates = append(*predicates, alert.CreatedAtLTE(timePoint)) - case "until": - *predicates = append(*predicates, alert.StartedAtLTE(timePoint)) - } - - return nil -} - -func handleIPv4Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) { - if contains { // decision contains {start_ip,end_ip} - *predicates = append(*predicates, alert.And( - alert.HasDecisionsWith(decision.StartIPLTE(start_ip)), - alert.HasDecisionsWith(decision.EndIPGTE(end_ip)), - alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - )) - } else { // decision is contained within {start_ip,end_ip} - *predicates = append(*predicates, alert.And( - alert.HasDecisionsWith(decision.StartIPGTE(start_ip)), - alert.HasDecisionsWith(decision.EndIPLTE(end_ip)), - alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - )) - } -} - -func handleIPv6Predicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) { - if contains { // decision contains {start_ip,end_ip} - *predicates = append(*predicates, alert.And( - // matching addr size - alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - alert.Or( - // decision.start_ip < query.start_ip - alert.HasDecisionsWith(decision.StartIPLT(start_ip)), - alert.And( - // decision.start_ip == query.start_ip - alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), - // decision.start_suffix <= query.start_suffix - alert.HasDecisionsWith(decision.StartSuffixLTE(start_sfx)), - ), - ), - alert.Or( - // decision.end_ip > query.end_ip - 
alert.HasDecisionsWith(decision.EndIPGT(end_ip)), - alert.And( - // decision.end_ip == query.end_ip - alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), - // decision.end_suffix >= query.end_suffix - alert.HasDecisionsWith(decision.EndSuffixGTE(end_sfx)), - ), - ), - )) - } else { // decision is contained within {start_ip,end_ip} - *predicates = append(*predicates, alert.And( - // matching addr size - alert.HasDecisionsWith(decision.IPSizeEQ(int64(ip_sz))), - alert.Or( - // decision.start_ip > query.start_ip - alert.HasDecisionsWith(decision.StartIPGT(start_ip)), - alert.And( - // decision.start_ip == query.start_ip - alert.HasDecisionsWith(decision.StartIPEQ(start_ip)), - // decision.start_suffix >= query.start_suffix - alert.HasDecisionsWith(decision.StartSuffixGTE(start_sfx)), - ), - ), - alert.Or( - // decision.end_ip < query.end_ip - alert.HasDecisionsWith(decision.EndIPLT(end_ip)), - alert.And( - // decision.end_ip == query.end_ip - alert.HasDecisionsWith(decision.EndIPEQ(end_ip)), - // decision.end_suffix <= query.end_suffix - alert.HasDecisionsWith(decision.EndSuffixLTE(end_sfx)), - ), - ), - )) - } -} - -func handleIPPredicates(ip_sz int, contains bool, start_ip, start_sfx, end_ip, end_sfx int64, predicates *[]predicate.Alert) error { - if ip_sz == 4 { - handleIPv4Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) - } else if ip_sz == 16 { - handleIPv6Predicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, predicates) - } else if ip_sz != 0 { - return errors.Wrapf(InvalidFilter, "Unknown ip size %d", ip_sz) - } - - return nil -} - -func handleIncludeCapiFilter(value string, predicates *[]predicate.Alert) error { - if value == "false" { - *predicates = append(*predicates, alert.And( - // do not show alerts with active decisions having origin CAPI or lists - alert.And( - alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.CAPIOrigin))), - alert.Not(alert.HasDecisionsWith(decision.OriginEQ(types.ListOrigin))), - ), - 
alert.Not( - alert.And( - // do not show neither alerts with no decisions if the Source Scope is lists: or CAPI - alert.Not(alert.HasDecisions()), - alert.Or( - alert.SourceScopeHasPrefix(types.ListOrigin+":"), - alert.SourceScopeEQ(types.CommunityBlocklistPullSourceScope), - ), - ), - ), - )) - } else if value != "true" { - log.Errorf("invalid bool '%s' for include_capi", value) - } - - return nil -} - -func AlertPredicatesFromFilter(filter map[string][]string) ([]predicate.Alert, error) { - predicates := make([]predicate.Alert, 0) - - var ( - err error - start_ip, start_sfx, end_ip, end_sfx int64 - hasActiveDecision bool - ip_sz int - ) - - contains := true - - /*if contains is true, return bans that *contains* the given value (value is the inner) - else, return bans that are *contained* by the given value (value is the outer)*/ - - handleSimulatedFilter(filter, &predicates) - handleOriginFilter(filter, &predicates) - - for param, value := range filter { - switch param { - case "contains": - contains, err = strconv.ParseBool(value[0]) - if err != nil { - return nil, errors.Wrapf(InvalidFilter, "invalid contains value : %s", err) - } - case "scope": - handleScopeFilter(value[0], &predicates) - case "value": - predicates = append(predicates, alert.SourceValueEQ(value[0])) - case "scenario": - predicates = append(predicates, alert.HasDecisionsWith(decision.ScenarioEQ(value[0]))) - case "ip", "range": - ip_sz, start_ip, start_sfx, end_ip, end_sfx, err = types.Addr2Ints(value[0]) - if err != nil { - return nil, errors.Wrapf(InvalidIPOrRange, "unable to convert '%s' to int: %s", value[0], err) - } - case "since", "created_before", "until": - if err := handleTimeFilters(param, value[0], &predicates); err != nil { - return nil, err - } - case "decision_type": - predicates = append(predicates, alert.HasDecisionsWith(decision.TypeEQ(value[0]))) - case "origin": - predicates = append(predicates, alert.HasDecisionsWith(decision.OriginEQ(value[0]))) - case "include_capi": // 
allows to exclude one or more specific origins - if err = handleIncludeCapiFilter(value[0], &predicates); err != nil { - return nil, err - } - case "has_active_decision": - if hasActiveDecision, err = strconv.ParseBool(value[0]); err != nil { - return nil, errors.Wrapf(ParseType, "'%s' is not a boolean: %s", value[0], err) - } - - if hasActiveDecision { - predicates = append(predicates, alert.HasDecisionsWith(decision.UntilGTE(time.Now().UTC()))) - } else { - predicates = append(predicates, alert.Not(alert.HasDecisions())) - } - case "limit": - continue - case "sort": - continue - case "simulated": - continue - case "with_decisions": - continue - default: - return nil, errors.Wrapf(InvalidFilter, "Filter parameter '%s' is unknown (=%s)", param, value[0]) - } - } - - if err := handleIPPredicates(ip_sz, contains, start_ip, start_sfx, end_ip, end_sfx, &predicates); err != nil { - return nil, err - } - - return predicates, nil -} - -func BuildAlertRequestFromFilter(alerts *ent.AlertQuery, filter map[string][]string) (*ent.AlertQuery, error) { - preds, err := AlertPredicatesFromFilter(filter) - if err != nil { - return nil, err - } - - return alerts.Where(preds...), nil -} - func (c *Client) AlertsCountPerScenario(ctx context.Context, filters map[string][]string) (map[string]int, error) { var res []struct { Scenario string diff --git a/pkg/database/database.go b/pkg/database/database.go index bb41dd3b645..80479710751 100644 --- a/pkg/database/database.go +++ b/pkg/database/database.go @@ -68,7 +68,7 @@ func NewClient(ctx context.Context, config *csconfig.DatabaseCfg) (*Client, erro return nil, err // unsupported database caught here } - if config.Type == "sqlite" { + if config.Type == "sqlite" && config.DbPath != ":memory:" { /*if it's the first startup, we want to touch and chmod file*/ if _, err = os.Stat(config.DbPath); os.IsNotExist(err) { f, err := os.OpenFile(config.DbPath, os.O_CREATE|os.O_RDWR, 0o600) diff --git a/pkg/database/errors.go b/pkg/database/errors.go 
index 77f92707e51..e0223be95b8 100644 --- a/pkg/database/errors.go +++ b/pkg/database/errors.go @@ -14,7 +14,6 @@ var ( ParseTimeFail = errors.New("unable to parse time") ParseDurationFail = errors.New("unable to parse duration") MarshalFail = errors.New("unable to serialize") - UnmarshalFail = errors.New("unable to parse") BulkError = errors.New("unable to insert bulk") ParseType = errors.New("unable to parse type") InvalidIPOrRange = errors.New("invalid ip address / range") diff --git a/pkg/database/flush.go b/pkg/database/flush.go index 8f646ddc961..4a3a93a406c 100644 --- a/pkg/database/flush.go +++ b/pkg/database/flush.go @@ -222,7 +222,7 @@ func (c *Client) FlushAgentsAndBouncers(ctx context.Context, agentsCfg *csconfig return nil } -func (c *Client) FlushAlerts(ctx context.Context, MaxAge string, MaxItems int) error { +func (c *Client) FlushAlerts(ctx context.Context, maxAge string, maxItems int) error { var ( deletedByAge int deletedByNbItem int @@ -247,22 +247,22 @@ func (c *Client) FlushAlerts(ctx context.Context, MaxAge string, MaxItems int) e c.Log.Debugf("FlushAlerts (Total alerts): %d", totalAlerts) - if MaxAge != "" { + if maxAge != "" { filter := map[string][]string{ - "created_before": {MaxAge}, + "created_before": {maxAge}, } nbDeleted, err := c.DeleteAlertWithFilter(ctx, filter) if err != nil { c.Log.Warningf("FlushAlerts (max age): %s", err) - return fmt.Errorf("unable to flush alerts with filter until=%s: %w", MaxAge, err) + return fmt.Errorf("unable to flush alerts with filter until=%s: %w", maxAge, err) } c.Log.Debugf("FlushAlerts (deleted max age alerts): %d", nbDeleted) deletedByAge = nbDeleted } - if MaxItems > 0 { + if maxItems > 0 { // We get the highest id for the alerts // We subtract MaxItems to avoid deleting alerts that are not old enough // This gives us the oldest alert that we want to keep @@ -282,7 +282,7 @@ func (c *Client) FlushAlerts(ctx context.Context, MaxAge string, MaxItems int) e } if len(lastAlert) != 0 { - maxid := 
lastAlert[0].ID - MaxItems + maxid := lastAlert[0].ID - maxItems c.Log.Debugf("FlushAlerts (max id): %d", maxid) @@ -299,12 +299,12 @@ func (c *Client) FlushAlerts(ctx context.Context, MaxAge string, MaxItems int) e if deletedByNbItem > 0 { c.Log.Infof("flushed %d/%d alerts because the max number of alerts has been reached (%d max)", - deletedByNbItem, totalAlerts, MaxItems) + deletedByNbItem, totalAlerts, maxItems) } if deletedByAge > 0 { c.Log.Infof("flushed %d/%d alerts because they were created %s ago or more", - deletedByAge, totalAlerts, MaxAge) + deletedByAge, totalAlerts, maxAge) } return nil diff --git a/pkg/database/machines.go b/pkg/database/machines.go index d8c02825312..1293633ed9e 100644 --- a/pkg/database/machines.go +++ b/pkg/database/machines.go @@ -34,14 +34,6 @@ func (c *Client) MachineUpdateBaseMetrics(ctx context.Context, machineID string, os := baseMetrics.Os features := strings.Join(baseMetrics.FeatureFlags, ",") - var heartbeat time.Time - - if len(baseMetrics.Metrics) == 0 { - heartbeat = time.Now().UTC() - } else { - heartbeat = time.Unix(*baseMetrics.Metrics[0].Meta.UtcNowTimestamp, 0) - } - hubState := map[string][]schema.ItemState{} for itemType, items := range hubItems { hubState[itemType] = []schema.ItemState{} @@ -61,7 +53,6 @@ func (c *Client) MachineUpdateBaseMetrics(ctx context.Context, machineID string, SetOsname(*os.Name). SetOsversion(*os.Version). SetFeatureflags(features). - SetLastHeartbeat(heartbeat). SetHubstate(hubState). SetDatasources(datasources). 
Save(ctx) diff --git a/pkg/dumps/parser_dump.go b/pkg/dumps/parser_dump.go index bc8f78dc203..bd385bec194 100644 --- a/pkg/dumps/parser_dump.go +++ b/pkg/dumps/parser_dump.go @@ -145,25 +145,25 @@ func (t *tree) processEvents(parserResults ParserResults) { } func (t *tree) processBuckets(bucketPour BucketPourInfo) { - for bname, evtlist := range bucketPour { - for _, evt := range evtlist { - if evt.Line.Raw == "" { + for bname, events := range bucketPour { + for i := range events { + if events[i].Line.Raw == "" { continue } // it might be bucket overflow being reprocessed, skip this - if _, ok := t.state[evt.Line.Time]; !ok { - t.state[evt.Line.Time] = make(map[string]map[string]ParserResult) - t.assoc[evt.Line.Time] = evt.Line.Raw + if _, ok := t.state[events[i].Line.Time]; !ok { + t.state[events[i].Line.Time] = make(map[string]map[string]ParserResult) + t.assoc[events[i].Line.Time] = events[i].Line.Raw } - // there is a trick : to know if an event successfully exit the parsers, we check if it reached the pour() phase + // there is a trick: to know if an event successfully exit the parsers, we check if it reached the pour() phase // we thus use a fake stage "buckets" and a fake parser "OK" to know if it entered - if _, ok := t.state[evt.Line.Time]["buckets"]; !ok { - t.state[evt.Line.Time]["buckets"] = make(map[string]ParserResult) + if _, ok := t.state[events[i].Line.Time]["buckets"]; !ok { + t.state[events[i].Line.Time]["buckets"] = make(map[string]ParserResult) } - t.state[evt.Line.Time]["buckets"][bname] = ParserResult{Success: true} + t.state[events[i].Line.Time]["buckets"][bname] = ParserResult{Success: true} } } } diff --git a/pkg/emoji/emoji.go b/pkg/emoji/emoji.go index 51295a85411..9b939249bf0 100644 --- a/pkg/emoji/emoji.go +++ b/pkg/emoji/emoji.go @@ -11,4 +11,8 @@ const ( QuestionMark = "\u2753" // ❓ RedCircle = "\U0001f534" // 🔴 Warning = "\u26a0\ufe0f" // ⚠️ + InboxTray = "\U0001f4e5" // 📥 + DownArrow = "\u2b07" // ⬇️ + Wastebasket = "\U0001f5d1" // 
🗑 + Sync = "\U0001F504" // 🔄 official name is Anticlockwise Downwards and Upwards Open Circle Arrows and I'm not even joking ) diff --git a/pkg/exprhelpers/crowdsec_cti.go b/pkg/exprhelpers/crowdsec_cti.go index ccd67b27a49..900bd7824a8 100644 --- a/pkg/exprhelpers/crowdsec_cti.go +++ b/pkg/exprhelpers/crowdsec_cti.go @@ -12,42 +12,46 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var CTIUrl = "https://cti.api.crowdsec.net" -var CTIUrlSuffix = "/v2/smoke/" -var CTIApiKey = "" +var ( + CTIUrl = "https://cti.api.crowdsec.net" + CTIUrlSuffix = "/v2/smoke/" + CTIApiKey = "" +) // this is set for non-recoverable errors, such as 403 when querying API or empty API key var CTIApiEnabled = false // when hitting quotas or auth errors, we temporarily disable the API -var CTIBackOffUntil time.Time -var CTIBackOffDuration = 5 * time.Minute +var ( + CTIBackOffUntil time.Time + CTIBackOffDuration = 5 * time.Minute +) var ctiClient *cticlient.CrowdsecCTIClient -func InitCrowdsecCTI(Key *string, TTL *time.Duration, Size *int, LogLevel *log.Level) error { - if Key == nil || *Key == "" { +func InitCrowdsecCTI(key *string, ttl *time.Duration, size *int, logLevel *log.Level) error { + if key == nil || *key == "" { log.Warningf("CTI API key not set or empty, CTI will not be available") return cticlient.ErrDisabled } - CTIApiKey = *Key - if Size == nil { - Size = new(int) - *Size = 1000 + CTIApiKey = *key + if size == nil { + size = new(int) + *size = 1000 } - if TTL == nil { - TTL = new(time.Duration) - *TTL = 5 * time.Minute + if ttl == nil { + ttl = new(time.Duration) + *ttl = 5 * time.Minute } clog := log.New() if err := types.ConfigureLogger(clog); err != nil { return fmt.Errorf("while configuring datasource logger: %w", err) } - if LogLevel != nil { - clog.SetLevel(*LogLevel) + if logLevel != nil { + clog.SetLevel(*logLevel) } subLogger := clog.WithField("type", "crowdsec-cti") - CrowdsecCTIInitCache(*Size, *TTL) + CrowdsecCTIInitCache(*size, *ttl) ctiClient = 
cticlient.NewCrowdsecCTIClient(cticlient.WithAPIKey(CTIApiKey), cticlient.WithLogger(subLogger)) CTIApiEnabled = true return nil @@ -62,8 +66,10 @@ func ShutdownCrowdsecCTI() { } // Cache for responses -var CTICache gcache.Cache -var CacheExpiration time.Duration +var ( + CTICache gcache.Cache + CacheExpiration time.Duration +) func CrowdsecCTIInitCache(size int, ttl time.Duration) { CTICache = gcache.New(size).LRU().Build() diff --git a/pkg/exprhelpers/debugger.go b/pkg/exprhelpers/debugger.go index 2e47af6d1de..d44b8fc97e1 100644 --- a/pkg/exprhelpers/debugger.go +++ b/pkg/exprhelpers/debugger.go @@ -21,35 +21,35 @@ var IndentStep = 4 // we use this struct to store the output of the expr runtime type OpOutput struct { - Code string //relevant code part + Code string // relevant code part - CodeDepth int //level of nesting + CodeDepth int // level of nesting BlockStart bool BlockEnd bool - Func bool //true if it's a function call + Func bool // true if it's a function call FuncName string Args []string FuncResults []string // - Comparison bool //true if it's a comparison + Comparison bool // true if it's a comparison Negated bool Left string Right string // - JumpIf bool //true if it's conditional jump + JumpIf bool // true if it's conditional jump IfTrue bool IfFalse bool // - Condition bool //true if it's a condition + Condition bool // true if it's a condition ConditionIn bool ConditionContains bool - //used for comparisons, conditional jumps and conditions + // used for comparisons, conditional jumps and conditions StrConditionResult string - ConditionResult *bool //should always be present for conditions + ConditionResult *bool // should always be present for conditions // - Finalized bool //used when a node is finalized, we already fetched result from next OP + Finalized bool // used when a node is finalized, we already fetched result from next OP } func (o *OpOutput) String() string { @@ -57,6 +57,7 @@ func (o *OpOutput) String() string { if o.Code != "" { 
ret += fmt.Sprintf("[%s]", o.Code) } + ret += " " switch { @@ -68,19 +69,24 @@ func (o *OpOutput) String() string { if indent < 0 { indent = 0 } + ret = fmt.Sprintf("%*cBLOCK_END [%s]", indent, ' ', o.Code) + if o.StrConditionResult != "" { ret += fmt.Sprintf(" -> %s", o.StrConditionResult) } + return ret - //A block end can carry a value, for example if it's a count, any, all etc. XXX + // A block end can carry a value, for example if it's a count, any, all etc. XXX case o.Func: return ret + fmt.Sprintf("%s(%s) = %s", o.FuncName, strings.Join(o.Args, ", "), strings.Join(o.FuncResults, ", ")) case o.Comparison: if o.Negated { ret += "NOT " } + ret += fmt.Sprintf("%s == %s -> %s", o.Left, o.Right, o.StrConditionResult) + return ret case o.ConditionIn: return ret + fmt.Sprintf("%s in %s -> %s", o.Args[0], o.Args[1], o.StrConditionResult) @@ -91,18 +97,23 @@ func (o *OpOutput) String() string { if *o.ConditionResult { return ret + "OR -> false" } + return ret + "OR -> true" } + return ret + "OR(?)" case o.JumpIf && o.IfFalse: if o.ConditionResult != nil { if *o.ConditionResult { return ret + "AND -> true" } + return ret + "AND -> false" } + return ret + "AND(?)" } + return ret + "" } @@ -135,7 +146,7 @@ func (erp ExprRuntimeDebug) extractCode(ip int, program *vm.Program) string { func autoQuote(v any) string { switch x := v.(type) { case string: - //let's avoid printing long strings. it can happen ie. when we are debugging expr with `File()` or similar helpers + // let's avoid printing long strings. it can happen ie. 
when we are debugging expr with `File()` or similar helpers if len(x) > 40 { return fmt.Sprintf("%q", x[:40]+"...") } else { @@ -147,35 +158,40 @@ func autoQuote(v any) string { } func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, parts []string, outputs []OpOutput) ([]OpOutput, error) { - IdxOut := len(outputs) prevIdxOut := 0 currentDepth := 0 - //when there is a function call or comparison, we need to wait for the next instruction to get the result and "finalize" the previous one + // when there is a function call or comparison, we need to wait for the next instruction to get the result and "finalize" the previous one if IdxOut > 0 { prevIdxOut = IdxOut - 1 currentDepth = outputs[prevIdxOut].CodeDepth + if outputs[prevIdxOut].Func && !outputs[prevIdxOut].Finalized { stack := vm.Stack num_items := 1 + for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { outputs[prevIdxOut].FuncResults = append(outputs[prevIdxOut].FuncResults, autoQuote(stack[i])) num_items-- } + outputs[prevIdxOut].Finalized = true } else if (outputs[prevIdxOut].Comparison || outputs[prevIdxOut].Condition) && !outputs[prevIdxOut].Finalized { stack := vm.Stack outputs[prevIdxOut].StrConditionResult = fmt.Sprintf("%+v", stack) + if val, ok := stack[0].(bool); ok { outputs[prevIdxOut].ConditionResult = new(bool) *outputs[prevIdxOut].ConditionResult = val } + outputs[prevIdxOut].Finalized = true } } erp.Logger.Tracef("[STEP %d:%s] (stack:%+v) (parts:%+v) {depth:%d}", ip, parts[1], vm.Stack, parts, currentDepth) + out := OpOutput{} out.CodeDepth = currentDepth out.Code = erp.extractCode(ip, program) @@ -188,27 +204,28 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part case "OpEnd": out.CodeDepth -= IndentStep out.BlockEnd = true - //OpEnd can carry value, if it's any/all/count etc. + // OpEnd can carry value, if it's any/all/count etc. 
if len(vm.Stack) > 0 { out.StrConditionResult = fmt.Sprintf("%v", vm.Stack) } + outputs = append(outputs, out) case "OpNot": - //negate the previous condition + // negate the previous condition outputs[prevIdxOut].Negated = true - case "OpTrue": //generated when possible ? (1 == 1) + case "OpTrue": // generated when possible ? (1 == 1) out.Condition = true out.ConditionResult = new(bool) *out.ConditionResult = true out.StrConditionResult = "true" outputs = append(outputs, out) - case "OpFalse": //generated when possible ? (1 != 1) + case "OpFalse": // generated when possible ? (1 != 1) out.Condition = true out.ConditionResult = new(bool) *out.ConditionResult = false out.StrConditionResult = "false" outputs = append(outputs, out) - case "OpJumpIfTrue": //OR + case "OpJumpIfTrue": // OR stack := vm.Stack out.JumpIf = true out.IfTrue = true @@ -218,78 +235,88 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part out.ConditionResult = new(bool) *out.ConditionResult = val } + outputs = append(outputs, out) - case "OpJumpIfFalse": //AND + case "OpJumpIfFalse": // AND stack := vm.Stack out.JumpIf = true out.IfFalse = true out.StrConditionResult = fmt.Sprintf("%v", stack[0]) + if val, ok := stack[0].(bool); ok { out.ConditionResult = new(bool) *out.ConditionResult = val } + outputs = append(outputs, out) - case "OpCall1": //Op for function calls + case "OpCall1": // Op for function calls out.Func = true out.FuncName = parts[3] stack := vm.Stack + num_items := 1 for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { out.Args = append(out.Args, autoQuote(stack[i])) num_items-- } + outputs = append(outputs, out) - case "OpCall2": //Op for function calls + case "OpCall2": // Op for function calls out.Func = true out.FuncName = parts[3] stack := vm.Stack + num_items := 2 for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { out.Args = append(out.Args, autoQuote(stack[i])) num_items-- } + outputs = append(outputs, out) - case "OpCall3": //Op for 
function calls + case "OpCall3": // Op for function calls out.Func = true out.FuncName = parts[3] stack := vm.Stack + num_items := 3 for i := len(stack) - 1; i >= 0 && num_items > 0; i-- { out.Args = append(out.Args, autoQuote(stack[i])) num_items-- } + outputs = append(outputs, out) - //double check OpCallFast and OpCallTyped + // double check OpCallFast and OpCallTyped case "OpCallFast", "OpCallTyped": // - case "OpCallN": //Op for function calls with more than 3 args + case "OpCallN": // Op for function calls with more than 3 args out.Func = true out.FuncName = parts[1] stack := vm.Stack - //for OpCallN, we get the number of args + // for OpCallN, we get the number of args if len(program.Arguments) >= ip { nb_args := program.Arguments[ip] if nb_args > 0 { - //we need to skip the top item on stack + // we need to skip the top item on stack for i := len(stack) - 2; i >= 0 && nb_args > 0; i-- { out.Args = append(out.Args, autoQuote(stack[i])) nb_args-- } } - } else { //let's blindly take the items on stack + } else { // let's blindly take the items on stack for _, val := range vm.Stack { out.Args = append(out.Args, autoQuote(val)) } } + outputs = append(outputs, out) - case "OpEqualString", "OpEqual", "OpEqualInt": //comparisons + case "OpEqualString", "OpEqual", "OpEqualInt": // comparisons stack := vm.Stack out.Comparison = true out.Left = autoQuote(stack[0]) out.Right = autoQuote(stack[1]) outputs = append(outputs, out) - case "OpIn": //in operator + case "OpIn": // in operator stack := vm.Stack out.Condition = true out.ConditionIn = true @@ -299,7 +326,7 @@ func (erp ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part out.Args = append(out.Args, autoQuote(stack[0])) out.Args = append(out.Args, autoQuote(stack[1])) outputs = append(outputs, out) - case "OpContains": //kind OpIn , but reverse + case "OpContains": // kind OpIn , but reverse stack := vm.Stack out.Condition = true out.ConditionContains = true @@ -310,6 +337,7 @@ func (erp 
ExprRuntimeDebug) ipDebug(ip int, vm *vm.VM, program *vm.Program, part out.Args = append(out.Args, autoQuote(stack[1])) outputs = append(outputs, out) } + return outputs, nil } @@ -319,10 +347,12 @@ func (erp ExprRuntimeDebug) ipSeek(ip int) []string { if len(parts) == 0 { continue } + if parts[0] == strconv.Itoa(ip) { return parts } } + return nil } @@ -330,19 +360,23 @@ func Run(program *vm.Program, env interface{}, logger *log.Entry, debug bool) (a if debug { dbgInfo, ret, err := RunWithDebug(program, env, logger) DisplayExprDebug(program, dbgInfo, logger, ret) + return ret, err } + return expr.Run(program, env) } func cleanTextForDebug(text string) string { text = strings.Join(strings.Fields(text), " ") text = strings.Trim(text, " \t\n") + return text } func DisplayExprDebug(program *vm.Program, outputs []OpOutput, logger *log.Entry, ret any) { logger.Debugf("dbg(result=%v): %s", ret, cleanTextForDebug(string(program.Source()))) + for _, output := range outputs { logger.Debugf("%s", output.String()) } @@ -360,46 +394,55 @@ func RunWithDebug(program *vm.Program, env interface{}, logger *log.Entry) ([]Op erp.Lines = lines go func() { - //We must never return until the execution of the program is done + // We must never return until the execution of the program is done var err error + erp.Logger.Tracef("[START] ip 0") + ops := erp.ipSeek(0) if ops == nil { log.Warningf("error while debugging expr: failed getting ops for ip 0") } + if outputs, err = erp.ipDebug(0, vm, program, ops, outputs); err != nil { log.Warningf("error while debugging expr: error while debugging at ip 0") } + vm.Step() + for ip := range vm.Position() { ops := erp.ipSeek(ip) if ops == nil { erp.Logger.Tracef("[DONE] ip %d", ip) break } + if outputs, err = erp.ipDebug(ip, vm, program, ops, outputs); err != nil { log.Warningf("error while debugging expr: error while debugging at ip %d", ip) } + vm.Step() } }() var return_error error + ret, err := vm.Run(program, env) - //if the expr runtime 
failed, we don't need to wait for the debug to finish + // if the expr runtime failed, we don't need to wait for the debug to finish if err != nil { return_error = err } - //the overall result of expression is the result of last op ? + // the overall result of expression is the result of last op ? if len(outputs) > 0 { lastOutIdx := len(outputs) if lastOutIdx > 0 { lastOutIdx -= 1 } + switch val := ret.(type) { case bool: log.Tracef("completing with bool %t", ret) - //if outputs[lastOutIdx].Comparison { + // if outputs[lastOutIdx].Comparison { outputs[lastOutIdx].StrConditionResult = fmt.Sprintf("%v", ret) outputs[lastOutIdx].ConditionResult = new(bool) *outputs[lastOutIdx].ConditionResult = val @@ -412,5 +455,6 @@ func RunWithDebug(program *vm.Program, env interface{}, logger *log.Entry) ([]Op } else { log.Tracef("no output from expr runtime") } + return outputs, ret, return_error } diff --git a/pkg/exprhelpers/debugger_test.go b/pkg/exprhelpers/debugger_test.go index 32144454084..0852d7ab2de 100644 --- a/pkg/exprhelpers/debugger_test.go +++ b/pkg/exprhelpers/debugger_test.go @@ -1,3 +1,4 @@ +//go:build expr_debug package exprhelpers import ( diff --git a/pkg/exprhelpers/debuggerstub_test.go b/pkg/exprhelpers/debuggerstub_test.go new file mode 100644 index 00000000000..cc41c793b47 --- /dev/null +++ b/pkg/exprhelpers/debuggerstub_test.go @@ -0,0 +1,10 @@ +//go:build !expr_debug +package exprhelpers + +import ( + "testing" +) + +func TestFailWithoutExprDebug(t *testing.T) { + t.Fatal("To test pkg/exprhelpers, you need the expr_debug build tag") +} diff --git a/pkg/exprhelpers/exprlib_test.go b/pkg/exprhelpers/exprlib_test.go index f2eb208ebfa..932db4b7da4 100644 --- a/pkg/exprhelpers/exprlib_test.go +++ b/pkg/exprhelpers/exprlib_test.go @@ -3,7 +3,6 @@ package exprhelpers import ( "context" "errors" - "os" "testing" "time" @@ -26,15 +25,12 @@ const TestFolder = "tests" func getDBClient(t *testing.T) *database.Client { t.Helper() - dbPath, err := os.CreateTemp("", 
"*sqlite") - require.NoError(t, err) - ctx := context.Background() testDBClient, err := database.NewClient(ctx, &csconfig.DatabaseCfg{ Type: "sqlite", DbName: "crowdsec", - DbPath: dbPath.Name(), + DbPath: ":memory:", }) require.NoError(t, err) diff --git a/pkg/exprhelpers/geoip.go b/pkg/exprhelpers/geoip.go index fb0c344d884..6d8813dc0ad 100644 --- a/pkg/exprhelpers/geoip.go +++ b/pkg/exprhelpers/geoip.go @@ -14,7 +14,6 @@ func GeoIPEnrich(params ...any) (any, error) { parsedIP := net.ParseIP(ip) city, err := geoIPCityReader.City(parsedIP) - if err != nil { return nil, err } @@ -31,7 +30,6 @@ func GeoIPASNEnrich(params ...any) (any, error) { parsedIP := net.ParseIP(ip) asn, err := geoIPASNReader.ASN(parsedIP) - if err != nil { return nil, err } @@ -50,7 +48,6 @@ func GeoIPRangeEnrich(params ...any) (any, error) { parsedIP := net.ParseIP(ip) rangeIP, ok, err := geoIPRangeReader.LookupNetwork(parsedIP, &dummy) - if err != nil { return nil, err } diff --git a/pkg/exprhelpers/helpers.go b/pkg/exprhelpers/helpers.go index 9bc991a8f2d..d0f6f2cfe22 100644 --- a/pkg/exprhelpers/helpers.go +++ b/pkg/exprhelpers/helpers.go @@ -29,8 +29,6 @@ import ( "github.com/umahmood/haversine" "github.com/wasilibs/go-re2" - "github.com/crowdsecurity/go-cs-lib/ptr" - "github.com/crowdsecurity/crowdsec/pkg/cache" "github.com/crowdsecurity/crowdsec/pkg/database" "github.com/crowdsecurity/crowdsec/pkg/fflag" @@ -129,32 +127,36 @@ func Init(databaseClient *database.Client) error { dataFileRegex = make(map[string][]*regexp.Regexp) dataFileRe2 = make(map[string][]*re2.Regexp) dbClient = databaseClient + XMLCacheInit() + return nil } -func RegexpCacheInit(filename string, CacheCfg types.DataSource) error { +func RegexpCacheInit(filename string, cacheCfg types.DataSource) error { // cache is explicitly disabled - if CacheCfg.Cache != nil && !*CacheCfg.Cache { + if cacheCfg.Cache != nil && !*cacheCfg.Cache { return nil } // cache is implicitly disabled if no cache config is provided - if 
CacheCfg.Strategy == nil && CacheCfg.TTL == nil && CacheCfg.Size == nil { + if cacheCfg.Strategy == nil && cacheCfg.TTL == nil && cacheCfg.Size == nil { return nil } // cache is enabled - if CacheCfg.Size == nil { - CacheCfg.Size = ptr.Of(50) + size := 50 + if cacheCfg.Size != nil { + size = *cacheCfg.Size } - gc := gcache.New(*CacheCfg.Size) + gc := gcache.New(size) - if CacheCfg.Strategy == nil { - CacheCfg.Strategy = ptr.Of("LRU") + strategy := "LRU" + if cacheCfg.Strategy != nil { + strategy = *cacheCfg.Strategy } - switch *CacheCfg.Strategy { + switch strategy { case "LRU": gc = gc.LRU() case "LFU": @@ -162,11 +164,11 @@ func RegexpCacheInit(filename string, CacheCfg types.DataSource) error { case "ARC": gc = gc.ARC() default: - return fmt.Errorf("unknown cache strategy '%s'", *CacheCfg.Strategy) + return fmt.Errorf("unknown cache strategy '%s'", strategy) } - if CacheCfg.TTL != nil { - gc.Expiration(*CacheCfg.TTL) + if cacheCfg.TTL != nil { + gc.Expiration(*cacheCfg.TTL) } cache := gc.Build() @@ -240,6 +242,7 @@ func Distinct(params ...any) (any, error) { if rt := reflect.TypeOf(params[0]).Kind(); rt != reflect.Slice && rt != reflect.Array { return nil, nil } + array := params[0].([]interface{}) if array == nil { return []interface{}{}, nil @@ -254,6 +257,7 @@ func Distinct(params ...any) (any, error) { ret = append(ret, val) } } + return ret, nil } @@ -282,8 +286,10 @@ func flatten(args []interface{}, v reflect.Value) []interface{} { } func existsInFileMaps(filename string, ftype string) (bool, error) { - ok := false var err error + + ok := false + switch ftype { case "regex", "regexp": if fflag.Re2RegexpInfileSupport.IsEnabled() { @@ -296,10 +302,11 @@ func existsInFileMaps(filename string, ftype string) (bool, error) { default: err = fmt.Errorf("unknown data type '%s' for : '%s'", ftype, filename) } + return ok, err } -//Expr helpers +// Expr helpers // func Get(arr []string, index int) string { func Get(params ...any) (any, error) { @@ -315,10 +322,12 @@ 
func Get(params ...any) (any, error) { func Atof(params ...any) (any, error) { x := params[0].(string) log.Debugf("debug atof %s", x) + ret, err := strconv.ParseFloat(x, 64) if err != nil { log.Warningf("Atof : can't convert float '%s' : %v", x, err) } + return ret, nil } @@ -340,22 +349,28 @@ func Distance(params ...any) (any, error) { long1 := params[1].(string) lat2 := params[2].(string) long2 := params[3].(string) + lat1f, err := strconv.ParseFloat(lat1, 64) if err != nil { log.Warningf("lat1 is not a float : %v", err) + return 0.0, fmt.Errorf("lat1 is not a float : %v", err) } + long1f, err := strconv.ParseFloat(long1, 64) if err != nil { log.Warningf("long1 is not a float : %v", err) + return 0.0, fmt.Errorf("long1 is not a float : %v", err) } + lat2f, err := strconv.ParseFloat(lat2, 64) if err != nil { log.Warningf("lat2 is not a float : %v", err) return 0.0, fmt.Errorf("lat2 is not a float : %v", err) } + long2f, err := strconv.ParseFloat(long2, 64) if err != nil { log.Warningf("long2 is not a float : %v", err) @@ -363,7 +378,7 @@ func Distance(params ...any) (any, error) { return 0.0, fmt.Errorf("long2 is not a float : %v", err) } - //either set of coordinates is 0,0, return 0 to avoid FPs + // either set of coordinates is 0,0, return 0 to avoid FPs if (lat1f == 0.0 && long1f == 0.0) || (lat2f == 0.0 && long2f == 0.0) { log.Warningf("one of the coordinates is 0,0, returning 0") return 0.0, nil @@ -373,6 +388,7 @@ func Distance(params ...any) (any, error) { second := haversine.Coord{Lat: lat2f, Lon: long2f} _, km := haversine.Distance(first, second) + return km, nil } diff --git a/pkg/fflag/crowdsec.go b/pkg/fflag/crowdsec.go index d42d6a05ef6..ea397bfe5bc 100644 --- a/pkg/fflag/crowdsec.go +++ b/pkg/fflag/crowdsec.go @@ -2,12 +2,14 @@ package fflag var Crowdsec = FeatureRegister{EnvPrefix: "CROWDSEC_FEATURE_"} -var CscliSetup = &Feature{Name: "cscli_setup", Description: "Enable cscli setup command (service detection)"} -var DisableHttpRetryBackoff = 
&Feature{Name: "disable_http_retry_backoff", Description: "Disable http retry backoff"} -var ChunkedDecisionsStream = &Feature{Name: "chunked_decisions_stream", Description: "Enable chunked decisions stream"} -var PapiClient = &Feature{Name: "papi_client", Description: "Enable Polling API client", State: DeprecatedState} -var Re2GrokSupport = &Feature{Name: "re2_grok_support", Description: "Enable RE2 support for GROK patterns"} -var Re2RegexpInfileSupport = &Feature{Name: "re2_regexp_in_file_support", Description: "Enable RE2 support for RegexpInFile expr helper"} +var ( + CscliSetup = &Feature{Name: "cscli_setup", Description: "Enable cscli setup command (service detection)"} + DisableHttpRetryBackoff = &Feature{Name: "disable_http_retry_backoff", Description: "Disable http retry backoff"} + ChunkedDecisionsStream = &Feature{Name: "chunked_decisions_stream", Description: "Enable chunked decisions stream"} + PapiClient = &Feature{Name: "papi_client", Description: "Enable Polling API client", State: DeprecatedState} + Re2GrokSupport = &Feature{Name: "re2_grok_support", Description: "Enable RE2 support for GROK patterns"} + Re2RegexpInfileSupport = &Feature{Name: "re2_regexp_in_file_support", Description: "Enable RE2 support for RegexpInFile expr helper"} +) func RegisterAllFeatures() error { err := Crowdsec.RegisterFeature(CscliSetup) diff --git a/pkg/fflag/features_test.go b/pkg/fflag/features_test.go index 481e86573e8..bf8ddeca8fd 100644 --- a/pkg/fflag/features_test.go +++ b/pkg/fflag/features_test.go @@ -351,11 +351,9 @@ func TestSetFromYaml(t *testing.T) { } func TestSetFromYamlFile(t *testing.T) { - tmpfile, err := os.CreateTemp("", "test") + tmpfile, err := os.CreateTemp(t.TempDir(), "test") require.NoError(t, err) - defer os.Remove(tmpfile.Name()) - // write the config file _, err = tmpfile.WriteString("- experimental1") require.NoError(t, err) @@ -376,11 +374,13 @@ func TestGetEnabledFeatures(t *testing.T) { feat1, err := fr.GetFeature("new_standard") 
require.NoError(t, err) - feat1.Set(true) + err = feat1.Set(true) + require.Error(t, err, "the flag is deprecated") feat2, err := fr.GetFeature("experimental1") require.NoError(t, err) - feat2.Set(true) + err = feat2.Set(true) + require.NoError(t, err) expected := []string{ "experimental1", diff --git a/pkg/hubops/colorize.go b/pkg/hubops/colorize.go new file mode 100644 index 00000000000..3af2aecab93 --- /dev/null +++ b/pkg/hubops/colorize.go @@ -0,0 +1,38 @@ +package hubops + +import ( + "strings" + + "github.com/fatih/color" + + "github.com/crowdsecurity/crowdsec/pkg/emoji" +) + +// colorizeItemName splits the input string on "/" and colorizes the second part. +func colorizeItemName(fullname string) string { + parts := strings.SplitN(fullname, "/", 2) + if len(parts) == 2 { + bold := color.New(color.Bold) + author := parts[0] + name := parts[1] + return author + "/" + bold.Sprint(name) + } + return fullname +} + +func colorizeOpType(opType string) string { + switch opType { + case (&DownloadCommand{}).OperationType(): + return emoji.InboxTray + " " + color.BlueString(opType) + case (&EnableCommand{}).OperationType(): + return emoji.CheckMarkButton + " " + color.GreenString(opType) + case (&DisableCommand{}).OperationType(): + return emoji.CrossMark + " " + color.RedString(opType) + case (&PurgeCommand{}).OperationType(): + return emoji.Wastebasket + " " + color.RedString(opType) + case (&DataRefreshCommand{}).OperationType(): + return emoji.Sync + " " + opType + } + + return opType +} diff --git a/pkg/hubops/datarefresh.go b/pkg/hubops/datarefresh.go new file mode 100644 index 00000000000..985db8c1a11 --- /dev/null +++ b/pkg/hubops/datarefresh.go @@ -0,0 +1,75 @@ +package hubops + +import ( + "context" + "fmt" + "os" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +// XXX: TODO: temporary for hubtests, but will have to go. +// DownloadDataIfNeeded downloads the data set for the item. 
+func DownloadDataIfNeeded(ctx context.Context, hub *cwhub.Hub, item *cwhub.Item, force bool) (bool, error) { + itemFilePath, err := item.InstallPath() + if err != nil { + return false, err + } + + itemFile, err := os.Open(itemFilePath) + if err != nil { + return false, fmt.Errorf("while opening %s: %w", itemFilePath, err) + } + + defer itemFile.Close() + + needReload, err := downloadDataSet(ctx, hub.GetDataDir(), force, itemFile) + if err != nil { + return needReload, fmt.Errorf("while downloading data for %s: %w", itemFilePath, err) + } + + return needReload, nil +} + +// DataRefreshCommand updates the data files associated with the installed hub items. +type DataRefreshCommand struct { + Force bool +} + +func NewDataRefreshCommand(force bool) *DataRefreshCommand { + return &DataRefreshCommand{Force: force} +} + +func (c *DataRefreshCommand) Prepare(plan *ActionPlan) (bool, error) { + // we can't prepare much at this point because we don't know which data files yet, + // and items needs to be downloaded/updated + // evertyhing will be done in Run() + return true, nil +} + +func (c *DataRefreshCommand) Run(ctx context.Context, plan *ActionPlan) error { + for _, itemType := range cwhub.ItemTypes { + for _, item := range plan.hub.GetInstalledByType(itemType, true) { + needReload, err := DownloadDataIfNeeded(ctx, plan.hub, item, c.Force) + if err != nil { + return err + } + + plan.ReloadNeeded = plan.ReloadNeeded || needReload + } + } + + return nil +} + +func (c *DataRefreshCommand) OperationType() string { + return "check & update data files" +} + +func (c *DataRefreshCommand) ItemType() string { + return "" +} + +func (c *DataRefreshCommand) Detail() string { + return "" +} diff --git a/pkg/hubops/disable.go b/pkg/hubops/disable.go new file mode 100644 index 00000000000..b6368e85036 --- /dev/null +++ b/pkg/hubops/disable.go @@ -0,0 +1,121 @@ +package hubops + +import ( + "context" + "fmt" + "os" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +// 
RemoveInstallLink removes the item's symlink between the installation directory and the local hub. +func RemoveInstallLink(i *cwhub.Item) error { + syml, err := i.InstallPath() + if err != nil { + return err + } + + stat, err := os.Lstat(syml) + if err != nil { + return err + } + + // if it's managed by hub, it's a symlink to csconfig.GConfig.hub.HubDir / ... + if stat.Mode()&os.ModeSymlink == 0 { + return fmt.Errorf("%s isn't managed by hub", i.Name) + } + + hubpath, err := os.Readlink(syml) + if err != nil { + return fmt.Errorf("while reading symlink: %w", err) + } + + src, err := i.DownloadPath() + if err != nil { + return err + } + + if hubpath != src { + return fmt.Errorf("%s isn't managed by hub", i.Name) + } + + if err := os.Remove(syml); err != nil { + return fmt.Errorf("while removing symlink: %w", err) + } + + return nil +} + +// DisableCommand uninstalls an item and its dependencies, ensuring that no +// sub-item is left in an inconsistent state. +type DisableCommand struct { + Item *cwhub.Item + Force bool +} + +func NewDisableCommand(item *cwhub.Item, force bool) *DisableCommand { + return &DisableCommand{Item: item, Force: force} +} + +func (c *DisableCommand) Prepare(plan *ActionPlan) (bool, error) { + i := c.Item + + if i.State.IsLocal() { + plan.Warning(i.FQName() + " is a local item, please delete manually") + return false, nil + } + + if i.State.Tainted && !c.Force { + return false, fmt.Errorf("%s is tainted, use '--force' to remove", i.Name) + } + + if !i.State.Installed { + return false, nil + } + + subsToRemove, err := i.SafeToRemoveDeps() + if err != nil { + return false, err + } + + for _, sub := range subsToRemove { + if !sub.State.Installed { + continue + } + + if err := plan.AddCommand(NewDisableCommand(sub, c.Force)); err != nil { + return false, err + } + } + + return true, nil +} + +func (c *DisableCommand) Run(ctx context.Context, plan *ActionPlan) error { + i := c.Item + + fmt.Println("disabling " + colorizeItemName(i.FQName())) + + 
if err := RemoveInstallLink(i); err != nil { + return fmt.Errorf("while disabling %s: %w", i.FQName(), err) + } + + plan.ReloadNeeded = true + + i.State.Installed = false + i.State.Tainted = false + + return nil +} + +func (c *DisableCommand) OperationType() string { + return "disable" +} + +func (c *DisableCommand) ItemType() string { + return c.Item.Type +} + +func (c *DisableCommand) Detail() string { + return colorizeItemName(c.Item.Name) +} diff --git a/pkg/hubops/doc.go b/pkg/hubops/doc.go new file mode 100644 index 00000000000..b87a42653bc --- /dev/null +++ b/pkg/hubops/doc.go @@ -0,0 +1,45 @@ +/* +Package hubops is responsible for managing the local hub (items and data files) for CrowdSec. + +The index file itself (.index.json) is still managed by pkg/cwhub, which also provides the Hub +and Item structs. + +The hubops package is mostly used by cscli for the "cscli install/remove/upgrade ..." commands. + +It adopts a command-based pattern: a Plan contains a sequence of Commands. Both Plan and Command +have separate preparation and execution methods. + + - Command Interface: + The Command interface defines the contract for all operations that can be + performed on hub items. Each operation implements the Prepare and Run + methods, allowing for pre-execution setup and actual execution logic. + + - ActionPlan: + ActionPlan serves as a container for a sequence of Commands. It manages the + addition of commands, handles dependencies between them, and orchestrates their + execution. ActionPlan also provides a mechanism for interactive confirmation and dry-run. + +To perform operations on hub items, create an ActionPlan and add the desired +Commands to it. Once all commands are added, execute the ActionPlan to perform +the operations in the correct order, handling dependencies and user confirmations. + +Example: + + hub := cwhub.NewHub(...) 
+ plan := hubops.NewActionPlan(hub) + + downloadCmd := hubops.NewDownloadCommand(item, force) + if err := plan.AddCommand(downloadCmd); err != nil { + logrus.Fatalf("Failed to add download command: %v", err) + } + + enableCmd := hubops.NewEnableCommand(item, force) + if err := plan.AddCommand(enableCmd); err != nil { + logrus.Fatalf("Failed to add enable command: %v", err) + } + + if err := plan.Execute(ctx, confirm, dryRun, verbose); err != nil { + logrus.Fatalf("Failed to execute action plan: %v", err) + } +*/ +package hubops diff --git a/pkg/hubops/download.go b/pkg/hubops/download.go new file mode 100644 index 00000000000..72aed542115 --- /dev/null +++ b/pkg/hubops/download.go @@ -0,0 +1,212 @@ +package hubops + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "os" + "time" + + "github.com/fatih/color" + "github.com/sirupsen/logrus" + "gopkg.in/yaml.v3" + + "github.com/crowdsecurity/go-cs-lib/downloader" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/types" +) + +// DownloadCommand handles the downloading of hub items. +// It ensures that items are fetched from the hub (or from the index file if it also has content) +// managing dependencies and verifying the integrity of downloaded content. +// This is used by "cscli install" and "cscli upgrade". +// Tainted items require the force parameter, local items are skipped. +type DownloadCommand struct { + Item *cwhub.Item + Force bool + contentProvider cwhub.ContentProvider +} + +func NewDownloadCommand(item *cwhub.Item, contentProvider cwhub.ContentProvider, force bool) *DownloadCommand { + return &DownloadCommand{Item: item, Force: force, contentProvider: contentProvider} +} + +func (c *DownloadCommand) Prepare(plan *ActionPlan) (bool, error) { + i := c.Item + + if i.State.IsLocal() { + plan.Info(i.FQName() + " - not downloading local item") + return false, nil + } + + // XXX: if it's tainted do we upgrade the dependencies anyway? 
+ if i.State.Tainted && !c.Force { + plan.Warning(i.FQName() + " is tainted, use '--force' to overwrite") + return false, nil + } + + toDisable := make(map[*cwhub.Item]struct{}) + + var disableKeys []*cwhub.Item + + if i.State.Installed { + for sub := range i.CurrentDependencies().SubItems(plan.hub) { + disableKeys = append(disableKeys, sub) + toDisable[sub] = struct{}{} + } + } + + for sub := range i.LatestDependencies().SubItems(plan.hub) { + if err := plan.AddCommand(NewDownloadCommand(sub, c.contentProvider, c.Force)); err != nil { + return false, err + } + + if i.State.Installed { + // ensure the _new_ dependencies are installed too + if err := plan.AddCommand(NewEnableCommand(sub, c.Force)); err != nil { + return false, err + } + + for _, sub2 := range disableKeys { + if sub2 == sub { + delete(toDisable, sub) + } + } + } + } + + for sub := range toDisable { + if err := plan.AddCommand(NewDisableCommand(sub, c.Force)); err != nil { + return false, err + } + } + + if i.State.Downloaded && i.State.UpToDate { + return false, nil + } + + return true, nil +} + +// The DataSet is a list of data sources required by an item (built from the data: section in the yaml). +type DataSet struct { + Data []types.DataSource `yaml:"data,omitempty"` +} + +// downloadDataSet downloads all the data files for an item. +func downloadDataSet(ctx context.Context, dataFolder string, force bool, reader io.Reader) (bool, error) { + needReload := false + + dec := yaml.NewDecoder(reader) + + for { + data := &DataSet{} + + if err := dec.Decode(data); err != nil { + if errors.Is(err, io.EOF) { + break + } + + return needReload, fmt.Errorf("while reading file: %w", err) + } + + for _, dataS := range data.Data { + // XXX: check context cancellation + destPath, err := cwhub.SafePath(dataFolder, dataS.DestPath) + if err != nil { + return needReload, err + } + + d := downloader. + New(). + WithHTTPClient(cwhub.HubClient). + ToFile(destPath). + CompareContent(). 
+ BeforeRequest(func(req *http.Request) { + fmt.Printf("downloading %s\n", req.URL) + }). + WithLogger(logrus.WithField("url", dataS.SourceURL)) + + if !force { + d = d.WithLastModified(). + WithShelfLife(7 * 24 * time.Hour) + } + + downloaded, err := d.Download(ctx, dataS.SourceURL) + if err != nil { + return needReload, fmt.Errorf("while getting data: %w", err) + } + + needReload = needReload || downloaded + } + } + + return needReload, nil +} + +func (c *DownloadCommand) Run(ctx context.Context, plan *ActionPlan) error { + i := c.Item + + fmt.Printf("downloading %s\n", colorizeItemName(i.FQName())) + + // ensure that target file is within target dir + finalPath, err := i.DownloadPath() + if err != nil { + return err + } + + downloaded, _, err := i.FetchContentTo(ctx, c.contentProvider, finalPath) + if err != nil { + return fmt.Errorf("%s: %w", i.FQName(), err) + } + + if downloaded { + plan.ReloadNeeded = true + } + + i.State.Downloaded = true + i.State.Tainted = false + i.State.UpToDate = true + + // read content to get the list of data files + reader, err := os.Open(finalPath) + if err != nil { + return fmt.Errorf("while opening %s: %w", finalPath, err) + } + + defer reader.Close() + + needReload, err := downloadDataSet(ctx, plan.hub.GetDataDir(), c.Force, reader) + if err != nil { + return fmt.Errorf("while downloading data for %s: %w", i.FileName, err) + } + + if needReload { + plan.ReloadNeeded = true + } + + return nil +} + +func (c *DownloadCommand) OperationType() string { + return "download" +} + +func (c *DownloadCommand) ItemType() string { + return c.Item.Type +} + +func (c *DownloadCommand) Detail() string { + i := c.Item + + version := color.YellowString(i.Version) + + if i.State.Downloaded { + version = c.Item.State.LocalVersion + " -> " + color.YellowString(i.Version) + } + + return colorizeItemName(c.Item.Name) + " (" + version + ")" +} diff --git a/pkg/hubops/enable.go b/pkg/hubops/enable.go new file mode 100644 index 00000000000..40de40c8662 
--- /dev/null +++ b/pkg/hubops/enable.go @@ -0,0 +1,113 @@ +package hubops + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +// EnableCommand installs a hub item and its dependencies. +// In case this command is called during an upgrade, the sub-items list it taken from the +// latest version in the index, otherwise from the version that is currently installed. +type EnableCommand struct { + Item *cwhub.Item + Force bool + FromLatest bool +} + +func NewEnableCommand(item *cwhub.Item, force bool) *EnableCommand { + return &EnableCommand{Item: item, Force: force} +} + +func (c *EnableCommand) Prepare(plan *ActionPlan) (bool, error) { + var dependencies cwhub.Dependencies + + i := c.Item + + if c.FromLatest { + // we are upgrading + dependencies = i.LatestDependencies() + } else { + dependencies = i.CurrentDependencies() + } + + for sub := range dependencies.SubItems(plan.hub) { + if err := plan.AddCommand(NewEnableCommand(sub, c.Force)); err != nil { + return false, err + } + } + + if i.State.Installed { + return false, nil + } + + return true, nil +} + +// CreateInstallLink creates a symlink between the actual config file at hub.HubDir and hub.ConfigDir. 
+func CreateInstallLink(i *cwhub.Item) error { + dest, err := i.InstallPath() + if err != nil { + return err + } + + destDir := filepath.Dir(dest) + if err = os.MkdirAll(destDir, os.ModePerm); err != nil { + return fmt.Errorf("while creating %s: %w", destDir, err) + } + + if _, err = os.Lstat(dest); err == nil { + // already exists + return nil + } else if !os.IsNotExist(err) { + return fmt.Errorf("failed to stat %s: %w", dest, err) + } + + src, err := i.DownloadPath() + if err != nil { + return err + } + + if err = os.Symlink(src, dest); err != nil { + return fmt.Errorf("while creating symlink from %s to %s: %w", src, dest, err) + } + + return nil +} + +func (c *EnableCommand) Run(ctx context.Context, plan *ActionPlan) error { + i := c.Item + + fmt.Println("enabling " + colorizeItemName(i.FQName())) + + if !i.State.Downloaded { + // XXX: this a warning? + return fmt.Errorf("can't enable %s: not downloaded", i.FQName()) + } + + if err := CreateInstallLink(i); err != nil { + return fmt.Errorf("while enabling %s: %w", i.FQName(), err) + } + + plan.ReloadNeeded = true + + i.State.Installed = true + i.State.Tainted = false + + return nil +} + +func (c *EnableCommand) OperationType() string { + return "enable" +} + +func (c *EnableCommand) ItemType() string { + return c.Item.Type +} + +func (c *EnableCommand) Detail() string { + return colorizeItemName(c.Item.Name) +} diff --git a/pkg/hubops/plan.go b/pkg/hubops/plan.go new file mode 100644 index 00000000000..eb99056fab3 --- /dev/null +++ b/pkg/hubops/plan.go @@ -0,0 +1,250 @@ +package hubops + +import ( + "context" + "fmt" + "os" + "slices" + "strings" + + "github.com/AlecAivazis/survey/v2" + "github.com/fatih/color" + isatty "github.com/mattn/go-isatty" + + "github.com/crowdsecurity/go-cs-lib/slicetools" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +// Command represents an operation that can be performed on a CrowdSec hub item. 
+// +// Each concrete implementation defines a Prepare() method to check for errors and preconditions, +// decide which sub-commands are required (like installing dependencies) and add them to the action plan. +type Command interface { + // Prepare sets up the command for execution within the given + // ActionPlan. It may add additional commands to the ActionPlan based + // on dependencies or prerequisites. Returns a boolean indicating + // whether the command execution should be skipped (it can be + // redundant, like installing something that is already installed) and + // an error if the preparation failed. + // NOTE: Returning an error will bubble up from the plan.AddCommand() method, + // but Prepare() might already have modified the plan's command slice. + Prepare(*ActionPlan) (bool, error) + + // Run executes the command within the provided context and ActionPlan. + // It performs the actual operation and returns an error if execution fails. + // NOTE: Returning an error will currently stop the execution of the action plan. + Run(ctx context.Context, plan *ActionPlan) error + + // OperationType returns a unique string representing the type of operation to perform + // (e.g., "download", "enable"). + OperationType() string + + // ItemType returns the type of item the operation is performed on + // (e.g., "collections"). Used in confirmation prompt and dry-run. + ItemType() string + + // Detail provides further details on the operation, + // such as the item's name and version. + Detail() string +} + +// UniqueKey generates a unique string key for a Command based on its operation type, item type, and detail. +// Is is used to avoid adding duplicate commands to the action plan. +func UniqueKey(c Command) string { + return fmt.Sprintf("%s:%s:%s", c.OperationType(), c.ItemType(), c.Detail()) +} + +// ActionPlan orchestrates the sequence of operations (Commands) to manage CrowdSec hub items. 
+type ActionPlan struct { + // hold the list of Commands to be executed as part of the action plan. + // If a command is skipped (i.e. calling Prepare() returned false), it won't be included in the slice. + commands []Command + + // Tracks unique commands + commandsTracker map[string]struct{} + + // A reference to the Hub instance, required for dependency lookup. + hub *cwhub.Hub + + // Indicates whether a reload of the CrowdSec service is required after executing the action plan. + ReloadNeeded bool +} + +func NewActionPlan(hub *cwhub.Hub) *ActionPlan { + return &ActionPlan{ + hub: hub, + commandsTracker: make(map[string]struct{}), + } +} + +func (p *ActionPlan) AddCommand(c Command) error { + ok, err := c.Prepare(p) + if err != nil { + return err + } + + if ok { + key := UniqueKey(c) + if _, exists := p.commandsTracker[key]; !exists { + p.commands = append(p.commands, c) + p.commandsTracker[key] = struct{}{} + } + } + + return nil +} + +func (p *ActionPlan) Info(msg string) { + fmt.Println(msg) +} + +func (p *ActionPlan) Warning(msg string) { + fmt.Printf("%s %s\n", color.YellowString("WARN"), msg) +} + +// Description returns a string representation of the action plan. +// If verbose is false, the operations are grouped by item type and operation type. +// If verbose is true, they are listed as they appear in the command slice. +func (p *ActionPlan) Description(verbose bool) string { + if verbose { + return p.verboseDescription() + } + + return p.compactDescription() +} + +func (p *ActionPlan) verboseDescription() string { + sb := strings.Builder{} + + // Here we display the commands in the order they will be executed. + for _, cmd := range p.commands { + sb.WriteString(colorizeOpType(cmd.OperationType()) + " " + cmd.ItemType() + ":" + cmd.Detail() + "\n") + } + + return sb.String() +} + +// describe the operations of a given type in a compact way. 
+func describe(opType string, desc map[string]map[string][]string, sb *strings.Builder) { + if _, ok := desc[opType]; !ok { + return + } + + sb.WriteString(colorizeOpType(opType) + "\n") + + // iterate cwhub.ItemTypes in reverse order, so we have collections first + for _, itemType := range slicetools.Backward(cwhub.ItemTypes) { + if desc[opType][itemType] == nil { + continue + } + + details := desc[opType][itemType] + // Sorting for user convenience, but it's not the same order the commands will be carried out. + slices.Sort(details) + + if itemType != "" { + sb.WriteString(" " + itemType + ": ") + } + + if len(details) != 0 { + sb.WriteString(strings.Join(details, ", ")) + sb.WriteString("\n") + } + } +} + +func (p *ActionPlan) compactDescription() string { + desc := make(map[string]map[string][]string) + + for _, cmd := range p.commands { + opType := cmd.OperationType() + itemType := cmd.ItemType() + detail := cmd.Detail() + + if _, ok := desc[opType]; !ok { + desc[opType] = make(map[string][]string) + } + + desc[opType][itemType] = append(desc[opType][itemType], detail) + } + + sb := strings.Builder{} + + // Enforce presentation order. 
+ + describe("download", desc, &sb) + delete(desc, "download") + describe("enable", desc, &sb) + delete(desc, "enable") + describe("disable", desc, &sb) + delete(desc, "disable") + describe("remove", desc, &sb) + delete(desc, "remove") + + for optype := range desc { + describe(optype, desc, &sb) + } + + return sb.String() +} + +func (p *ActionPlan) Confirm(verbose bool) (bool, error) { + if !isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()) { + return true, nil + } + + fmt.Println("The following actions will be performed:\n" + p.Description(verbose)) + + var answer bool + + prompt := &survey.Confirm{ + Message: "Do you want to continue?", + Default: true, + } + + if err := survey.AskOne(prompt, &answer); err != nil { + return false, err + } + + fmt.Println() + + return answer, nil +} + +func (p *ActionPlan) Execute(ctx context.Context, confirm bool, dryRun bool, verbose bool) error { + var err error + + if len(p.commands) == 0 { + // XXX: show skipped commands, warnings? + fmt.Println("Nothing to do.") + return nil + } + + if dryRun { + fmt.Println("Action plan:\n" + p.Description(verbose)) + fmt.Println("Dry run, no action taken.") + + return nil + } + + if !confirm { + confirm, err = p.Confirm(verbose) + if err != nil { + return err + } + } + + if !confirm { + fmt.Println("Operation canceled.") + return nil + } + + for _, c := range p.commands { + if err := c.Run(ctx, p); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/hubops/purge.go b/pkg/hubops/purge.go new file mode 100644 index 00000000000..3b415b27428 --- /dev/null +++ b/pkg/hubops/purge.go @@ -0,0 +1,88 @@ +package hubops + +import ( + "context" + "fmt" + "os" + + "github.com/crowdsecurity/crowdsec/pkg/cwhub" +) + +// PurgeCommand removes the downloaded content of a hub item, effectively +// removing it from the local system. This command also removes the sub-items +// but not the associated data files. 
+type PurgeCommand struct { + Item *cwhub.Item + Force bool +} + +func NewPurgeCommand(item *cwhub.Item, force bool) *PurgeCommand { + return &PurgeCommand{Item: item, Force: force} +} + +func (c *PurgeCommand) Prepare(plan *ActionPlan) (bool, error) { + i := c.Item + + if i.State.IsLocal() { + // not downloaded, by definition + return false, nil + } + + if i.State.Tainted && !c.Force { + return false, fmt.Errorf("%s is tainted, use '--force' to remove", i.Name) + } + + subsToRemove, err := i.SafeToRemoveDeps() + if err != nil { + return false, err + } + + for _, sub := range subsToRemove { + if err := plan.AddCommand(NewPurgeCommand(sub, c.Force)); err != nil { + return false, err + } + } + + if !i.State.Downloaded { + return false, nil + } + + return true, nil +} + +func (c *PurgeCommand) Run(ctx context.Context, plan *ActionPlan) error { + i := c.Item + + fmt.Println("purging " + colorizeItemName(i.FQName())) + + src, err := i.DownloadPath() + if err != nil { + return err + } + + if err := os.Remove(src); err != nil { + if os.IsNotExist(err) { + return nil + } + + return fmt.Errorf("while removing file: %w", err) + } + + i.State.Downloaded = false + i.State.Tainted = false + i.State.UpToDate = false + + return nil +} + +func (c *PurgeCommand) OperationType() string { + return "purge (delete source)" +} + +func (c *PurgeCommand) ItemType() string { + return c.Item.Type +} + +func (c *PurgeCommand) Detail() string { + return colorizeItemName(c.Item.Name) +} diff --git a/pkg/hubtest/hubtest.go b/pkg/hubtest/hubtest.go index 93f5abaa879..6e5a11fff10 100644 --- a/pkg/hubtest/hubtest.go +++ b/pkg/hubtest/hubtest.go @@ -14,8 +14,8 @@ type HubTest struct { CrowdSecPath string CscliPath string HubPath string - HubTestPath string //generic parser/scenario tests .tests - HubAppsecTestPath string //dir specific to appsec tests .appsec-tests + HubTestPath string // generic parser/scenario tests .tests + HubAppsecTestPath string // dir specific to appsec tests .appsec-tests 
HubIndexFile string TemplateConfigPath string TemplateProfilePath string @@ -25,8 +25,8 @@ type HubTest struct { NucleiTargetHost string AppSecHost string - HubIndex *cwhub.Hub - Tests []*HubTestItem + HubIndex *cwhub.Hub + Tests []*HubTestItem } const ( @@ -93,7 +93,7 @@ func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecT InstallDataDir: HubTestPath, } - hub, err := cwhub.NewHub(local, nil, nil) + hub, err := cwhub.NewHub(local, nil) if err != nil { return HubTest{}, err } @@ -130,7 +130,7 @@ func NewHubTest(hubPath string, crowdsecPath string, cscliPath string, isAppsecT InstallDataDir: HubTestPath, } - hub, err := cwhub.NewHub(local, nil, nil) + hub, err := cwhub.NewHub(local, nil) if err != nil { return HubTest{}, err } diff --git a/pkg/hubtest/hubtest_item.go b/pkg/hubtest/hubtest_item.go index bc9c8955d0d..75895dc729b 100644 --- a/pkg/hubtest/hubtest_item.go +++ b/pkg/hubtest/hubtest_item.go @@ -15,6 +15,7 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/csconfig" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/hubops" "github.com/crowdsecurity/crowdsec/pkg/parser" ) @@ -211,7 +212,7 @@ func (t *HubTestItem) InstallHub() error { } // load installed hub - hub, err := cwhub.NewHub(t.RuntimeHubConfig, nil, nil) + hub, err := cwhub.NewHub(t.RuntimeHubConfig, nil) if err != nil { return err } @@ -224,7 +225,7 @@ func (t *HubTestItem) InstallHub() error { // install data for parsers if needed for _, item := range hub.GetInstalledByType(cwhub.PARSERS, true) { - if err := item.DownloadDataIfNeeded(ctx, true); err != nil { + if _, err := hubops.DownloadDataIfNeeded(ctx, hub, item, true); err != nil { return fmt.Errorf("unable to download data for parser '%s': %+v", item.Name, err) } @@ -233,7 +234,7 @@ func (t *HubTestItem) InstallHub() error { // install data for scenarios if needed for _, item := range hub.GetInstalledByType(cwhub.SCENARIOS, true) { - if err := item.DownloadDataIfNeeded(ctx, 
true); err != nil { + if _, err := hubops.DownloadDataIfNeeded(ctx, hub, item, true); err != nil { return fmt.Errorf("unable to download data for parser '%s': %+v", item.Name, err) } @@ -242,7 +243,7 @@ func (t *HubTestItem) InstallHub() error { // install data for postoverflows if needed for _, item := range hub.GetInstalledByType(cwhub.POSTOVERFLOWS, true) { - if err := item.DownloadDataIfNeeded(ctx, true); err != nil { + if _, err := hubops.DownloadDataIfNeeded(ctx, hub, item, true); err != nil { return fmt.Errorf("unable to download data for parser '%s': %+v", item.Name, err) } @@ -299,7 +300,7 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { crowdsecDaemon.Start() // wait for the appsec port to be available - if _, err := IsAlive(t.AppSecHost); err != nil { + if _, err = IsAlive(t.AppSecHost); err != nil { crowdsecLog, err2 := os.ReadFile(crowdsecLogFile) if err2 != nil { log.Errorf("unable to read crowdsec log file '%s': %s", crowdsecLogFile, err) @@ -318,7 +319,7 @@ func (t *HubTestItem) RunWithNucleiTemplate() error { } nucleiTargetHost := nucleiTargetParsedURL.Host - if _, err := IsAlive(nucleiTargetHost); err != nil { + if _, err = IsAlive(nucleiTargetHost); err != nil { return fmt.Errorf("target is down: %w", err) } @@ -381,7 +382,7 @@ func createDirs(dirs []string) error { return nil } -func (t *HubTestItem) RunWithLogFile() error { +func (t *HubTestItem) RunWithLogFile(patternDir string) error { testPath := filepath.Join(t.HubTestPath, t.Name) if _, err := os.Stat(testPath); os.IsNotExist(err) { return fmt.Errorf("test '%s' doesn't exist in '%s', exiting", t.Name, t.HubTestPath) @@ -416,11 +417,9 @@ func (t *HubTestItem) RunWithLogFile() error { return fmt.Errorf("unable to copy '%s' to '%s': %v", t.TemplateSimulationPath, t.RuntimeSimulationFilePath, err) } - crowdsecPatternsFolder := csconfig.DefaultConfigPath("patterns") - // copy template patterns folder to runtime folder - if err = CopyDir(crowdsecPatternsFolder, t.RuntimePatternsPath); 
err != nil { - return fmt.Errorf("unable to copy 'patterns' from '%s' to '%s': %w", crowdsecPatternsFolder, t.RuntimePatternsPath, err) + if err = CopyDir(patternDir, t.RuntimePatternsPath); err != nil { + return fmt.Errorf("unable to copy 'patterns' from '%s' to '%s': %w", patternDir, t.RuntimePatternsPath, err) } // install the hub in the runtime folder @@ -565,7 +564,7 @@ func (t *HubTestItem) RunWithLogFile() error { return nil } -func (t *HubTestItem) Run() error { +func (t *HubTestItem) Run(patternDir string) error { var err error t.Success = false @@ -595,11 +594,9 @@ func (t *HubTestItem) Run() error { return fmt.Errorf("unable to copy '%s' to '%s': %v", t.TemplateSimulationPath, t.RuntimeSimulationFilePath, err) } - crowdsecPatternsFolder := csconfig.DefaultConfigPath("patterns") - // copy template patterns folder to runtime folder - if err = CopyDir(crowdsecPatternsFolder, t.RuntimePatternsPath); err != nil { - return fmt.Errorf("unable to copy 'patterns' from '%s' to '%s': %w", crowdsecPatternsFolder, t.RuntimePatternsPath, err) + if err = CopyDir(patternDir, t.RuntimePatternsPath); err != nil { + return fmt.Errorf("unable to copy 'patterns' from '%s' to '%s': %w", patternDir, t.RuntimePatternsPath, err) } // create the appsec-configs dir @@ -633,9 +630,12 @@ func (t *HubTestItem) Run() error { } if t.Config.LogFile != "" { - return t.RunWithLogFile() - } else if t.Config.NucleiTemplate != "" { + return t.RunWithLogFile(patternDir) + } + + if t.Config.NucleiTemplate != "" { return t.RunWithNucleiTemplate() } + return fmt.Errorf("log file or nuclei template must be set in '%s'", t.Name) } diff --git a/pkg/hubtest/parser_assert.go b/pkg/hubtest/parser_assert.go index be4fdbdb5e6..90d952506d1 100644 --- a/pkg/hubtest/parser_assert.go +++ b/pkg/hubtest/parser_assert.go @@ -270,7 +270,7 @@ func (p *ParserAssert) AutoGenParserAssert() string { continue } - base := fmt.Sprintf(`results["%s"]["%s"][%d].Evt.Unmarshaled["%s"]`, stage, parser, pidx, ukey) + base := 
fmt.Sprintf("results[%q][%q][%d].Evt.Unmarshaled[%q]", stage, parser, pidx, ukey) for _, line := range p.buildUnmarshaledAssert(base, uval) { ret += line @@ -295,11 +295,11 @@ func (p *ParserAssert) buildUnmarshaledAssert(ekey string, eval interface{}) []s switch val := eval.(type) { case map[string]interface{}: for k, v := range val { - ret = append(ret, p.buildUnmarshaledAssert(fmt.Sprintf(`%s["%s"]`, ekey, k), v)...) + ret = append(ret, p.buildUnmarshaledAssert(fmt.Sprintf("%s[%q]", ekey, k), v)...) } case map[interface{}]interface{}: for k, v := range val { - ret = append(ret, p.buildUnmarshaledAssert(fmt.Sprintf(`%s["%s"]`, ekey, k), v)...) + ret = append(ret, p.buildUnmarshaledAssert(fmt.Sprintf("%s[%q]", ekey, k), v)...) } case []interface{}: case string: diff --git a/pkg/leakybucket/bayesian.go b/pkg/leakybucket/bayesian.go index 357d51f597b..30e1b396ef8 100644 --- a/pkg/leakybucket/bayesian.go +++ b/pkg/leakybucket/bayesian.go @@ -31,9 +31,9 @@ type BayesianBucket struct { DumbProcessor } -func updateProbability(prior, probGivenEvil, ProbGivenBenign float32) float32 { +func updateProbability(prior, probGivenEvil, probGivenBenign float32) float32 { numerator := probGivenEvil * prior - denominator := numerator + ProbGivenBenign*(1-prior) + denominator := numerator + probGivenBenign*(1-prior) return numerator / denominator } diff --git a/pkg/leakybucket/blackhole.go b/pkg/leakybucket/blackhole.go index b12f169acd9..95ea18f723b 100644 --- a/pkg/leakybucket/blackhole.go +++ b/pkg/leakybucket/blackhole.go @@ -21,7 +21,6 @@ type Blackhole struct { func NewBlackhole(bucketFactory *BucketFactory) (*Blackhole, error) { duration, err := time.ParseDuration(bucketFactory.Blackhole) if err != nil { - bucketFactory.logger.Warning("Blackhole duration not valid, using 1h") return nil, fmt.Errorf("blackhole duration not valid '%s'", bucketFactory.Blackhole) } return &Blackhole{ @@ -49,7 +48,6 @@ func (bl *Blackhole) OnBucketOverflow(bucketFactory *BucketFactory) 
func(*Leaky, tmp = append(tmp, element) } else { leaky.logger.Debugf("%s left blackhole %s ago", element.key, leaky.Ovflw_ts.Sub(element.expiration)) - } } bl.hiddenKeys = tmp @@ -64,5 +62,4 @@ func (bl *Blackhole) OnBucketOverflow(bucketFactory *BucketFactory) func(*Leaky, leaky.logger.Debugf("Adding overflow to blackhole (%s)", leaky.First_ts) return alert, queue } - } diff --git a/pkg/leakybucket/bucket.go b/pkg/leakybucket/bucket.go index e981551af8f..e7ea6e3e240 100644 --- a/pkg/leakybucket/bucket.go +++ b/pkg/leakybucket/bucket.go @@ -204,7 +204,6 @@ func FromFactory(bucketFactory BucketFactory) *Leaky { /* for now mimic a leak routine */ //LeakRoutine us the life of a bucket. It dies when the bucket underflows or overflows func LeakRoutine(leaky *Leaky) error { - var ( durationTickerChan = make(<-chan time.Time) durationTicker *time.Ticker @@ -317,7 +316,7 @@ func LeakRoutine(leaky *Leaky) error { alert, err = NewAlert(leaky, ofw) if err != nil { - log.Errorf("%s", err) + log.Error(err) } for _, f := range leaky.BucketConfig.processors { alert, ofw = f.OnBucketOverflow(leaky.BucketConfig)(leaky, alert, ofw) diff --git a/pkg/leakybucket/buckets.go b/pkg/leakybucket/buckets.go index cfe8d7c302e..72948da1ad7 100644 --- a/pkg/leakybucket/buckets.go +++ b/pkg/leakybucket/buckets.go @@ -25,5 +25,4 @@ func NewBuckets() *Buckets { func GetKey(bucketCfg BucketFactory, stackkey string) string { return fmt.Sprintf("%x", sha1.Sum([]byte(bucketCfg.Filter+stackkey+bucketCfg.Name))) - } diff --git a/pkg/leakybucket/buckets_test.go b/pkg/leakybucket/buckets_test.go index 1da906cb555..90a751160cb 100644 --- a/pkg/leakybucket/buckets_test.go +++ b/pkg/leakybucket/buckets_test.go @@ -46,7 +46,7 @@ func TestBucket(t *testing.T) { InstallDataDir: testdata, } - hub, err := cwhub.NewHub(hubCfg, nil, nil) + hub, err := cwhub.NewHub(hubCfg, nil) require.NoError(t, err) err = hub.Load() @@ -139,14 +139,25 @@ func testOneBucket(t *testing.T, hub *cwhub.Hub, dir string, tomb 
*tomb.Tomb) er t.Fatalf("failed to parse %s : %s", stagecfg, err) } - files := []string{} + scenarios := []*cwhub.Item{} + for _, x := range stages { - files = append(files, x.Filename) + // XXX: LoadBuckets should take an interface, BucketProvider ScenarioProvider or w/e + item := &cwhub.Item{ + Name: x.Filename, + State: cwhub.ItemState{ + LocalVersion: "", + LocalPath: x.Filename, + LocalHash: "", + }, + } + + scenarios = append(scenarios, item) } cscfg := &csconfig.CrowdsecServiceCfg{} - holders, response, err := LoadBuckets(cscfg, hub, files, tomb, buckets, false) + holders, response, err := LoadBuckets(cscfg, hub, scenarios, tomb, buckets, false) if err != nil { t.Fatalf("failed loading bucket : %s", err) } @@ -184,7 +195,7 @@ func testFile(t *testing.T, file string, bs string, holders []BucketFactory, res } dec := json.NewDecoder(yamlFile) dec.DisallowUnknownFields() - //dec.SetStrict(true) + // dec.SetStrict(true) tf := TestFile{} err = dec.Decode(&tf) if err != nil { @@ -196,7 +207,7 @@ func testFile(t *testing.T, file string, bs string, holders []BucketFactory, res } var latest_ts time.Time for _, in := range tf.Lines { - //just to avoid any race during ingestion of funny scenarios + // just to avoid any race during ingestion of funny scenarios time.Sleep(50 * time.Millisecond) var ts time.Time @@ -226,7 +237,7 @@ func testFile(t *testing.T, file string, bs string, holders []BucketFactory, res time.Sleep(1 * time.Second) - //Read results from chan + // Read results from chan POLL_AGAIN: fails := 0 for fails < 2 { @@ -287,37 +298,37 @@ POLL_AGAIN: log.Tracef("Checking next expected result.") - //empty overflow + // empty overflow if out.Overflow.Alert == nil && expected.Overflow.Alert == nil { - //match stuff + // match stuff } else { if out.Overflow.Alert == nil || expected.Overflow.Alert == nil { log.Printf("Here ?") continue } - //Scenario + // Scenario if *out.Overflow.Alert.Scenario != *expected.Overflow.Alert.Scenario { log.Errorf("(scenario) %v != 
%v", *out.Overflow.Alert.Scenario, *expected.Overflow.Alert.Scenario) continue } log.Infof("(scenario) %v == %v", *out.Overflow.Alert.Scenario, *expected.Overflow.Alert.Scenario) - //EventsCount + // EventsCount if *out.Overflow.Alert.EventsCount != *expected.Overflow.Alert.EventsCount { log.Errorf("(EventsCount) %d != %d", *out.Overflow.Alert.EventsCount, *expected.Overflow.Alert.EventsCount) continue } log.Infof("(EventsCount) %d == %d", *out.Overflow.Alert.EventsCount, *expected.Overflow.Alert.EventsCount) - //Sources + // Sources if !reflect.DeepEqual(out.Overflow.Sources, expected.Overflow.Sources) { log.Errorf("(Sources %s != %s)", spew.Sdump(out.Overflow.Sources), spew.Sdump(expected.Overflow.Sources)) continue } log.Infof("(Sources: %s == %s)", spew.Sdump(out.Overflow.Sources), spew.Sdump(expected.Overflow.Sources)) } - //Events + // Events // if !reflect.DeepEqual(out.Overflow.Alert.Events, expected.Overflow.Alert.Events) { // log.Errorf("(Events %s != %s)", spew.Sdump(out.Overflow.Alert.Events), spew.Sdump(expected.Overflow.Alert.Events)) // valid = false @@ -326,10 +337,10 @@ POLL_AGAIN: // log.Infof("(Events: %s == %s)", spew.Sdump(out.Overflow.Alert.Events), spew.Sdump(expected.Overflow.Alert.Events)) // } - //CheckFailed: + // CheckFailed: log.Warningf("The test is valid, remove entry %d from expects, and %d from t.Results", eidx, ridx) - //don't do this at home : delete current element from list and redo + // don't do this at home : delete current element from list and redo results[eidx] = results[len(results)-1] results = results[:len(results)-1] tf.Results[ridx] = tf.Results[len(tf.Results)-1] diff --git a/pkg/leakybucket/conditional.go b/pkg/leakybucket/conditional.go index a203a639743..b3a84b07c21 100644 --- a/pkg/leakybucket/conditional.go +++ b/pkg/leakybucket/conditional.go @@ -11,8 +11,10 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var conditionalExprCache map[string]vm.Program -var conditionalExprCacheLock sync.Mutex +var ( 
+ conditionalExprCache map[string]vm.Program + conditionalExprCacheLock sync.Mutex +) type ConditionalOverflow struct { ConditionalFilter string diff --git a/pkg/leakybucket/manager_load.go b/pkg/leakybucket/manager_load.go index b8310b8cb17..13ce1df75ae 100644 --- a/pkg/leakybucket/manager_load.go +++ b/pkg/leakybucket/manager_load.go @@ -7,7 +7,6 @@ import ( "io" "os" "path/filepath" - "strings" "sync" "time" @@ -201,44 +200,41 @@ func ValidateFactory(bucketFactory *BucketFactory) error { return fmt.Errorf("unknown bucket type '%s'", bucketFactory.Type) } - switch bucketFactory.ScopeType.Scope { - case types.Undefined: + return compileScopeFilter(bucketFactory) +} + +func compileScopeFilter(bucketFactory *BucketFactory) error { + if bucketFactory.ScopeType.Scope == types.Undefined { bucketFactory.ScopeType.Scope = types.Ip - case types.Ip: - case types.Range: - var ( - runTimeFilter *vm.Program - err error - ) + } + if bucketFactory.ScopeType.Scope == types.Ip { if bucketFactory.ScopeType.Filter != "" { - if runTimeFilter, err = expr.Compile(bucketFactory.ScopeType.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...); err != nil { - return fmt.Errorf("error compiling the scope filter: %w", err) - } - - bucketFactory.ScopeType.RunTimeFilter = runTimeFilter + return errors.New("filter is not allowed for IP scope") } - default: - // Compile the scope filter - var ( - runTimeFilter *vm.Program - err error - ) + return nil + } - if bucketFactory.ScopeType.Filter != "" { - if runTimeFilter, err = expr.Compile(bucketFactory.ScopeType.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...); err != nil { - return fmt.Errorf("error compiling the scope filter: %w", err) - } + if bucketFactory.ScopeType.Scope == types.Range && bucketFactory.ScopeType.Filter == "" { + return nil + } - bucketFactory.ScopeType.RunTimeFilter = runTimeFilter - } + if bucketFactory.ScopeType.Filter == "" { + return errors.New("filter 
is mandatory for non-IP, non-Range scope") + } + + runTimeFilter, err := expr.Compile(bucketFactory.ScopeType.Filter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) + if err != nil { + return fmt.Errorf("error compiling the scope filter: %w", err) } + bucketFactory.ScopeType.RunTimeFilter = runTimeFilter + return nil } -func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []string, tomb *tomb.Tomb, buckets *Buckets, orderEvent bool) ([]BucketFactory, chan types.Event, error) { +func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, scenarios []*cwhub.Item, tomb *tomb.Tomb, buckets *Buckets, orderEvent bool) ([]BucketFactory, chan types.Event, error) { var ( ret = []BucketFactory{} response chan types.Event @@ -246,18 +242,15 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str response = make(chan types.Event, 1) - for _, f := range files { - log.Debugf("Loading '%s'", f) + for _, item := range scenarios { + log.Debugf("Loading '%s'", item.State.LocalPath) - if !strings.HasSuffix(f, ".yaml") && !strings.HasSuffix(f, ".yml") { - log.Debugf("Skipping %s : not a yaml file", f) - continue - } + itemPath := item.State.LocalPath // process the yaml - bucketConfigurationFile, err := os.Open(f) + bucketConfigurationFile, err := os.Open(itemPath) if err != nil { - log.Errorf("Can't access leaky configuration file %s", f) + log.Errorf("Can't access leaky configuration file %s", itemPath) return nil, nil, err } @@ -271,8 +264,8 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str err = dec.Decode(&bucketFactory) if err != nil { if !errors.Is(err, io.EOF) { - log.Errorf("Bad yaml in %s: %v", f, err) - return nil, nil, fmt.Errorf("bad yaml in %s: %w", f, err) + log.Errorf("Bad yaml in %s: %v", itemPath, err) + return nil, nil, fmt.Errorf("bad yaml in %s: %w", itemPath, err) } log.Tracef("End of yaml file") @@ -288,7 +281,7 @@ func LoadBuckets(cscfg 
*csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str } // check compat if bucketFactory.FormatVersion == "" { - log.Tracef("no version in %s : %s, assuming '1.0'", bucketFactory.Name, f) + log.Tracef("no version in %s : %s, assuming '1.0'", bucketFactory.Name, itemPath) bucketFactory.FormatVersion = "1.0" } @@ -302,22 +295,17 @@ func LoadBuckets(cscfg *csconfig.CrowdsecServiceCfg, hub *cwhub.Hub, files []str continue } - bucketFactory.Filename = filepath.Clean(f) + bucketFactory.Filename = filepath.Clean(itemPath) bucketFactory.BucketName = seed.Generate() bucketFactory.ret = response - hubItem := hub.GetItemByPath(bucketFactory.Filename) - if hubItem == nil { - log.Errorf("scenario %s (%s) could not be found in hub (ignore if in unit tests)", bucketFactory.Name, bucketFactory.Filename) - } else { - if cscfg.SimulationConfig != nil { - bucketFactory.Simulated = cscfg.SimulationConfig.IsSimulated(hubItem.Name) - } - - bucketFactory.ScenarioVersion = hubItem.State.LocalVersion - bucketFactory.hash = hubItem.State.LocalHash + if cscfg.SimulationConfig != nil { + bucketFactory.Simulated = cscfg.SimulationConfig.IsSimulated(bucketFactory.Name) } + bucketFactory.ScenarioVersion = item.State.LocalVersion + bucketFactory.hash = item.State.LocalHash + bucketFactory.wgDumpState = buckets.wgDumpState bucketFactory.wgPour = buckets.wgPour @@ -348,7 +336,7 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if bucketFactory.Debug { clog := log.New() - if err := types.ConfigureLogger(clog); err != nil { + if err = types.ConfigureLogger(clog); err != nil { return fmt.Errorf("while creating bucket-specific logger: %w", err) } @@ -417,11 +405,22 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if bucketFactory.Distinct != "" { bucketFactory.logger.Tracef("Adding a non duplicate filter") bucketFactory.processors = append(bucketFactory.processors, &Uniq{}) + bucketFactory.logger.Infof("Compiling distinct '%s'", 
bucketFactory.Distinct) + //we're compiling and discarding the expression to be able to detect it during loading + _, err = expr.Compile(bucketFactory.Distinct, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) + if err != nil { + return fmt.Errorf("invalid distinct '%s' in %s: %w", bucketFactory.Distinct, bucketFactory.Filename, err) + } } if bucketFactory.CancelOnFilter != "" { bucketFactory.logger.Tracef("Adding a cancel_on filter") bucketFactory.processors = append(bucketFactory.processors, &CancelOnFilter{}) + //we're compiling and discarding the expression to be able to detect it during loading + _, err = expr.Compile(bucketFactory.CancelOnFilter, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) + if err != nil { + return fmt.Errorf("invalid cancel_on '%s' in %s: %w", bucketFactory.CancelOnFilter, bucketFactory.Filename, err) + } } if bucketFactory.OverflowFilter != "" { @@ -451,6 +450,11 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { if bucketFactory.ConditionalOverflow != "" { bucketFactory.logger.Tracef("Adding conditional overflow") bucketFactory.processors = append(bucketFactory.processors, &ConditionalOverflow{}) + //we're compiling and discarding the expression to be able to detect it during loading + _, err = expr.Compile(bucketFactory.ConditionalOverflow, exprhelpers.GetExprOptions(map[string]interface{}{"queue": &types.Queue{}, "leaky": &Leaky{}, "evt": &types.Event{}})...) 
+ if err != nil { + return fmt.Errorf("invalid condition '%s' in %s: %w", bucketFactory.ConditionalOverflow, bucketFactory.Filename, err) + } } if bucketFactory.BayesianThreshold != 0 { @@ -470,7 +474,9 @@ func LoadBucket(bucketFactory *BucketFactory, tomb *tomb.Tomb) error { } if data.Type == "regexp" { // cache only makes sense for regexp - exprhelpers.RegexpCacheInit(data.DestPath, *data) + if err := exprhelpers.RegexpCacheInit(data.DestPath, *data); err != nil { + bucketFactory.logger.Error(err.Error()) + } } } @@ -496,7 +502,7 @@ func LoadBucketsState(file string, buckets *Buckets, bucketFactories []BucketFac return fmt.Errorf("can't parse state file %s: %w", file, err) } - for k, v := range state { + for k := range state { var tbucket *Leaky log.Debugf("Reloading bucket %s", k) @@ -509,30 +515,30 @@ func LoadBucketsState(file string, buckets *Buckets, bucketFactories []BucketFac found := false for _, h := range bucketFactories { - if h.Name != v.Name { + if h.Name != state[k].Name { continue } log.Debugf("found factory %s/%s -> %s", h.Author, h.Name, h.Description) // check in which mode the bucket was - if v.Mode == types.TIMEMACHINE { + if state[k].Mode == types.TIMEMACHINE { tbucket = NewTimeMachine(h) - } else if v.Mode == types.LIVE { + } else if state[k].Mode == types.LIVE { tbucket = NewLeaky(h) } else { - log.Errorf("Unknown bucket type : %d", v.Mode) + log.Errorf("Unknown bucket type : %d", state[k].Mode) } /*Trying to restore queue state*/ - tbucket.Queue = v.Queue + tbucket.Queue = state[k].Queue /*Trying to set the limiter to the saved values*/ - tbucket.Limiter.Load(v.SerializedState) + tbucket.Limiter.Load(state[k].SerializedState) tbucket.In = make(chan *types.Event) tbucket.Mapkey = k tbucket.Signal = make(chan bool, 1) - tbucket.First_ts = v.First_ts - tbucket.Last_ts = v.Last_ts - tbucket.Ovflw_ts = v.Ovflw_ts - tbucket.Total_count = v.Total_count + tbucket.First_ts = state[k].First_ts + tbucket.Last_ts = state[k].Last_ts + tbucket.Ovflw_ts 
= state[k].Ovflw_ts + tbucket.Total_count = state[k].Total_count buckets.Bucket_map.Store(k, tbucket) h.tomb.Go(func() error { return LeakRoutine(tbucket) @@ -545,7 +551,7 @@ func LoadBucketsState(file string, buckets *Buckets, bucketFactories []BucketFac } if !found { - return fmt.Errorf("unable to find holder for bucket %s: %s", k, spew.Sdump(v)) + return fmt.Errorf("unable to find holder for bucket %s: %s", k, spew.Sdump(state[k])) } } diff --git a/pkg/leakybucket/manager_load_test.go b/pkg/leakybucket/manager_load_test.go index 513f11ff373..6b40deb8c9e 100644 --- a/pkg/leakybucket/manager_load_test.go +++ b/pkg/leakybucket/manager_load_test.go @@ -51,93 +51,100 @@ func TestBadBucketsConfig(t *testing.T) { } func TestLeakyBucketsConfig(t *testing.T) { - var CfgTests = []cfgTest{ - //leaky with bad capacity + CfgTests := []cfgTest{ + // leaky with bad capacity {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 0}, false, false}, - //leaky with empty leakspeed + // leaky with empty leakspeed {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1}, false, false}, - //leaky with missing filter + // leaky with missing filter {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s"}, false, true}, - //leaky with invalid leakspeed + // leaky with invalid leakspeed {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "abs", Filter: "true"}, false, false}, - //leaky with valid filter + // leaky with valid filter {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true"}, true, true}, - //leaky with invalid filter + // leaky with invalid filter {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "xu"}, false, true}, - //leaky with valid filter + // leaky with invalid uniq + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 
1, LeakSpeed: "1s", Filter: "true", Distinct: "foo"}, false, true}, + // leaky with valid uniq + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", Distinct: "evt.Parsed.foobar"}, true, true}, + // leaky with valid filter {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true"}, true, true}, - //leaky with bad overflow filter + // leaky with bad overflow filter {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", OverflowFilter: "xu"}, false, true}, + // leaky with valid overflow filter + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", OverflowFilter: "true"}, true, true}, + // leaky with invalid cancel_on filter + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", CancelOnFilter: "xu"}, false, true}, + // leaky with valid cancel_on filter + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", CancelOnFilter: "true"}, true, true}, + // leaky with invalid conditional overflow filter + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", ConditionalOverflow: "xu"}, false, true}, + // leaky with valid conditional overflow filter + {BucketFactory{Name: "test", Description: "test1", Type: "leaky", Capacity: 1, LeakSpeed: "1s", Filter: "true", ConditionalOverflow: "true"}, true, true}, } if err := runTest(CfgTests); err != nil { t.Fatalf("%s", err) } - } func TestBlackholeConfig(t *testing.T) { - var CfgTests = []cfgTest{ - //basic bh + CfgTests := []cfgTest{ + // basic bh {BucketFactory{Name: "test", Description: "test1", Type: "trigger", Filter: "true", Blackhole: "15s"}, true, true}, - //bad bh + // bad bh {BucketFactory{Name: "test", Description: "test1", 
Type: "trigger", Filter: "true", Blackhole: "abc"}, false, true}, } if err := runTest(CfgTests); err != nil { t.Fatalf("%s", err) } - } func TestTriggerBucketsConfig(t *testing.T) { - var CfgTests = []cfgTest{ - //basic valid counter + CfgTests := []cfgTest{ + // basic valid counter {BucketFactory{Name: "test", Description: "test1", Type: "trigger", Filter: "true"}, true, true}, } if err := runTest(CfgTests); err != nil { t.Fatalf("%s", err) } - } func TestCounterBucketsConfig(t *testing.T) { - var CfgTests = []cfgTest{ - - //basic valid counter + CfgTests := []cfgTest{ + // basic valid counter {BucketFactory{Name: "test", Description: "test1", Type: "counter", Capacity: -1, Duration: "5s", Filter: "true"}, true, true}, - //missing duration + // missing duration {BucketFactory{Name: "test", Description: "test1", Type: "counter", Capacity: -1, Filter: "true"}, false, false}, - //bad duration + // bad duration {BucketFactory{Name: "test", Description: "test1", Type: "counter", Capacity: -1, Duration: "abc", Filter: "true"}, false, false}, - //capacity must be -1 + // capacity must be -1 {BucketFactory{Name: "test", Description: "test1", Type: "counter", Capacity: 0, Duration: "5s", Filter: "true"}, false, false}, } if err := runTest(CfgTests); err != nil { t.Fatalf("%s", err) } - } func TestBayesianBucketsConfig(t *testing.T) { - var CfgTests = []cfgTest{ - - //basic valid counter + CfgTests := []cfgTest{ + // basic valid counter {BucketFactory{Name: "test", Description: "test1", Type: "bayesian", Capacity: -1, Filter: "true", BayesianPrior: 0.5, BayesianThreshold: 0.5, BayesianConditions: []RawBayesianCondition{{ConditionalFilterName: "true", ProbGivenEvil: 0.5, ProbGivenBenign: 0.5}}}, true, true}, - //bad capacity + // bad capacity {BucketFactory{Name: "test", Description: "test1", Type: "bayesian", Capacity: 1, Filter: "true", BayesianPrior: 0.5, BayesianThreshold: 0.5, BayesianConditions: []RawBayesianCondition{{ConditionalFilterName: "true", ProbGivenEvil: 0.5, 
ProbGivenBenign: 0.5}}}, false, false}, - //missing prior + // missing prior {BucketFactory{Name: "test", Description: "test1", Type: "bayesian", Capacity: -1, Filter: "true", BayesianThreshold: 0.5, BayesianConditions: []RawBayesianCondition{{ConditionalFilterName: "true", ProbGivenEvil: 0.5, ProbGivenBenign: 0.5}}}, false, false}, - //missing threshold + // missing threshold {BucketFactory{Name: "test", Description: "test1", Type: "bayesian", Capacity: -1, Filter: "true", BayesianPrior: 0.5, BayesianConditions: []RawBayesianCondition{{ConditionalFilterName: "true", ProbGivenEvil: 0.5, ProbGivenBenign: 0.5}}}, false, false}, - //bad prior + // bad prior {BucketFactory{Name: "test", Description: "test1", Type: "bayesian", Capacity: -1, Filter: "true", BayesianPrior: 1.5, BayesianThreshold: 0.5, BayesianConditions: []RawBayesianCondition{{ConditionalFilterName: "true", ProbGivenEvil: 0.5, ProbGivenBenign: 0.5}}}, false, false}, - //bad threshold + // bad threshold {BucketFactory{Name: "test", Description: "test1", Type: "bayesian", Capacity: -1, Filter: "true", BayesianPrior: 0.5, BayesianThreshold: 1.5, BayesianConditions: []RawBayesianCondition{{ConditionalFilterName: "true", ProbGivenEvil: 0.5, ProbGivenBenign: 0.5}}}, false, false}, } if err := runTest(CfgTests); err != nil { t.Fatalf("%s", err) } - } diff --git a/pkg/leakybucket/manager_run.go b/pkg/leakybucket/manager_run.go index 2858d8b5635..e6712e6e47e 100644 --- a/pkg/leakybucket/manager_run.go +++ b/pkg/leakybucket/manager_run.go @@ -17,9 +17,11 @@ import ( "github.com/crowdsecurity/crowdsec/pkg/types" ) -var serialized map[string]Leaky -var BucketPourCache map[string][]types.Event -var BucketPourTrack bool +var ( + serialized map[string]Leaky + BucketPourCache map[string][]types.Event + BucketPourTrack bool +) /* The leaky routines lifecycle are based on "real" time. 
@@ -243,7 +245,6 @@ func PourItemToBucket(bucket *Leaky, holder BucketFactory, buckets *Buckets, par } func LoadOrStoreBucketFromHolder(partitionKey string, buckets *Buckets, holder BucketFactory, expectMode int) (*Leaky, error) { - biface, ok := buckets.Bucket_map.Load(partitionKey) /* the bucket doesn't exist, create it !*/ @@ -283,9 +284,7 @@ func LoadOrStoreBucketFromHolder(partitionKey string, buckets *Buckets, holder B var orderEvent map[string]*sync.WaitGroup func PourItemToHolders(parsed types.Event, holders []BucketFactory, buckets *Buckets) (bool, error) { - var ( - ok, condition, poured bool - ) + var ok, condition, poured bool if BucketPourTrack { if BucketPourCache == nil { diff --git a/pkg/leakybucket/overflow_filter.go b/pkg/leakybucket/overflow_filter.go index 01dd491ed41..b37e431fadf 100644 --- a/pkg/leakybucket/overflow_filter.go +++ b/pkg/leakybucket/overflow_filter.go @@ -36,10 +36,10 @@ func NewOverflowFilter(g *BucketFactory) (*OverflowFilter, error) { return &u, nil } -func (u *OverflowFilter) OnBucketOverflow(Bucket *BucketFactory) func(*Leaky, types.RuntimeAlert, *types.Queue) (types.RuntimeAlert, *types.Queue) { +func (u *OverflowFilter) OnBucketOverflow(bucket *BucketFactory) func(*Leaky, types.RuntimeAlert, *types.Queue) (types.RuntimeAlert, *types.Queue) { return func(l *Leaky, s types.RuntimeAlert, q *types.Queue) (types.RuntimeAlert, *types.Queue) { el, err := exprhelpers.Run(u.FilterRuntime, map[string]interface{}{ - "queue": q, "signal": s, "leaky": l}, l.logger, Bucket.Debug) + "queue": q, "signal": s, "leaky": l}, l.logger, bucket.Debug) if err != nil { l.logger.Errorf("Failed running overflow filter: %s", err) return s, q diff --git a/pkg/leakybucket/overflows.go b/pkg/leakybucket/overflows.go index 39b0e6a0ec4..9357caefaff 100644 --- a/pkg/leakybucket/overflows.go +++ b/pkg/leakybucket/overflows.go @@ -149,6 +149,7 @@ func eventSources(evt types.Event, leaky *Leaky) (map[string]models.Source, erro leaky.logger.Tracef("Valid 
range from %s : %s", src.IP, src.Range) } } + if leaky.scopeType.Scope == types.Ip { src.Value = &src.IP } else if leaky.scopeType.Scope == types.Range { @@ -198,22 +199,24 @@ func eventSources(evt types.Event, leaky *Leaky) (map[string]models.Source, erro func EventsFromQueue(queue *types.Queue) []*models.Event { events := []*models.Event{} - for _, evt := range queue.Queue { - if evt.Meta == nil { + qEvents := queue.GetQueue() + + for idx := range qEvents { + if qEvents[idx].Meta == nil { continue } meta := models.Meta{} // we want consistence - skeys := make([]string, 0, len(evt.Meta)) - for k := range evt.Meta { + skeys := make([]string, 0, len(qEvents[idx].Meta)) + for k := range qEvents[idx].Meta { skeys = append(skeys, k) } sort.Strings(skeys) for _, k := range skeys { - v := evt.Meta[k] + v := qEvents[idx].Meta[k] subMeta := models.MetaItems0{Key: k, Value: v} meta = append(meta, &subMeta) } @@ -223,15 +226,15 @@ func EventsFromQueue(queue *types.Queue) []*models.Event { Meta: meta, } // either MarshaledTime is present and is extracted from log - if evt.MarshaledTime != "" { - tmpTimeStamp := evt.MarshaledTime + if qEvents[idx].MarshaledTime != "" { + tmpTimeStamp := qEvents[idx].MarshaledTime ovflwEvent.Timestamp = &tmpTimeStamp - } else if !evt.Time.IsZero() { // or .Time has been set during parse as time.Now().UTC() + } else if !qEvents[idx].Time.IsZero() { // or .Time has been set during parse as time.Now().UTC() ovflwEvent.Timestamp = new(string) - raw, err := evt.Time.MarshalText() + raw, err := qEvents[idx].Time.MarshalText() if err != nil { - log.Warningf("while serializing time '%s' : %s", evt.Time.String(), err) + log.Warningf("while serializing time '%s' : %s", qEvents[idx].Time.String(), err) } else { *ovflwEvent.Timestamp = string(raw) } @@ -253,8 +256,9 @@ func alertFormatSource(leaky *Leaky, queue *types.Queue) (map[string]models.Sour log.Debugf("Formatting (%s) - scope Info : scope_type:%s / scope_filter:%s", leaky.Name, 
leaky.scopeType.Scope, leaky.scopeType.Filter) - for _, evt := range queue.Queue { - srcs, err := SourceFromEvent(evt, leaky) + qEvents := queue.GetQueue() + for idx := range qEvents { + srcs, err := SourceFromEvent(qEvents[idx], leaky) if err != nil { return nil, "", fmt.Errorf("while extracting scope from bucket %s: %w", leaky.Name, err) } @@ -359,9 +363,7 @@ func NewAlert(leaky *Leaky, queue *types.Queue) (types.RuntimeAlert, error) { } if err := newApiAlert.Validate(strfmt.Default); err != nil { - log.Errorf("Generated alerts isn't valid") - log.Errorf("->%s", spew.Sdump(newApiAlert)) - log.Fatalf("error : %s", err) + return runtimeAlert, fmt.Errorf("invalid generated alert: %w: %s", err, spew.Sdump(newApiAlert)) } runtimeAlert.APIAlerts = append(runtimeAlert.APIAlerts, newApiAlert) diff --git a/pkg/leakybucket/processor.go b/pkg/leakybucket/processor.go index 81af3000c1c..dc5330a612e 100644 --- a/pkg/leakybucket/processor.go +++ b/pkg/leakybucket/processor.go @@ -10,8 +10,7 @@ type Processor interface { AfterBucketPour(Bucket *BucketFactory) func(types.Event, *Leaky) *types.Event } -type DumbProcessor struct { -} +type DumbProcessor struct{} func (d *DumbProcessor) OnBucketInit(bucketFactory *BucketFactory) error { return nil diff --git a/pkg/leakybucket/reset_filter.go b/pkg/leakybucket/reset_filter.go index 452ccc085b1..3b9b876aff4 100644 --- a/pkg/leakybucket/reset_filter.go +++ b/pkg/leakybucket/reset_filter.go @@ -23,10 +23,12 @@ type CancelOnFilter struct { Debug bool } -var cancelExprCacheLock sync.Mutex -var cancelExprCache map[string]struct { - CancelOnFilter *vm.Program -} +var ( + cancelExprCacheLock sync.Mutex + cancelExprCache map[string]struct { + CancelOnFilter *vm.Program + } +) func (u *CancelOnFilter) OnBucketPour(bucketFactory *BucketFactory) func(types.Event, *Leaky) *types.Event { return func(msg types.Event, leaky *Leaky) *types.Event { diff --git a/pkg/leakybucket/uniq.go b/pkg/leakybucket/uniq.go index 0cc0583390b..8a97f30b092 100644 
--- a/pkg/leakybucket/uniq.go +++ b/pkg/leakybucket/uniq.go @@ -16,8 +16,10 @@ import ( // on overflow // on leak -var uniqExprCache map[string]vm.Program -var uniqExprCacheLock sync.Mutex +var ( + uniqExprCache map[string]vm.Program + uniqExprCacheLock sync.Mutex +) type Uniq struct { DistinctCompiled *vm.Program @@ -58,9 +60,6 @@ func (u *Uniq) AfterBucketPour(bucketFactory *BucketFactory) func(types.Event, * } func (u *Uniq) OnBucketInit(bucketFactory *BucketFactory) error { - var err error - var compiledExpr *vm.Program - if uniqExprCache == nil { uniqExprCache = make(map[string]vm.Program) } @@ -72,14 +71,17 @@ func (u *Uniq) OnBucketInit(bucketFactory *BucketFactory) error { } else { uniqExprCacheLock.Unlock() //release the lock during compile - compiledExpr, err = expr.Compile(bucketFactory.Distinct, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) + compiledExpr, err := expr.Compile(bucketFactory.Distinct, exprhelpers.GetExprOptions(map[string]interface{}{"evt": &types.Event{}})...) 
+ if err != nil { + return err + } u.DistinctCompiled = compiledExpr uniqExprCacheLock.Lock() uniqExprCache[bucketFactory.Distinct] = *compiledExpr uniqExprCacheLock.Unlock() } u.KeyCache = make(map[string]bool) - return err + return nil } // getElement computes a string from an event and a filter diff --git a/pkg/longpollclient/client.go b/pkg/longpollclient/client.go index 5c395185b20..6a668e07d84 100644 --- a/pkg/longpollclient/client.go +++ b/pkg/longpollclient/client.go @@ -10,7 +10,7 @@ import ( "net/url" "time" - "github.com/gofrs/uuid" + "github.com/google/uuid" log "github.com/sirupsen/logrus" "gopkg.in/tomb.v2" ) diff --git a/pkg/metabase/container.go b/pkg/metabase/container.go index 8b3dd4084c0..9787e535e86 100644 --- a/pkg/metabase/container.go +++ b/pkg/metabase/container.go @@ -5,8 +5,8 @@ import ( "context" "fmt" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" + typesImage "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/mount" "github.com/docker/docker/client" "github.com/docker/go-connections/nat" @@ -16,38 +16,40 @@ import ( ) type Container struct { - ListenAddr string - ListenPort string - SharedFolder string - Image string - Name string - ID string - CLI *client.Client - MBDBUri string - DockerGroupID string + ListenAddr string + ListenPort string + SharedFolder string + Image string + Name string + ID string + CLI *client.Client + MBDBUri string + DockerGroupID string + EnvironmentVariables []string } -func NewContainer(listenAddr string, listenPort string, sharedFolder string, containerName string, image string, mbDBURI string, dockerGroupID string) (*Container, error) { +func NewContainer(listenAddr string, listenPort string, sharedFolder string, containerName string, image string, mbDBURI string, dockerGroupID string, environmentVariables []string) (*Container, error) { cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) if err != nil { 
return nil, fmt.Errorf("failed to create docker client : %s", err) } return &Container{ - ListenAddr: listenAddr, - ListenPort: listenPort, - SharedFolder: sharedFolder, - Image: image, - Name: containerName, - CLI: cli, - MBDBUri: mbDBURI, - DockerGroupID: dockerGroupID, + ListenAddr: listenAddr, + ListenPort: listenPort, + SharedFolder: sharedFolder, + Image: image, + Name: containerName, + CLI: cli, + MBDBUri: mbDBURI, + DockerGroupID: dockerGroupID, + EnvironmentVariables: environmentVariables, }, nil } func (c *Container) Create() error { ctx := context.Background() log.Printf("Pulling docker image %s", c.Image) - reader, err := c.CLI.ImagePull(ctx, c.Image, types.ImagePullOptions{}) + reader, err := c.CLI.ImagePull(ctx, c.Image, typesImage.PullOptions{}) if err != nil { return fmt.Errorf("failed to pull docker image : %s", err) } @@ -79,9 +81,9 @@ func (c *Container) Create() error { }, } - env := []string{ - fmt.Sprintf("MB_DB_FILE=%s/metabase.db", containerSharedFolder), - } + env := c.EnvironmentVariables + + env = append(env, fmt.Sprintf("MB_DB_FILE=%s/metabase.db", containerSharedFolder)) if c.MBDBUri != "" { env = append(env, c.MBDBUri) } @@ -105,7 +107,7 @@ func (c *Container) Create() error { func (c *Container) Start() error { ctx := context.Background() - if err := c.CLI.ContainerStart(ctx, c.Name, types.ContainerStartOptions{}); err != nil { + if err := c.CLI.ContainerStart(ctx, c.Name, container.StartOptions{}); err != nil { return fmt.Errorf("failed while starting %s : %s", c.ID, err) } @@ -118,7 +120,7 @@ func StartContainer(name string) error { return fmt.Errorf("failed to create docker client : %s", err) } ctx := context.Background() - if err := cli.ContainerStart(ctx, name, types.ContainerStartOptions{}); err != nil { + if err := cli.ContainerStart(ctx, name, container.StartOptions{}); err != nil { return fmt.Errorf("failed while starting %s : %s", name, err) } @@ -146,7 +148,7 @@ func RemoveContainer(name string) error { } ctx := 
context.Background() log.Printf("Removing docker metabase %s", name) - if err := cli.ContainerRemove(ctx, name, types.ContainerRemoveOptions{}); err != nil { + if err := cli.ContainerRemove(ctx, name, container.RemoveOptions{}); err != nil { return fmt.Errorf("failed to remove container %s : %s", name, err) } return nil @@ -159,7 +161,7 @@ func RemoveImageContainer(image string) error { } ctx := context.Background() log.Printf("Removing docker image '%s'", image) - if _, err := cli.ImageRemove(ctx, image, types.ImageRemoveOptions{}); err != nil { + if _, err := cli.ImageRemove(ctx, image, typesImage.RemoveOptions{}); err != nil { return fmt.Errorf("failed to remove image container %s : %s", image, err) } return nil diff --git a/pkg/metabase/metabase.go b/pkg/metabase/metabase.go index 324a05666a1..0ebb219d211 100644 --- a/pkg/metabase/metabase.go +++ b/pkg/metabase/metabase.go @@ -30,15 +30,16 @@ type Metabase struct { } type Config struct { - Database *csconfig.DatabaseCfg `yaml:"database"` - ListenAddr string `yaml:"listen_addr"` - ListenPort string `yaml:"listen_port"` - ListenURL string `yaml:"listen_url"` - Username string `yaml:"username"` - Password string `yaml:"password"` - DBPath string `yaml:"metabase_db_path"` - DockerGroupID string `yaml:"-"` - Image string `yaml:"image"` + Database *csconfig.DatabaseCfg `yaml:"database"` + ListenAddr string `yaml:"listen_addr"` + ListenPort string `yaml:"listen_port"` + ListenURL string `yaml:"listen_url"` + Username string `yaml:"username"` + Password string `yaml:"password"` + DBPath string `yaml:"metabase_db_path"` + DockerGroupID string `yaml:"-"` + Image string `yaml:"image"` + EnvironmentVariables []string `yaml:"environment_variables"` } var ( @@ -88,7 +89,7 @@ func (m *Metabase) Init(containerName string, image string) error { if err != nil { return err } - m.Container, err = NewContainer(m.Config.ListenAddr, m.Config.ListenPort, m.Config.DBPath, containerName, image, DBConnectionURI, m.Config.DockerGroupID) + 
m.Container, err = NewContainer(m.Config.ListenAddr, m.Config.ListenPort, m.Config.DBPath, containerName, image, DBConnectionURI, m.Config.DockerGroupID, m.Config.EnvironmentVariables) if err != nil { return fmt.Errorf("container init: %w", err) } @@ -137,21 +138,21 @@ func (m *Metabase) LoadConfig(configPath string) error { m.Config = config return nil - } -func SetupMetabase(dbConfig *csconfig.DatabaseCfg, listenAddr string, listenPort string, username string, password string, mbDBPath string, dockerGroupID string, containerName string, image string) (*Metabase, error) { +func SetupMetabase(dbConfig *csconfig.DatabaseCfg, listenAddr string, listenPort string, username string, password string, mbDBPath string, dockerGroupID string, containerName string, image string, environmentVariables []string) (*Metabase, error) { metabase := &Metabase{ Config: &Config{ - Database: dbConfig, - ListenAddr: listenAddr, - ListenPort: listenPort, - Username: username, - Password: password, - ListenURL: fmt.Sprintf("http://%s:%s", listenAddr, listenPort), - DBPath: mbDBPath, - DockerGroupID: dockerGroupID, - Image: image, + Database: dbConfig, + ListenAddr: listenAddr, + ListenPort: listenPort, + Username: username, + Password: password, + ListenURL: fmt.Sprintf("http://%s:%s", listenAddr, listenPort), + DBPath: mbDBPath, + DockerGroupID: dockerGroupID, + Image: image, + EnvironmentVariables: environmentVariables, }, } if err := metabase.Init(containerName, image); err != nil { diff --git a/pkg/parser/enrich.go b/pkg/parser/enrich.go index 661410d20d3..a69cd963813 100644 --- a/pkg/parser/enrich.go +++ b/pkg/parser/enrich.go @@ -7,8 +7,10 @@ import ( ) /* should be part of a package shared with enrich/geoip.go */ -type EnrichFunc func(string, *types.Event, *log.Entry) (map[string]string, error) -type InitFunc func(map[string]string) (interface{}, error) +type ( + EnrichFunc func(string, *types.Event, *log.Entry) (map[string]string, error) + InitFunc func(map[string]string) 
(interface{}, error) +) type EnricherCtx struct { Registered map[string]*Enricher diff --git a/pkg/parser/enrich_date.go b/pkg/parser/enrich_date.go index 40c8de39da5..0a4bc51b862 100644 --- a/pkg/parser/enrich_date.go +++ b/pkg/parser/enrich_date.go @@ -44,6 +44,7 @@ func GenDateParse(date string) (string, time.Time) { "2006-01-02 15:04", "2006/01/02 15:04:05", "2006-01-02 15:04:05", + "2006-01-02T15:04:05", } ) diff --git a/pkg/parser/enrich_date_test.go b/pkg/parser/enrich_date_test.go index 930633feb35..13e106f3049 100644 --- a/pkg/parser/enrich_date_test.go +++ b/pkg/parser/enrich_date_test.go @@ -40,6 +40,38 @@ func TestDateParse(t *testing.T) { }, expected: "2011-12-17T08:17:43Z", }, + { + name: "ISO 8601, no timezone", + evt: types.Event{ + StrTime: "2024-11-26T20:13:32", + StrTimeFormat: "", + }, + expected: "2024-11-26T20:13:32Z", + }, + { + name: "ISO 8601, no timezone, milliseconds", + evt: types.Event{ + StrTime: "2024-11-26T20:13:32.123", + StrTimeFormat: "", + }, + expected: "2024-11-26T20:13:32.123Z", + }, + { + name: "ISO 8601, no timezone, microseconds", + evt: types.Event{ + StrTime: "2024-11-26T20:13:32.123456", + StrTimeFormat: "", + }, + expected: "2024-11-26T20:13:32.123456Z", + }, + { + name: "ISO 8601, no timezone, nanoseconds", + evt: types.Event{ + StrTime: "2024-11-26T20:13:32.123456789", + StrTimeFormat: "", + }, + expected: "2024-11-26T20:13:32.123456789Z", + }, } logger := log.WithField("test", "test") diff --git a/pkg/parser/enrich_geoip.go b/pkg/parser/enrich_geoip.go index 1756927bc4b..79a70077283 100644 --- a/pkg/parser/enrich_geoip.go +++ b/pkg/parser/enrich_geoip.go @@ -18,7 +18,6 @@ func IpToRange(field string, p *types.Event, plog *log.Entry) (map[string]string } r, err := exprhelpers.GeoIPRangeEnrich(field) - if err != nil { plog.Errorf("Unable to enrich ip '%s'", field) return nil, nil //nolint:nilerr @@ -47,7 +46,6 @@ func GeoIpASN(field string, p *types.Event, plog *log.Entry) (map[string]string, } r, err := 
exprhelpers.GeoIPASNEnrich(field) - if err != nil { plog.Debugf("Unable to enrich ip '%s'", field) return nil, nil //nolint:nilerr @@ -81,7 +79,6 @@ func GeoIpCity(field string, p *types.Event, plog *log.Entry) (map[string]string } r, err := exprhelpers.GeoIPEnrich(field) - if err != nil { plog.Debugf("Unable to enrich ip '%s'", field) return nil, nil //nolint:nilerr diff --git a/pkg/parser/node.go b/pkg/parser/node.go index 26046ae4fd6..1229a0f4470 100644 --- a/pkg/parser/node.go +++ b/pkg/parser/node.go @@ -3,6 +3,7 @@ package parser import ( "errors" "fmt" + "strconv" "strings" "time" @@ -236,7 +237,7 @@ func (n *Node) processGrok(p *types.Event, cachedExprEnv map[string]any) (bool, case string: gstr = out case int: - gstr = fmt.Sprintf("%d", out) + gstr = strconv.Itoa(out) case float64, float32: gstr = fmt.Sprintf("%f", out) default: @@ -352,21 +353,24 @@ func (n *Node) process(p *types.Event, ctx UnixParserCtx, expressionEnv map[stri clog.Warningf("unexpected type %t (%v) while running '%s'", output, output, stash.Key) continue } - cache.SetKey(stash.Name, key, value, &stash.TTLVal) + if err = cache.SetKey(stash.Name, key, value, &stash.TTLVal); err != nil { + clog.Warningf("failed to store data in cache: %s", err.Error()) + } } } // Iterate on leafs - for _, leaf := range n.LeavesNodes { - ret, err := leaf.process(p, ctx, cachedExprEnv) + leaves := n.LeavesNodes + for idx := range leaves { + ret, err := leaves[idx].process(p, ctx, cachedExprEnv) if err != nil { - clog.Tracef("\tNode (%s) failed : %v", leaf.rn, err) + clog.Tracef("\tNode (%s) failed : %v", leaves[idx].rn, err) clog.Debugf("Event leaving node : ko") return false, err } - clog.Tracef("\tsub-node (%s) ret : %v (strategy:%s)", leaf.rn, ret, n.OnSuccess) + clog.Tracef("\tsub-node (%s) ret : %v (strategy:%s)", leaves[idx].rn, ret, n.OnSuccess) if ret { NodeState = true @@ -593,7 +597,7 @@ func (n *Node) compile(pctx *UnixParserCtx, ectx EnricherCtx) error { /* compile leafs if present */ for idx := 
range n.LeavesNodes { if n.LeavesNodes[idx].Name == "" { - n.LeavesNodes[idx].Name = fmt.Sprintf("child-%s", n.Name) + n.LeavesNodes[idx].Name = "child-" + n.Name } /*propagate debug/stats to child nodes*/ if !n.LeavesNodes[idx].Debug && n.Debug { diff --git a/pkg/parser/parsing_test.go b/pkg/parser/parsing_test.go index 269d51a1ba2..84d5f4db743 100644 --- a/pkg/parser/parsing_test.go +++ b/pkg/parser/parsing_test.go @@ -13,6 +13,8 @@ import ( "github.com/davecgh/go-spew/spew" log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "gopkg.in/yaml.v2" "github.com/crowdsecurity/crowdsec/pkg/exprhelpers" @@ -33,14 +35,11 @@ func TestParser(t *testing.T) { envSetting := os.Getenv("TEST_ONLY") - pctx, ectx, err := prepTests() - if err != nil { - t.Fatalf("failed to load env : %s", err) - } + pctx, ectx := prepTests(t) // Init the enricher if envSetting != "" { - if err := testOneParser(pctx, ectx, envSetting, nil); err != nil { + if err := testOneParser(t, pctx, ectx, envSetting, nil); err != nil { t.Fatalf("Test '%s' failed : %s", envSetting, err) } } else { @@ -57,7 +56,7 @@ func TestParser(t *testing.T) { fname := "./tests/" + fd.Name() log.Infof("Running test on %s", fname) - if err := testOneParser(pctx, ectx, fname, nil); err != nil { + if err := testOneParser(t, pctx, ectx, fname, nil); err != nil { t.Fatalf("Test '%s' failed : %s", fname, err) } } @@ -71,22 +70,16 @@ func BenchmarkParser(t *testing.B) { log.SetLevel(log.ErrorLevel) - pctx, ectx, err := prepTests() - if err != nil { - t.Fatalf("failed to load env : %s", err) - } + pctx, ectx := prepTests(t) envSetting := os.Getenv("TEST_ONLY") if envSetting != "" { - if err := testOneParser(pctx, ectx, envSetting, t); err != nil { - t.Fatalf("Test '%s' failed : %s", envSetting, err) - } + err := testOneParser(t, pctx, ectx, envSetting, t) + require.NoError(t, err, "Test '%s' failed", envSetting) } else { fds, err := os.ReadDir("./tests/") - if err != nil { - 
t.Fatalf("Unable to read test directory : %s", err) - } + require.NoError(t, err, "Unable to read test directory") for _, fd := range fds { if !fd.IsDir() { @@ -96,14 +89,13 @@ func BenchmarkParser(t *testing.B) { fname := "./tests/" + fd.Name() log.Infof("Running test on %s", fname) - if err := testOneParser(pctx, ectx, fname, t); err != nil { - t.Fatalf("Test '%s' failed : %s", fname, err) - } + err := testOneParser(t, pctx, ectx, fname, t) + require.NoError(t, err, "Test '%s' failed", fname) } } } -func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing.B) error { +func testOneParser(t require.TestingT, pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing.B) error { var ( err error pnodes []Node @@ -143,7 +135,7 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing // TBD: Load post overflows // func testFile(t *testing.T, file string, pctx UnixParserCtx, nodes []Node) bool { parser_test_file := fmt.Sprintf("%s/test.yaml", dir) - tests := loadTestFile(parser_test_file) + tests := loadTestFile(t, parser_test_file) count := 1 if b != nil { @@ -151,8 +143,8 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing b.ResetTimer() } - for range(count) { - if !testFile(tests, *pctx, pnodes) { + for range count { + if !testFile(t, tests, *pctx, pnodes) { return errors.New("test failed") } } @@ -161,7 +153,7 @@ func testOneParser(pctx *UnixParserCtx, ectx EnricherCtx, dir string, b *testing } // prepTests is going to do the initialisation of parser : it's going to load enrichment plugins and load the patterns. 
This is done here so that we don't redo it for each test -func prepTests() (*UnixParserCtx, EnricherCtx, error) { +func prepTests(t require.TestingT) (*UnixParserCtx, EnricherCtx) { var ( err error pctx *UnixParserCtx @@ -169,22 +161,16 @@ func prepTests() (*UnixParserCtx, EnricherCtx, error) { ) err = exprhelpers.Init(nil) - if err != nil { - return nil, ectx, fmt.Errorf("exprhelpers init failed: %w", err) - } + require.NoError(t, err, "exprhelpers init failed") // Load enrichment datadir := "./test_data/" err = exprhelpers.GeoIPInit(datadir) - if err != nil { - log.Fatalf("unable to initialize GeoIP: %s", err) - } + require.NoError(t, err, "geoip init failed") ectx, err = Loadplugin() - if err != nil { - return nil, ectx, fmt.Errorf("failed to load plugin geoip: %v", err) - } + require.NoError(t, err, "load plugin failed") log.Printf("Loaded -> %+v", ectx) @@ -194,18 +180,14 @@ func prepTests() (*UnixParserCtx, EnricherCtx, error) { /* this should be refactored to 2 lines :p */ // Init the parser pctx, err = Init(map[string]interface{}{"patterns": cfgdir + string("/patterns/"), "data": "./tests/"}) - if err != nil { - return nil, ectx, fmt.Errorf("failed to initialize parser: %v", err) - } + require.NoError(t, err, "parser init failed") - return pctx, ectx, nil + return pctx, ectx } -func loadTestFile(file string) []TestFile { +func loadTestFile(t require.TestingT, file string) []TestFile { yamlFile, err := os.Open(file) - if err != nil { - log.Fatalf("yamlFile.Get err #%v ", err) - } + require.NoError(t, err, "failed to open test file") dec := yaml.NewDecoder(yamlFile) dec.SetStrict(true) @@ -221,7 +203,7 @@ func loadTestFile(file string) []TestFile { break } - log.Fatalf("Failed to load testfile '%s' yaml error : %v", file, err) + require.NoError(t, err, "failed to load testfile '%s'", file) return nil } @@ -285,7 +267,7 @@ func matchEvent(expected types.Event, out types.Event, debug bool) ([]string, bo valid = true - for mapIdx := range(len(expectMaps)) { + 
for mapIdx := range len(expectMaps) { for expKey, expVal := range expectMaps[mapIdx] { outVal, ok := outMaps[mapIdx][expKey] if !ok { @@ -391,19 +373,14 @@ reCheck: return true, nil } -func testFile(testSet []TestFile, pctx UnixParserCtx, nodes []Node) bool { +func testFile(t require.TestingT, testSet []TestFile, pctx UnixParserCtx, nodes []Node) bool { log.Warning("Going to process one test set") for _, tf := range testSet { // func testSubSet(testSet TestFile, pctx UnixParserCtx, nodes []Node) (bool, error) { testOk, err := testSubSet(tf, pctx, nodes) - if err != nil { - log.Fatalf("test failed : %s", err) - } - - if !testOk { - log.Fatalf("failed test : %+v", tf) - } + require.NoError(t, err, "test failed") + assert.True(t, testOk, "failed test: %+v", tf) } return true @@ -427,9 +404,7 @@ func TestGeneratePatternsDoc(t *testing.T) { } pctx, err := Init(map[string]interface{}{"patterns": "../../config/patterns/", "data": "./tests/"}) - if err != nil { - t.Fatalf("unable to load patterns : %s", err) - } + require.NoError(t, err, "unable to load patterns") log.Infof("-> %s", spew.Sdump(pctx)) /*don't judge me, we do it for the users*/ diff --git a/pkg/parser/runtime.go b/pkg/parser/runtime.go index 8068690b68f..7af82a71535 100644 --- a/pkg/parser/runtime.go +++ b/pkg/parser/runtime.go @@ -29,10 +29,11 @@ func SetTargetByName(target string, value string, evt *types.Event) bool { return false } - //it's a hack, we do it for the user + // it's a hack, we do it for the user target = strings.TrimPrefix(target, "evt.") log.Debugf("setting target %s to %s", target, value) + defer func() { if r := recover(); r != nil { log.Errorf("Runtime error while trying to set '%s': %+v", target, r) @@ -46,6 +47,7 @@ func SetTargetByName(target string, value string, evt *types.Event) bool { //event is nil return false } + for _, f := range strings.Split(target, ".") { /* ** According to current Event layout we only have to handle struct and map @@ -57,7 +59,9 @@ func 
SetTargetByName(target string, value string, evt *types.Event) bool { if (tmp == reflect.Value{}) || tmp.IsZero() { log.Debugf("map entry is zero in '%s'", target) } + iter.SetMapIndex(reflect.ValueOf(f), reflect.ValueOf(value)) + return true case reflect.Struct: tmp := iter.FieldByName(f) @@ -65,9 +69,11 @@ func SetTargetByName(target string, value string, evt *types.Event) bool { log.Debugf("'%s' is not a valid target because '%s' is not valid", target, f) return false } + if tmp.Kind() == reflect.Ptr { tmp = reflect.Indirect(tmp) } + iter = tmp case reflect.Ptr: tmp := iter.Elem() @@ -82,11 +88,14 @@ func SetTargetByName(target string, value string, evt *types.Event) bool { log.Errorf("'%s' can't be set", target) return false } + if iter.Kind() != reflect.String { log.Errorf("Expected string, got %v when handling '%s'", iter.Kind(), target) return false } + iter.Set(reflect.ValueOf(value)) + return true } @@ -248,14 +257,18 @@ func stageidx(stage string, stages []string) int { return -1 } -var ParseDump bool -var DumpFolder string +var ( + ParseDump bool + DumpFolder string +) -var StageParseCache dumps.ParserResults -var StageParseMutex sync.Mutex +var ( + StageParseCache dumps.ParserResults + StageParseMutex sync.Mutex +) func Parse(ctx UnixParserCtx, xp types.Event, nodes []Node) (types.Event, error) { - var event = xp + event := xp /* the stage is undefined, probably line is freshly acquired, set to first stage !*/ if event.Stage == "" && len(ctx.Stages) > 0 { @@ -317,46 +330,46 @@ func Parse(ctx UnixParserCtx, xp types.Event, nodes []Node) (types.Event, error) } isStageOK := false - for idx, node := range nodes { + for idx := range nodes { //Only process current stage's nodes - if event.Stage != node.Stage { + if event.Stage != nodes[idx].Stage { continue } clog := log.WithFields(log.Fields{ - "node-name": node.rn, + "node-name": nodes[idx].rn, "stage": event.Stage, }) - clog.Tracef("Processing node %d/%d -> %s", idx, len(nodes), node.rn) + 
clog.Tracef("Processing node %d/%d -> %s", idx, len(nodes), nodes[idx].rn) if ctx.Profiling { - node.Profiling = true + nodes[idx].Profiling = true } - ret, err := node.process(&event, ctx, map[string]interface{}{"evt": &event}) + ret, err := nodes[idx].process(&event, ctx, map[string]interface{}{"evt": &event}) if err != nil { clog.Errorf("Error while processing node : %v", err) return event, err } - clog.Tracef("node (%s) ret : %v", node.rn, ret) + clog.Tracef("node (%s) ret : %v", nodes[idx].rn, ret) if ParseDump { var parserIdxInStage int StageParseMutex.Lock() - if len(StageParseCache[stage][node.Name]) == 0 { - StageParseCache[stage][node.Name] = make([]dumps.ParserResult, 0) + if len(StageParseCache[stage][nodes[idx].Name]) == 0 { + StageParseCache[stage][nodes[idx].Name] = make([]dumps.ParserResult, 0) parserIdxInStage = len(StageParseCache[stage]) } else { - parserIdxInStage = StageParseCache[stage][node.Name][0].Idx + parserIdxInStage = StageParseCache[stage][nodes[idx].Name][0].Idx } StageParseMutex.Unlock() evtcopy := deepcopy.Copy(event) parserInfo := dumps.ParserResult{Evt: evtcopy.(types.Event), Success: ret, Idx: parserIdxInStage} StageParseMutex.Lock() - StageParseCache[stage][node.Name] = append(StageParseCache[stage][node.Name], parserInfo) + StageParseCache[stage][nodes[idx].Name] = append(StageParseCache[stage][nodes[idx].Name], parserInfo) StageParseMutex.Unlock() } if ret { isStageOK = true } - if ret && node.OnSuccess == "next_stage" { + if ret && nodes[idx].OnSuccess == "next_stage" { clog.Debugf("node successful, stop end stage %s", stage) break } diff --git a/pkg/parser/stage.go b/pkg/parser/stage.go index b98db350254..ddc07ca7f1d 100644 --- a/pkg/parser/stage.go +++ b/pkg/parser/stage.go @@ -114,10 +114,12 @@ func LoadStages(stageFiles []Stagefile, pctx *UnixParserCtx, ectx EnricherCtx) ( for _, data := range node.Data { err = exprhelpers.FileInit(pctx.DataFolder, data.DestPath, data.Type) if err != nil { - log.Error(err) + 
log.Error(err.Error()) } if data.Type == "regexp" { //cache only makes sense for regexp - exprhelpers.RegexpCacheInit(data.DestPath, *data) + if err = exprhelpers.RegexpCacheInit(data.DestPath, *data); err != nil { + log.Error(err.Error()) + } } } diff --git a/pkg/parser/whitelist_test.go b/pkg/parser/whitelist_test.go index 02846f17fc1..a3b95b2fa3f 100644 --- a/pkg/parser/whitelist_test.go +++ b/pkg/parser/whitelist_test.go @@ -284,9 +284,9 @@ func TestWhitelistCheck(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var err error node.Whitelist = tt.whitelist - node.CompileWLs() + _, err := node.CompileWLs() + require.NoError(t, err) isWhitelisted := node.CheckIPsWL(tt.event) if !isWhitelisted { isWhitelisted, err = node.CheckExprWL(map[string]interface{}{"evt": tt.event}, tt.event) diff --git a/pkg/setup/detect_test.go b/pkg/setup/detect_test.go index 588e74dab54..72356bc1924 100644 --- a/pkg/setup/detect_test.go +++ b/pkg/setup/detect_test.go @@ -54,15 +54,20 @@ func TestSetupHelperProcess(t *testing.T) { } fmt.Fprint(os.Stdout, fakeSystemctlOutput) - os.Exit(0) + os.Exit(0) //nolint:revive } func tempYAML(t *testing.T, content string) os.File { t.Helper() require := require.New(t) - file, err := os.CreateTemp("", "") + file, err := os.CreateTemp(t.TempDir(), "") require.NoError(err) + t.Cleanup(func() { + require.NoError(file.Close()) + require.NoError(os.Remove(file.Name())) + }) + _, err = file.WriteString(dedent.Dedent(content)) require.NoError(err) @@ -249,7 +254,6 @@ func TestListSupported(t *testing.T) { t.Parallel() f := tempYAML(t, tc.yml) - defer os.Remove(f.Name()) supported, err := setup.ListSupported(&f) cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -375,7 +379,6 @@ func TestDetectSimpleRule(t *testing.T) { - false ugly: `) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{}) require.NoError(err) @@ -421,7 +424,6 @@ detect: for _, tc := range tests { t.Run(tc.name, func(t 
*testing.T) { f := tempYAML(t, tc.config) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{}) cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -514,7 +516,6 @@ detect: for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { f := tempYAML(t, tc.config) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{}) cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -542,7 +543,6 @@ func TestDetectForcedUnit(t *testing.T) { journalctl_filter: - _SYSTEMD_UNIT=crowdsec-setup-forced.service `) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{ForcedUnits: []string{"crowdsec-setup-forced.service"}}) require.NoError(err) @@ -580,7 +580,6 @@ func TestDetectForcedProcess(t *testing.T) { when: - ProcessRunning("foobar") `) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{ForcedProcesses: []string{"foobar"}}) require.NoError(err) @@ -610,7 +609,6 @@ func TestDetectSkipService(t *testing.T) { when: - ProcessRunning("foobar") `) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{ForcedProcesses: []string{"foobar"}, SkipServices: []string{"wizard"}}) require.NoError(err) @@ -825,7 +823,6 @@ func TestDetectForcedOS(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { f := tempYAML(t, tc.config) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{ForcedOS: tc.forced}) cstest.RequireErrorContains(t, err, tc.expectedErr) @@ -1009,7 +1006,6 @@ func TestDetectDatasourceValidation(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { f := tempYAML(t, tc.config) - defer os.Remove(f.Name()) detected, err := setup.Detect(&f, setup.DetectOptions{}) cstest.RequireErrorContains(t, err, tc.expectedErr) require.Equal(tc.expected, detected) diff --git a/pkg/setup/install.go b/pkg/setup/install.go index d63a1ee1775..556ddab4c9a 100644 --- 
a/pkg/setup/install.go +++ b/pkg/setup/install.go @@ -13,6 +13,7 @@ import ( "gopkg.in/yaml.v3" "github.com/crowdsecurity/crowdsec/pkg/cwhub" + "github.com/crowdsecurity/crowdsec/pkg/hubops" ) // AcquisDocument is created from a SetupItem. It represents a single YAML document, and can be part of a multi-document file. @@ -47,12 +48,14 @@ func decodeSetup(input []byte, fancyErrors bool) (Setup, error) { } // InstallHubItems installs the objects recommended in a setup file. -func InstallHubItems(ctx context.Context, hub *cwhub.Hub, input []byte, dryRun bool) error { +func InstallHubItems(ctx context.Context, hub *cwhub.Hub, contentProvider cwhub.ContentProvider, input []byte, yes, dryRun, verbose bool) error { setupEnvelope, err := decodeSetup(input, false) if err != nil { return err } + plan := hubops.NewActionPlan(hub) + for _, setupItem := range setupEnvelope.Setup { forceAction := false downloadOnly := false @@ -68,70 +71,70 @@ func InstallHubItems(ctx context.Context, hub *cwhub.Hub, input []byte, dryRun b return fmt.Errorf("collection %s not found", collection) } - if dryRun { - fmt.Println("dry-run: would install collection", collection) - - continue + if err := plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, forceAction)); err != nil { + return err } - if err := item.Install(ctx, forceAction, downloadOnly); err != nil { - return fmt.Errorf("while installing collection %s: %w", item.Name, err) + if !downloadOnly { + if err := plan.AddCommand(hubops.NewEnableCommand(item, forceAction)); err != nil { + return err + } } } for _, parser := range setupItem.Install.Parsers { - if dryRun { - fmt.Println("dry-run: would install parser", parser) - - continue - } - item := hub.GetItem(cwhub.PARSERS, parser) if item == nil { return fmt.Errorf("parser %s not found", parser) } - if err := item.Install(ctx, forceAction, downloadOnly); err != nil { - return fmt.Errorf("while installing parser %s: %w", item.Name, err) + if err := 
plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, forceAction)); err != nil { + return err } - } - - for _, scenario := range setupItem.Install.Scenarios { - if dryRun { - fmt.Println("dry-run: would install scenario", scenario) - continue + if !downloadOnly { + if err := plan.AddCommand(hubops.NewEnableCommand(item, forceAction)); err != nil { + return err + } } + } + for _, scenario := range setupItem.Install.Scenarios { item := hub.GetItem(cwhub.SCENARIOS, scenario) if item == nil { return fmt.Errorf("scenario %s not found", scenario) } - if err := item.Install(ctx, forceAction, downloadOnly); err != nil { - return fmt.Errorf("while installing scenario %s: %w", item.Name, err) + if err := plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, forceAction)); err != nil { + return err } - } - - for _, postoverflow := range setupItem.Install.PostOverflows { - if dryRun { - fmt.Println("dry-run: would install postoverflow", postoverflow) - continue + if !downloadOnly { + if err := plan.AddCommand(hubops.NewEnableCommand(item, forceAction)); err != nil { + return err + } } + } + for _, postoverflow := range setupItem.Install.PostOverflows { item := hub.GetItem(cwhub.POSTOVERFLOWS, postoverflow) if item == nil { return fmt.Errorf("postoverflow %s not found", postoverflow) } - if err := item.Install(ctx, forceAction, downloadOnly); err != nil { - return fmt.Errorf("while installing postoverflow %s: %w", item.Name, err) + if err := plan.AddCommand(hubops.NewDownloadCommand(item, contentProvider, forceAction)); err != nil { + return err + } + + if !downloadOnly { + if err := plan.AddCommand(hubops.NewEnableCommand(item, forceAction)); err != nil { + return err + } } } } - return nil + return plan.Execute(ctx, yes, dryRun, verbose) } // marshalAcquisDocuments creates the monolithic file, or itemized files (if a directory is provided) with the acquisition documents. 
@@ -189,7 +192,9 @@ func marshalAcquisDocuments(ads []AcquisDocument, toDir string) (string, error) return "", fmt.Errorf("while writing to %s: %w", ad.AcquisFilename, err) } - f.Sync() + if err = f.Sync(); err != nil { + return "", fmt.Errorf("while syncing %s: %w", ad.AcquisFilename, err) + } continue } diff --git a/pkg/types/appsec_event.go b/pkg/types/appsec_event.go index 11d70ad368d..54163f53fef 100644 --- a/pkg/types/appsec_event.go +++ b/pkg/types/appsec_event.go @@ -60,7 +60,6 @@ func (w AppsecEvent) GetVar(varName string) string { } log.Infof("var %s not found. Available variables: %+v", varName, w.Vars) return "" - } // getters diff --git a/pkg/types/constants.go b/pkg/types/constants.go index acb5b5bfacf..2421b076b97 100644 --- a/pkg/types/constants.go +++ b/pkg/types/constants.go @@ -1,23 +1,29 @@ package types -const ApiKeyAuthType = "api-key" -const TlsAuthType = "tls" -const PasswordAuthType = "password" +const ( + ApiKeyAuthType = "api-key" + TlsAuthType = "tls" + PasswordAuthType = "password" +) -const PAPIBaseURL = "https://papi.api.crowdsec.net/" -const PAPIVersion = "v1" -const PAPIPollUrl = "/decisions/stream/poll" -const PAPIPermissionsUrl = "/permissions" +const ( + PAPIBaseURL = "https://papi.api.crowdsec.net/" + PAPIVersion = "v1" + PAPIPollUrl = "/decisions/stream/poll" + PAPIPermissionsUrl = "/permissions" +) const CAPIBaseURL = "https://api.crowdsec.net/" -const CscliOrigin = "cscli" -const CrowdSecOrigin = "crowdsec" -const ConsoleOrigin = "console" -const CscliImportOrigin = "cscli-import" -const ListOrigin = "lists" -const CAPIOrigin = "CAPI" -const CommunityBlocklistPullSourceScope = "crowdsecurity/community-blocklist" +const ( + CscliOrigin = "cscli" + CrowdSecOrigin = "crowdsec" + ConsoleOrigin = "console" + CscliImportOrigin = "cscli-import" + ListOrigin = "lists" + CAPIOrigin = "CAPI" + CommunityBlocklistPullSourceScope = "crowdsecurity/community-blocklist" +) const DecisionTypeBan = "ban" diff --git a/pkg/types/event.go 
b/pkg/types/event.go index 9300626b927..0b09bf7cbdf 100644 --- a/pkg/types/event.go +++ b/pkg/types/event.go @@ -60,6 +60,7 @@ func MakeEvent(timeMachine bool, evtType int, process bool) Event { if timeMachine { evt.ExpectMode = TIMEMACHINE } + return evt } @@ -97,8 +98,9 @@ func (e *Event) GetType() string { func (e *Event) GetMeta(key string) string { if e.Type == OVFLW { - for _, alert := range e.Overflow.APIAlerts { - for _, event := range alert.Events { + alerts := e.Overflow.APIAlerts + for idx := range alerts { + for _, event := range alerts[idx].Events { if event.GetMeta(key) != "" { return event.GetMeta(key) } diff --git a/pkg/types/event_test.go b/pkg/types/event_test.go index 97b13f96d9a..638e42fe757 100644 --- a/pkg/types/event_test.go +++ b/pkg/types/event_test.go @@ -46,7 +46,6 @@ func TestSetParsed(t *testing.T) { assert.Equal(t, tt.value, tt.evt.Parsed[tt.key]) }) } - } func TestSetMeta(t *testing.T) { @@ -86,7 +85,6 @@ func TestSetMeta(t *testing.T) { assert.Equal(t, tt.value, tt.evt.GetMeta(tt.key)) }) } - } func TestParseIPSources(t *testing.T) { diff --git a/pkg/types/getfstype.go b/pkg/types/getfstype.go index 728e986bed0..c16fe86ec9c 100644 --- a/pkg/types/getfstype.go +++ b/pkg/types/getfstype.go @@ -100,7 +100,6 @@ func GetFSType(path string) (string, error) { var buf unix.Statfs_t err := unix.Statfs(path, &buf) - if err != nil { return "", err } diff --git a/pkg/types/ip.go b/pkg/types/ip.go index 9d08afd8809..3f52a7ccf18 100644 --- a/pkg/types/ip.go +++ b/pkg/types/ip.go @@ -2,7 +2,6 @@ package types import ( "encoding/binary" - "errors" "fmt" "math" "net" @@ -23,7 +22,8 @@ func LastAddress(n net.IPNet) net.IP { ip[6] | ^n.Mask[6], ip[7] | ^n.Mask[7], ip[8] | ^n.Mask[8], ip[9] | ^n.Mask[9], ip[10] | ^n.Mask[10], ip[11] | ^n.Mask[11], ip[12] | ^n.Mask[12], ip[13] | ^n.Mask[13], ip[14] | ^n.Mask[14], - ip[15] | ^n.Mask[15]} + ip[15] | ^n.Mask[15], + } } return net.IPv4( @@ -38,7 +38,7 @@ func Addr2Ints(anyIP string) (int, int64, int64, 
int64, int64, error) { if strings.Contains(anyIP, "/") { _, net, err := net.ParseCIDR(anyIP) if err != nil { - return -1, 0, 0, 0, 0, fmt.Errorf("while parsing range %s: %w", anyIP, err) + return -1, 0, 0, 0, 0, fmt.Errorf("invalid ip range '%s': %w", anyIP, err) } return Range2Ints(*net) @@ -46,12 +46,12 @@ func Addr2Ints(anyIP string) (int, int64, int64, int64, int64, error) { ip := net.ParseIP(anyIP) if ip == nil { - return -1, 0, 0, 0, 0, errors.New("invalid address") + return -1, 0, 0, 0, 0, fmt.Errorf("invalid ip address '%s'", anyIP) } sz, start, end, err := IP2Ints(ip) if err != nil { - return -1, 0, 0, 0, 0, fmt.Errorf("while parsing ip %s: %w", anyIP, err) + return -1, 0, 0, 0, 0, fmt.Errorf("invalid ip address '%s': %w", anyIP, err) } return sz, start, end, start, end, nil diff --git a/pkg/types/ip_test.go b/pkg/types/ip_test.go index f8c14b12e3c..571163761d4 100644 --- a/pkg/types/ip_test.go +++ b/pkg/types/ip_test.go @@ -8,21 +8,21 @@ import ( ) func TestIP2Int(t *testing.T) { - tEmpty := net.IP{} + _, _, _, err := IP2Ints(tEmpty) if !strings.Contains(err.Error(), "unexpected len 0 for ") { t.Fatalf("unexpected: %s", err) } } + func TestRange2Int(t *testing.T) { tEmpty := net.IPNet{} - //empty item + // empty item _, _, _, _, _, err := Range2Ints(tEmpty) if !strings.Contains(err.Error(), "converting first ip in range") { t.Fatalf("unexpected: %s", err) } - } func TestAdd2Int(t *testing.T) { @@ -181,7 +181,7 @@ func TestAdd2Int(t *testing.T) { }, { in_addr: "xxx2", - exp_error: "invalid address", + exp_error: "invalid ip address 'xxx2'", }, } @@ -190,31 +190,37 @@ func TestAdd2Int(t *testing.T) { if err != nil && test.exp_error == "" { t.Fatalf("%d unexpected error : %s", idx, err) } + if test.exp_error != "" { if !strings.Contains(err.Error(), test.exp_error) { t.Fatalf("%d unmatched error : %s != %s", idx, err, test.exp_error) } - continue //we can skip this one + + continue // we can skip this one } + if sz != test.exp_sz { t.Fatalf("%d unexpected 
size %d != %d", idx, sz, test.exp_sz) } + if start_ip != test.exp_start_ip { t.Fatalf("%d unexpected start_ip %d != %d", idx, start_ip, test.exp_start_ip) } + if sz == 16 { if start_sfx != test.exp_start_sfx { t.Fatalf("%d unexpected start sfx %d != %d", idx, start_sfx, test.exp_start_sfx) } } + if end_ip != test.exp_end_ip { t.Fatalf("%d unexpected end ip %d != %d", idx, end_ip, test.exp_end_ip) } + if sz == 16 { if end_sfx != test.exp_end_sfx { t.Fatalf("%d unexpected end sfx %d != %d", idx, end_sfx, test.exp_end_sfx) } } - } } diff --git a/pkg/types/utils.go b/pkg/types/utils.go index 712d44ba12d..d5e4ac6f986 100644 --- a/pkg/types/utils.go +++ b/pkg/types/utils.go @@ -10,25 +10,46 @@ import ( "gopkg.in/natefinch/lumberjack.v2" ) -var logFormatter log.Formatter -var LogOutput *lumberjack.Logger //io.Writer -var logLevel log.Level +var ( + logFormatter log.Formatter + LogOutput *lumberjack.Logger // io.Writer + logLevel log.Level +) + +func SetDefaultLoggerConfig(cfgMode string, cfgFolder string, cfgLevel log.Level, maxSize int, maxFiles int, maxAge int, format string, compress *bool, forceColors bool) error { + if format == "" { + format = "text" + } + + switch format { + case "text": + logFormatter = &log.TextFormatter{ + TimestampFormat: time.RFC3339, + FullTimestamp: true, + ForceColors: forceColors, + } + case "json": + logFormatter = &log.JSONFormatter{TimestampFormat: time.RFC3339} + default: + return fmt.Errorf("unknown log_format '%s'", format) + } -func SetDefaultLoggerConfig(cfgMode string, cfgFolder string, cfgLevel log.Level, maxSize int, maxFiles int, maxAge int, compress *bool, forceColors bool) error { - /*Configure logs*/ if cfgMode == "file" { _maxsize := 500 if maxSize != 0 { _maxsize = maxSize } + _maxfiles := 3 if maxFiles != 0 { _maxfiles = maxFiles } + _maxage := 28 if maxAge != 0 { _maxage = maxAge } + _compress := true if compress != nil { _compress = *compress @@ -45,10 +66,11 @@ func SetDefaultLoggerConfig(cfgMode string, cfgFolder 
string, cfgLevel log.Level } else if cfgMode != "stdout" { return fmt.Errorf("log mode '%s' unknown", cfgMode) } + logLevel = cfgLevel log.SetLevel(logLevel) - logFormatter = &log.TextFormatter{TimestampFormat: time.RFC3339, FullTimestamp: true, ForceColors: forceColors} log.SetFormatter(logFormatter) + return nil } @@ -61,7 +83,9 @@ func ConfigureLogger(clog *log.Logger) error { if logFormatter != nil { clog.SetFormatter(logFormatter) } + clog.SetLevel(logLevel) + return nil } @@ -74,6 +98,8 @@ func IsNetworkFS(path string) (bool, string, error) { if err != nil { return false, "", err } + fsType = strings.ToLower(fsType) + return fsType == "nfs" || fsType == "cifs" || fsType == "smb" || fsType == "smb2", fsType, nil } diff --git a/rpm/SPECS/crowdsec.spec b/rpm/SPECS/crowdsec.spec index ac438ad0c14..ca912d58e49 100644 --- a/rpm/SPECS/crowdsec.spec +++ b/rpm/SPECS/crowdsec.spec @@ -143,18 +143,15 @@ rm -rf %{buildroot} #systemctl stop crowdsec || true -if [ $1 == 2 ];then - if [[ ! -d /var/lib/crowdsec/backup ]]; then - cscli config backup /var/lib/crowdsec/backup - fi -fi +#if [ $1 == 2 ]; then +# upgrade pre-install here +#fi %post -p /bin/bash #install if [ $1 == 1 ]; then - if [ ! -f "/var/lib/crowdsec/data/crowdsec.db" ] ; then touch /var/lib/crowdsec/data/crowdsec.db fi @@ -179,27 +176,21 @@ if [ $1 == 1 ]; then fi cscli hub update + cscli hub upgrade CSCLI_BIN_INSTALLED="/usr/bin/cscli" SILENT=true install_collection - echo "Get started with CrowdSec:" - echo " * Detailed guides are available in our documentation: https://docs.crowdsec.net" - echo " * Configuration items created by the community can be found at the Hub: https://hub.crowdsec.net" - echo " * Gain insights into your use of CrowdSec with the help of the console https://app.crowdsec.net" - -#upgrade -elif [ $1 == 2 ] && [ -d /var/lib/crowdsec/backup ]; then - cscli config restore /var/lib/crowdsec/backup - if [ $? 
== 0 ]; then - rm -rf /var/lib/crowdsec/backup - fi - - if [[ -f %{_sysconfdir}/crowdsec/online_api_credentials.yaml ]] ; then - chmod 600 %{_sysconfdir}/crowdsec/online_api_credentials.yaml - fi - - if [[ -f %{_sysconfdir}/crowdsec/local_api_credentials.yaml ]] ; then - chmod 600 %{_sysconfdir}/crowdsec/local_api_credentials.yaml - fi + GREEN='\033[0;32m' + BOLD='\033[1m' + RESET='\033[0m' + + echo -e "${BOLD}Get started with CrowdSec:${RESET}" + echo -e " * Go further by following our ${BOLD}post installation steps${RESET} : ${GREEN}${BOLD}https://docs.crowdsec.net/u/getting_started/next_steps${RESET}" + echo -e "====================================================================================================================" + echo -e " * Install a ${BOLD}remediation component${RESET} to block attackers: ${GREEN}${BOLD}https://docs.crowdsec.net/u/bouncers/intro${RESET}" + echo -e "====================================================================================================================" + echo -e " * Find more ${BOLD}collections${RESET}, ${BOLD}parsers${RESET} and ${BOLD}scenarios${RESET} created by the community with the Hub: ${GREEN}${BOLD}https://hub.crowdsec.net${RESET}" + echo -e "====================================================================================================================" + echo -e " * Subscribe to ${BOLD}additional blocklists${RESET}, ${BOLD}visualize${RESET} your alerts and more with the console: ${GREEN}${BOLD}https://app.crowdsec.net${RESET}" fi %systemd_post %{name}.service diff --git a/test/bats/01_crowdsec.bats b/test/bats/01_crowdsec.bats index aa5830a6bae..3df0b42a0f2 100644 --- a/test/bats/01_crowdsec.bats +++ b/test/bats/01_crowdsec.bats @@ -1,5 +1,4 @@ #!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: set -u @@ -68,6 +67,40 @@ teardown() { refute_output } +@test "crowdsec - log format" { + # fail early + config_disable_lapi + config_disable_agent + + config_set '.common.log_media="stdout"' + 
+ config_set '.common.log_format=""' + rune -0 wait-for --err "you must run at least the API Server or crowdsec" "$CROWDSEC" + assert_stderr --partial 'level=fatal msg="you must run at least the API Server or crowdsec"' + + config_set '.common.log_format="text"' + rune -0 wait-for --err "you must run at least the API Server or crowdsec" "$CROWDSEC" + assert_stderr --partial 'level=fatal msg="you must run at least the API Server or crowdsec"' + + config_set '.common.log_format="json"' + rune -0 wait-for --err "you must run at least the API Server or crowdsec" "$CROWDSEC" + rune -0 jq -c 'select(.msg=="you must run at least the API Server or crowdsec") | .level' <(stderr | grep "^{") + assert_output '"fatal"' + + # If log_media='file', a hook to stderr is added only for fatal messages, + # with a predefined formatter (level + msg, no timestamp, ignore log_format) + + config_set '.common.log_media="file"' + + config_set '.common.log_format="text"' + rune -0 wait-for --err "you must run at least the API Server or crowdsec" "$CROWDSEC" + assert_stderr --regexp 'FATAL.* you must run at least the API Server or crowdsec$' + + config_set '.common.log_format="json"' + rune -0 wait-for --err "you must run at least the API Server or crowdsec" "$CROWDSEC" + assert_stderr --regexp 'FATAL.* you must run at least the API Server or crowdsec$' +} + @test "CS_LAPI_SECRET not strong enough" { CS_LAPI_SECRET=foo rune -1 wait-for "$CROWDSEC" assert_stderr --partial "api server init: unable to run local API: controller init: CS_LAPI_SECRET not strong enough" @@ -138,6 +171,8 @@ teardown() { rune -0 ./instance-crowdsec stop } +# TODO: move acquisition tests to test/bats/crowdsec-acquisition.bats + @test "crowdsec (error if the acquisition_path file is defined but missing)" { ACQUIS_YAML=$(config_get '.crowdsec_service.acquisition_path') rm -f "$ACQUIS_YAML" @@ -278,7 +313,7 @@ teardown() { # if filenames are missing, it won't be able to detect source type config_set "$ACQUIS_YAML" 
'.source="file"' rune -1 wait-for "$CROWDSEC" - assert_stderr --partial "failed to configure datasource file: no filename or filenames configuration provided" + assert_stderr --partial "while configuring datasource of type file from $ACQUIS_YAML (position 0): no filename or filenames configuration provided" config_set "$ACQUIS_YAML" '.filenames=["file.log"]' config_set "$ACQUIS_YAML" '.meh=3' diff --git a/test/bats/01_cscli.bats b/test/bats/01_cscli.bats index 264870501a5..9af3c841759 100644 --- a/test/bats/01_cscli.bats +++ b/test/bats/01_cscli.bats @@ -33,9 +33,9 @@ teardown() { # no "usage" output after every error rune -1 cscli blahblah - # error is displayed as log entry, not with print - assert_stderr --partial 'level=fatal msg="unknown command \"blahblah\" for \"cscli\""' - refute_stderr --partial 'unknown command "blahblah" for "cscli"' + # error is displayed with print, not as a log entry + assert_stderr --partial 'unknown command "blahblah" for "cscli"' + refute_stderr --partial 'level=fatal' } @test "cscli version" { @@ -172,41 +172,13 @@ teardown() { } @test "cscli config backup / restore" { - # test that we need a valid path - # disabled because in CI, the empty string is not passed as a parameter - #rune -1 cscli config backup "" - #assert_stderr --partial "failed to backup config: directory path can't be empty" + CONFIG_DIR=$(config_get '.config_paths.config_dir') rune -1 cscli config backup "/dev/null/blah" - assert_stderr --partial "failed to backup config: while creating /dev/null/blah: mkdir /dev/null/blah: not a directory" + assert_stderr --partial "'cscli config backup' has been removed, you can manually backup/restore $CONFIG_DIR instead" - # pick a dirpath - backupdir=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) - - # succeed the first time - rune -0 cscli config backup "$backupdir" - assert_stderr --partial "Starting configuration backup" - - # don't overwrite an existing backup - rune -1 cscli config backup "$backupdir" - assert_stderr --partial 
"failed to backup config" - assert_stderr --partial "file exists" - - SIMULATION_YAML="$(config_get '.config_paths.simulation_path')" - - # restore - rm "$SIMULATION_YAML" - rune -0 cscli config restore "$backupdir" - assert_file_exists "$SIMULATION_YAML" - - # cleanup - rm -rf -- "${backupdir:?}" - - # backup: detect missing files - rm "$SIMULATION_YAML" - rune -1 cscli config backup "$backupdir" - assert_stderr --regexp "failed to backup config: failed copy .* to .*: stat .*: no such file or directory" - rm -rf -- "${backupdir:?}" + rune -1 cscli config restore "/dev/null/blah" + assert_stderr --partial "'cscli config restore' has been removed, you can manually backup/restore $CONFIG_DIR instead" } @test "'cscli completion' with or without configuration file" { @@ -294,7 +266,7 @@ teardown() { # it is possible to enable subcommands with feature flags defined in feature.yaml rune -1 cscli setup - assert_stderr --partial 'unknown command \"setup\" for \"cscli\"' + assert_stderr --partial 'unknown command "setup" for "cscli"' CONFIG_DIR=$(dirname "$CONFIG_YAML") echo ' - cscli_setup' >> "$CONFIG_DIR"/feature.yaml rune -0 cscli setup diff --git a/test/bats/01_cscli_lapi.bats b/test/bats/01_cscli_lapi.bats index 6e876576a6e..005eb15e141 100644 --- a/test/bats/01_cscli_lapi.bats +++ b/test/bats/01_cscli_lapi.bats @@ -113,9 +113,8 @@ teardown() { LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') config_set "$LOCAL_API_CREDENTIALS" '.url="http://127.0.0.1:-80"' - rune -1 cscli lapi status -o json - rune -0 jq -r '.msg' <(stderr) - assert_output 'failed to authenticate to Local API (LAPI): parse "http://127.0.0.1:-80/": invalid port ":-80" after host' + rune -1 cscli lapi status + assert_stderr 'Error: failed to authenticate to Local API (LAPI): parse "http://127.0.0.1:-80/": invalid port ":-80" after host' } @test "cscli - bad LAPI password" { @@ -123,9 +122,8 @@ teardown() { LOCAL_API_CREDENTIALS=$(config_get '.api.client.credentials_path') config_set 
"$LOCAL_API_CREDENTIALS" '.password="meh"' - rune -1 cscli lapi status -o json - rune -0 jq -r '.msg' <(stderr) - assert_output 'failed to authenticate to Local API (LAPI): API error: incorrect Username or Password' + rune -1 cscli lapi status + assert_stderr 'Error: failed to authenticate to Local API (LAPI): API error: incorrect Username or Password' } @test "cscli lapi register / machines validate" { @@ -189,8 +187,10 @@ teardown() { rune -1 cscli lapi register --machine malicious --token 123456789012345678901234badtoken assert_stderr --partial "401 Unauthorized: API error: invalid token for auto registration" - rune -1 cscli machines inspect malicious -o json - assert_stderr --partial "unable to read machine data 'malicious': user 'malicious': user doesn't exist" + rune -1 cscli machines inspect malicious + # XXX: we may want to remove this warning + assert_stderr --partial 'QueryMachineByID : ent: machine not found' + assert_stderr --partial "Error: unable to read machine data 'malicious': user 'malicious': user doesn't exist" rune -0 cscli lapi register --machine newmachine --token 12345678901234567890123456789012 assert_stderr --partial "Successfully registered to Local API" diff --git a/test/bats/02_nolapi.bats b/test/bats/02_nolapi.bats index cefa6d798b4..70495a0ed91 100644 --- a/test/bats/02_nolapi.bats +++ b/test/bats/02_nolapi.bats @@ -66,18 +66,6 @@ teardown() { refute_output --partial "Local API Server" } -@test "cscli config backup" { - config_disable_lapi - backupdir=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) - rune -0 cscli config backup "$backupdir" - assert_stderr --partial "Starting configuration backup" - rune -1 cscli config backup "$backupdir" - rm -rf -- "${backupdir:?}" - - assert_stderr --partial "failed to backup config" - assert_stderr --partial "file exists" -} - @test "lapi status shouldn't be ok without api.server" { config_disable_lapi rune -1 cscli machines list diff --git a/test/bats/03_noagent.bats b/test/bats/03_noagent.bats index 
6be5101cee2..972b84977ad 100644 --- a/test/bats/03_noagent.bats +++ b/test/bats/03_noagent.bats @@ -60,18 +60,6 @@ teardown() { refute_output --partial "Crowdsec" } -@test "no agent: cscli config backup" { - config_disable_agent - backupdir=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) - rune -0 cscli config backup "$backupdir" - assert_stderr --partial "Starting configuration backup" - rune -1 cscli config backup "$backupdir" - - assert_stderr --partial "failed to backup config" - assert_stderr --partial "file exists" - rm -rf -- "${backupdir:?}" -} - @test "no agent: lapi status should be ok" { config_disable_agent ./instance-crowdsec start diff --git a/test/bats/04_nocapi.bats b/test/bats/04_nocapi.bats index d22a6f0a953..eaeb0939112 100644 --- a/test/bats/04_nocapi.bats +++ b/test/bats/04_nocapi.bats @@ -51,17 +51,6 @@ teardown() { assert_output --regexp "Global:.*Crowdsec.*cscli:.*Local API Server:" } -@test "no agent: cscli config backup" { - config_disable_capi - backupdir=$(TMPDIR="$BATS_TEST_TMPDIR" mktemp -u) - rune -0 cscli config backup "$backupdir" - assert_stderr --partial "Starting configuration backup" - rune -1 cscli config backup "$backupdir" - assert_stderr --partial "failed to backup config" - assert_stderr --partial "file exists" - rm -rf -- "${backupdir:?}" -} - @test "without capi: cscli lapi status -> success" { config_disable_capi ./instance-crowdsec start @@ -76,5 +65,5 @@ teardown() { rune -0 cscli metrics assert_output --partial "Route" assert_output --partial '/v1/watchers/login' - assert_output --partial "Local API Metrics:" + assert_output --partial "Local API Metrics" } diff --git a/test/bats/07_setup.bats b/test/bats/07_setup.bats index f832ac572d2..72a8b64a57a 100644 --- a/test/bats/07_setup.bats +++ b/test/bats/07_setup.bats @@ -511,8 +511,9 @@ update-notifier-motd.timer enabled enabled rune -0 jq -e '.installed == false' <(output) # we install it - rune -0 cscli setup install-hub /dev/stdin --dry-run <<< 
'{"setup":[{"install":{"collections":["crowdsecurity/apache2"]}}]}' - assert_output 'dry-run: would install collection crowdsecurity/apache2' + rune -0 cscli setup install-hub /dev/stdin --dry-run --output raw <<< '{"setup":[{"install":{"collections":["crowdsecurity/apache2"]}}]}' + assert_line --regexp 'download collections:crowdsecurity/apache2' + assert_line --regexp 'enable collections:crowdsecurity/apache2' # still not installed rune -0 cscli collections inspect crowdsecurity/apache2 -o json @@ -520,8 +521,8 @@ update-notifier-motd.timer enabled enabled # same with dependencies rune -0 cscli collections remove --all - rune -0 cscli setup install-hub /dev/stdin --dry-run <<< '{"setup":[{"install":{"collections":["crowdsecurity/linux"]}}]}' - assert_output 'dry-run: would install collection crowdsecurity/linux' + rune -0 cscli setup install-hub /dev/stdin --dry-run --output raw <<< '{"setup":[{"install":{"collections":["crowdsecurity/linux"]}}]}' + assert_line --regexp 'enable collections:crowdsecurity/linux' } @test "cscli setup install-hub (dry run: install multiple collections)" { @@ -530,8 +531,8 @@ update-notifier-motd.timer enabled enabled rune -0 jq -e '.installed == false' <(output) # we install it - rune -0 cscli setup install-hub /dev/stdin --dry-run <<< '{"setup":[{"install":{"collections":["crowdsecurity/apache2"]}}]}' - assert_output 'dry-run: would install collection crowdsecurity/apache2' + rune -0 cscli setup install-hub /dev/stdin --dry-run --output raw <<< '{"setup":[{"install":{"collections":["crowdsecurity/apache2"]}}]}' + assert_line --regexp 'enable collections:crowdsecurity/apache2' # still not installed rune -0 cscli collections inspect crowdsecurity/apache2 -o json @@ -539,15 +540,15 @@ update-notifier-motd.timer enabled enabled } @test "cscli setup install-hub (dry run: install multiple collections, parsers, scenarios, postoverflows)" { - rune -0 cscli setup install-hub /dev/stdin --dry-run <<< 
'{"setup":[{"install":{"collections":["crowdsecurity/aws-console","crowdsecurity/caddy"],"parsers":["crowdsecurity/asterisk-logs"],"scenarios":["crowdsecurity/smb-fs"],"postoverflows":["crowdsecurity/cdn-whitelist","crowdsecurity/rdns"]}}]}' - assert_line 'dry-run: would install collection crowdsecurity/aws-console' - assert_line 'dry-run: would install collection crowdsecurity/caddy' - assert_line 'dry-run: would install parser crowdsecurity/asterisk-logs' - assert_line 'dry-run: would install scenario crowdsecurity/smb-fs' - assert_line 'dry-run: would install postoverflow crowdsecurity/cdn-whitelist' - assert_line 'dry-run: would install postoverflow crowdsecurity/rdns' - - rune -1 cscli setup install-hub /dev/stdin --dry-run <<< '{"setup":[{"install":{"collections":["crowdsecurity/foo"]}}]}' + rune -0 cscli setup install-hub /dev/stdin --dry-run --output raw <<< '{"setup":[{"install":{"collections":["crowdsecurity/aws-console","crowdsecurity/caddy"],"parsers":["crowdsecurity/asterisk-logs"],"scenarios":["crowdsecurity/smb-bf"],"postoverflows":["crowdsecurity/cdn-whitelist","crowdsecurity/rdns"]}}]}' + assert_line --regexp 'enable collections:crowdsecurity/aws-console' + assert_line --regexp 'enable collections:crowdsecurity/caddy' + assert_line --regexp 'enable parsers:crowdsecurity/asterisk-logs' + assert_line --regexp 'enable scenarios:crowdsecurity/smb-bf' + assert_line --regexp 'enable postoverflows:crowdsecurity/cdn-whitelist' + assert_line --regexp 'enable postoverflows:crowdsecurity/rdns' + + rune -1 cscli setup install-hub /dev/stdin --dry-run --output raw <<< '{"setup":[{"install":{"collections":["crowdsecurity/foo"]}}]}' assert_stderr --partial 'collection crowdsecurity/foo not found' } diff --git a/test/bats/08_metrics.bats b/test/bats/08_metrics.bats index e260e667524..f3be9c60a95 100644 --- a/test/bats/08_metrics.bats +++ b/test/bats/08_metrics.bats @@ -66,7 +66,7 @@ teardown() { rune -0 cscli metrics assert_output --partial "Route" assert_output 
--partial '/v1/watchers/login' - assert_output --partial "Local API Metrics:" + assert_output --partial "Local API Metrics" rune -0 cscli metrics -o json rune -0 jq 'keys' <(output) @@ -93,7 +93,7 @@ teardown() { assert_stderr --partial "unknown metrics type: foobar" rune -0 cscli metrics show lapi - assert_output --partial "Local API Metrics:" + assert_output --partial "Local API Metrics" assert_output --regexp "Route.*Method.*Hits" assert_output --regexp "/v1/watchers/login.*POST" diff --git a/test/bats/08_metrics_bouncer.bats b/test/bats/08_metrics_bouncer.bats index c4dfebbab1d..5fb2c543bda 100644 --- a/test/bats/08_metrics_bouncer.bats +++ b/test/bats/08_metrics_bouncer.bats @@ -136,7 +136,10 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT - Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC: + +--------------------------+ + | Bouncer Metrics (testbou | + | ncer) since 2024-02-08 1 | + | 3:35:16 +0000 UTC | +--------+-----------------+ | Origin | foo | | | dogyear | pound | @@ -226,7 +229,8 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT - Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC: + +-------------------------------------------------------------------------------------------+ + | Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC | +----------------------------------+------------------+-------------------+-----------------+ | Origin | active_decisions | dropped | foo | | | IPs | bytes | packets | dogyear | pound | @@ -309,7 +313,8 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT - Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC: + +-------------------------------------------------------------------------------------------+ + | Bouncer Metrics (testbouncer) since 2024-02-08 13:35:16 +0000 UTC | +----------------------------------+------------------+-------------------+-----------------+ | Origin | 
active_decisions | dropped | foo | | | IPs | bytes | packets | dogyear | pound | @@ -365,7 +370,9 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT - Bouncer Metrics (testbouncer) since 2024-02-09 03:40:00 +0000 UTC: + +-----------------------------------------------+ + | Bouncer Metrics (testbouncer) since 2024-02-0 | + | 9 03:40:00 +0000 UTC | +--------------------------+--------+-----------+ | Origin | ima | notagauge | | | second | inch | @@ -417,7 +424,9 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT - Bouncer Metrics (testbouncer) since 2024-02-09 03:40:00 +0000 UTC: + +---------------------------------------------+ + | Bouncer Metrics (testbouncer) since 2024-02 | + | -09 03:40:00 +0000 UTC | +--------------------------+------------------+ | Origin | active_decisions | | | IPs | @@ -502,7 +511,9 @@ teardown() { rune -0 cscli metrics show bouncers assert_output - <<-EOT - Bouncer Metrics (bouncer1) since 2024-02-08 13:35:16 +0000 UTC: + +--------------------------------------------------------------+ + | Bouncer Metrics (bouncer1) since 2024-02-08 13:35:16 +0000 U | + | TC | +----------------------------+---------+-----------------------+ | Origin | dropped | processed | | | bytes | bytes | packets | @@ -512,8 +523,9 @@ teardown() { +----------------------------+---------+-----------+-----------+ | Total | 1.80k | 12.34k | 100 | +----------------------------+---------+-----------+-----------+ - - Bouncer Metrics (bouncer2) since 2024-02-08 10:48:36 +0000 UTC: + +------------------------------------------------+ + | Bouncer Metrics (bouncer2) since 2024-02-08 10 | + | :48:36 +0000 UTC | +----------------------------+-------------------+ | Origin | dropped | | | bytes | packets | diff --git a/test/bats/10_bouncers.bats b/test/bats/10_bouncers.bats index b1c90116dd2..c9ee1b0cd0c 100644 --- a/test/bats/10_bouncers.bats +++ b/test/bats/10_bouncers.bats @@ -117,12 +117,9 @@ teardown() { @test "we can't add the 
same bouncer twice" { rune -0 cscli bouncers add ciTestBouncer - rune -1 cscli bouncers add ciTestBouncer -o json + rune -1 cscli bouncers add ciTestBouncer - # XXX temporary hack to filter out unwanted log lines that may appear before - # log configuration (= not json) - rune -0 jq -c '[.level,.msg]' <(stderr | grep "^{") - assert_output '["fatal","unable to create bouncer: bouncer ciTestBouncer already exists"]' + assert_stderr 'Error: unable to create bouncer: bouncer ciTestBouncer already exists' rune -0 cscli bouncers list -o json rune -0 jq '. | length' <(output) diff --git a/test/bats/20_hub.bats b/test/bats/20_hub.bats index b8fa1e9efca..b03b58732fa 100644 --- a/test/bats/20_hub.bats +++ b/test/bats/20_hub.bats @@ -20,7 +20,6 @@ setup() { load "../lib/setup.sh" load "../lib/bats-file/load.bash" ./instance-data load - hub_strip_index } teardown() { @@ -76,15 +75,15 @@ teardown() { assert_stderr --partial "invalid hub item appsec-rules:crowdsecurity/vpatch-laravel-debug-mode: latest version missing from index" rune -1 cscli appsec-rules install crowdsecurity/vpatch-laravel-debug-mode --force - assert_stderr --partial "error while installing 'crowdsecurity/vpatch-laravel-debug-mode': latest hash missing from index. The index file is invalid, please run 'cscli hub update' and try again" + assert_stderr --partial "appsec-rules:crowdsecurity/vpatch-laravel-debug-mode: latest hash missing from index. 
The index file is invalid, please run 'cscli hub update' and try again" } @test "missing reference in hub index" { new_hub=$(jq <"$INDEX_PATH" 'del(.parsers."crowdsecurity/smb-logs") | del (.scenarios."crowdsecurity/mysql-bf")') echo "$new_hub" >"$INDEX_PATH" rune -0 cscli hub list --error - assert_stderr --partial "can't find crowdsecurity/smb-logs in parsers, required by crowdsecurity/smb" - assert_stderr --partial "can't find crowdsecurity/mysql-bf in scenarios, required by crowdsecurity/mysql" + assert_stderr --partial "can't find parsers:crowdsecurity/smb-logs, required by crowdsecurity/smb" + assert_stderr --partial "can't find scenarios:crowdsecurity/mysql-bf, required by crowdsecurity/mysql" } @test "loading hub reports tainted items (subitem is tainted)" { @@ -108,47 +107,28 @@ teardown() { @test "cscli hub update" { rm -f "$INDEX_PATH" rune -0 cscli hub update - assert_stderr --partial "Wrote index to $INDEX_PATH" + assert_output "Downloading $INDEX_PATH" rune -0 cscli hub update - assert_stderr --partial "hub index is up to date" + assert_output "Nothing to do, the hub index is up to date." 
} -@test "cscli hub upgrade" { +@test "cscli hub upgrade (up to date)" { rune -0 cscli hub upgrade - assert_stderr --partial "Upgrading parsers" - assert_stderr --partial "Upgraded 0 parsers" - assert_stderr --partial "Upgrading postoverflows" - assert_stderr --partial "Upgraded 0 postoverflows" - assert_stderr --partial "Upgrading scenarios" - assert_stderr --partial "Upgraded 0 scenarios" - assert_stderr --partial "Upgrading contexts" - assert_stderr --partial "Upgraded 0 contexts" - assert_stderr --partial "Upgrading collections" - assert_stderr --partial "Upgraded 0 collections" - assert_stderr --partial "Upgrading appsec-configs" - assert_stderr --partial "Upgraded 0 appsec-configs" - assert_stderr --partial "Upgrading appsec-rules" - assert_stderr --partial "Upgraded 0 appsec-rules" - assert_stderr --partial "Upgrading collections" - assert_stderr --partial "Upgraded 0 collections" + refute_output rune -0 cscli parsers install crowdsecurity/syslog-logs - rune -0 cscli hub upgrade - assert_stderr --partial "crowdsecurity/syslog-logs: up-to-date" - rune -0 cscli hub upgrade --force - assert_stderr --partial "crowdsecurity/syslog-logs: up-to-date" - assert_stderr --partial "crowdsecurity/syslog-logs: updated" - assert_stderr --partial "Upgraded 1 parsers" - # this is used by the cron script to know if the hub was updated - assert_output --partial "updated crowdsecurity/syslog-logs" + refute_output + skip "todo: data files are re-downloaded with --force" } @test "cscli hub upgrade (with local items)" { mkdir -p "$CONFIG_DIR/collections" touch "$CONFIG_DIR/collections/foo.yaml" rune -0 cscli hub upgrade - assert_stderr --partial "not upgrading foo.yaml: local item" + assert_output - <<-EOT + collections:foo.yaml - not downloading local item + EOT } @test "cscli hub types" { diff --git a/test/bats/20_hub_collections.bats b/test/bats/20_hub_collections.bats deleted file mode 100644 index 6822339ae40..00000000000 --- a/test/bats/20_hub_collections.bats +++ /dev/null 
@@ -1,381 +0,0 @@ -#!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: - -set -u - -setup_file() { - load "../lib/setup_file.sh" - ./instance-data load - HUB_DIR=$(config_get '.config_paths.hub_dir') - export HUB_DIR - INDEX_PATH=$(config_get '.config_paths.index_path') - export INDEX_PATH - CONFIG_DIR=$(config_get '.config_paths.config_dir') - export CONFIG_DIR -} - -teardown_file() { - load "../lib/teardown_file.sh" -} - -setup() { - load "../lib/setup.sh" - load "../lib/bats-file/load.bash" - ./instance-data load - hub_strip_index -} - -teardown() { - ./instance-crowdsec stop -} - -#---------- - -@test "cscli collections list" { - hub_purge_all - - # no items - rune -0 cscli collections list - assert_output --partial "COLLECTIONS" - rune -0 cscli collections list -o json - assert_json '{collections:[]}' - rune -0 cscli collections list -o raw - assert_output 'name,status,version,description' - - # some items - rune -0 cscli collections install crowdsecurity/sshd crowdsecurity/smb - - rune -0 cscli collections list - assert_output --partial crowdsecurity/sshd - assert_output --partial crowdsecurity/smb - rune -0 grep -c enabled <(output) - assert_output "2" - - rune -0 cscli collections list -o json - assert_output --partial crowdsecurity/sshd - assert_output --partial crowdsecurity/smb - rune -0 jq '.collections | length' <(output) - assert_output "2" - - rune -0 cscli collections list -o raw - assert_output --partial crowdsecurity/sshd - assert_output --partial crowdsecurity/smb - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" -} - -@test "cscli collections list -a" { - expected=$(jq <"$INDEX_PATH" -r '.collections | length') - - rune -0 cscli collections list -a - rune -0 grep -c disabled <(output) - assert_output "$expected" - - rune -0 cscli collections list -o json -a - rune -0 jq '.collections | length' <(output) - assert_output "$expected" - - rune -0 cscli collections list -o raw -a - rune -0 grep -vc 
'name,status,version,description' <(output) - assert_output "$expected" - - # the list should be the same in all formats, and sorted (not case sensitive) - - list_raw=$(cscli collections list -o raw -a | tail -n +2 | cut -d, -f1) - list_human=$(cscli collections list -o human -a | tail -n +6 | head -n -1 | cut -d' ' -f2) - list_json=$(cscli collections list -o json -a | jq -r '.collections[].name') - - rune -0 sort -f <<<"$list_raw" - assert_output "$list_raw" - - assert_equal "$list_raw" "$list_json" - assert_equal "$list_raw" "$list_human" -} - -@test "cscli collections list [collection]..." { - # non-existent - rune -1 cscli collections install foo/bar - assert_stderr --partial "can't find 'foo/bar' in collections" - - # not installed - rune -0 cscli collections list crowdsecurity/smb - assert_output --regexp 'crowdsecurity/smb.*disabled' - - # install two items - rune -0 cscli collections install crowdsecurity/sshd crowdsecurity/smb - - # list an installed item - rune -0 cscli collections list crowdsecurity/sshd - assert_output --regexp "crowdsecurity/sshd" - refute_output --partial "crowdsecurity/smb" - - # list multiple installed and non installed items - rune -0 cscli collections list crowdsecurity/sshd crowdsecurity/smb crowdsecurity/nginx - assert_output --partial "crowdsecurity/sshd" - assert_output --partial "crowdsecurity/smb" - assert_output --partial "crowdsecurity/nginx" - - rune -0 cscli collections list crowdsecurity/sshd -o json - rune -0 jq '.collections | length' <(output) - assert_output "1" - rune -0 cscli collections list crowdsecurity/sshd crowdsecurity/smb crowdsecurity/nginx -o json - rune -0 jq '.collections | length' <(output) - assert_output "3" - - rune -0 cscli collections list crowdsecurity/sshd -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "1" - rune -0 cscli collections list crowdsecurity/sshd crowdsecurity/smb -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - 
assert_output "2" -} - -@test "cscli collections install" { - rune -1 cscli collections install - assert_stderr --partial 'requires at least 1 arg(s), only received 0' - - # not in hub - rune -1 cscli collections install crowdsecurity/blahblah - assert_stderr --partial "can't find 'crowdsecurity/blahblah' in collections" - - # simple install - rune -0 cscli collections install crowdsecurity/sshd - rune -0 cscli collections inspect crowdsecurity/sshd --no-metrics - assert_output --partial 'crowdsecurity/sshd' - assert_output --partial 'installed: true' - - # autocorrect - rune -1 cscli collections install crowdsecurity/ssshd - assert_stderr --partial "can't find 'crowdsecurity/ssshd' in collections, did you mean 'crowdsecurity/sshd'?" - - # install multiple - rune -0 cscli collections install crowdsecurity/sshd crowdsecurity/smb - rune -0 cscli collections inspect crowdsecurity/sshd --no-metrics - assert_output --partial 'crowdsecurity/sshd' - assert_output --partial 'installed: true' - rune -0 cscli collections inspect crowdsecurity/smb --no-metrics - assert_output --partial 'crowdsecurity/smb' - assert_output --partial 'installed: true' -} - -@test "cscli collections install (file location and download-only)" { - rune -0 cscli collections install crowdsecurity/linux --download-only - rune -0 cscli collections inspect crowdsecurity/linux --no-metrics - assert_output --partial 'crowdsecurity/linux' - assert_output --partial 'installed: false' - assert_file_exists "$HUB_DIR/collections/crowdsecurity/linux.yaml" - assert_file_not_exists "$CONFIG_DIR/collections/linux.yaml" - - rune -0 cscli collections install crowdsecurity/linux - rune -0 cscli collections inspect crowdsecurity/linux --no-metrics - assert_output --partial 'installed: true' - assert_file_exists "$CONFIG_DIR/collections/linux.yaml" -} - -@test "cscli collections install --force (tainted)" { - rune -0 cscli collections install crowdsecurity/sshd - echo "dirty" >"$CONFIG_DIR/collections/sshd.yaml" - - 
rune -1 cscli collections install crowdsecurity/sshd - assert_stderr --partial "error while installing 'crowdsecurity/sshd': while enabling crowdsecurity/sshd: crowdsecurity/sshd is tainted, won't overwrite unless --force" - - rune -0 cscli collections install crowdsecurity/sshd --force - assert_stderr --partial "Enabled crowdsecurity/sshd" -} - -@test "cscli collections install --ignore (skip on errors)" { - rune -1 cscli collections install foo/bar crowdsecurity/sshd - assert_stderr --partial "can't find 'foo/bar' in collections" - refute_stderr --partial "Enabled collections: crowdsecurity/sshd" - - rune -0 cscli collections install foo/bar crowdsecurity/sshd --ignore - assert_stderr --partial "can't find 'foo/bar' in collections" - assert_stderr --partial "Enabled collections: crowdsecurity/sshd" -} - -@test "cscli collections inspect" { - rune -1 cscli collections inspect - assert_stderr --partial 'requires at least 1 arg(s), only received 0' - # required for metrics - ./instance-crowdsec start - - rune -1 cscli collections inspect blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in collections" - - # one item - rune -0 cscli collections inspect crowdsecurity/sshd --no-metrics - assert_line 'type: collections' - assert_line 'name: crowdsecurity/sshd' - assert_line 'author: crowdsecurity' - assert_line 'path: collections/crowdsecurity/sshd.yaml' - assert_line 'installed: false' - refute_line --partial 'Current metrics:' - - # one item, with metrics - rune -0 cscli collections inspect crowdsecurity/sshd - assert_line --partial 'Current metrics:' - - # one item, json - rune -0 cscli collections inspect crowdsecurity/sshd -o json - rune -0 jq -c '[.type, .name, .author, .path, .installed]' <(output) - assert_json '["collections","crowdsecurity/sshd","crowdsecurity","collections/crowdsecurity/sshd.yaml",false]' - - # one item, raw - rune -0 cscli collections inspect crowdsecurity/sshd -o raw - assert_line 'type: collections' - assert_line 
'name: crowdsecurity/sshd' - assert_line 'author: crowdsecurity' - assert_line 'path: collections/crowdsecurity/sshd.yaml' - assert_line 'installed: false' - refute_line --partial 'Current metrics:' - - # multiple items - rune -0 cscli collections inspect crowdsecurity/sshd crowdsecurity/smb --no-metrics - assert_output --partial 'crowdsecurity/sshd' - assert_output --partial 'crowdsecurity/smb' - rune -1 grep -c 'Current metrics:' <(output) - assert_output "0" - - # multiple items, with metrics - rune -0 cscli collections inspect crowdsecurity/sshd crowdsecurity/smb - rune -0 grep -c 'Current metrics:' <(output) - assert_output "2" - - # multiple items, json - rune -0 cscli collections inspect crowdsecurity/sshd crowdsecurity/smb -o json - rune -0 jq -sc '[.[] | [.type, .name, .author, .path, .installed]]' <(output) - assert_json '[["collections","crowdsecurity/sshd","crowdsecurity","collections/crowdsecurity/sshd.yaml",false],["collections","crowdsecurity/smb","crowdsecurity","collections/crowdsecurity/smb.yaml",false]]' - - # multiple items, raw - rune -0 cscli collections inspect crowdsecurity/sshd crowdsecurity/smb -o raw - assert_output --partial 'crowdsecurity/sshd' - assert_output --partial 'crowdsecurity/smb' - rune -1 grep -c 'Current metrics:' <(output) - assert_output "0" -} - -@test "cscli collections remove" { - rune -1 cscli collections remove - assert_stderr --partial "specify at least one collection to remove or '--all'" - rune -1 cscli collections remove blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in collections" - - rune -0 cscli collections install crowdsecurity/sshd --download-only - rune -0 cscli collections remove crowdsecurity/sshd - assert_stderr --partial 'removing crowdsecurity/sshd: not installed -- no need to remove' - - rune -0 cscli collections install crowdsecurity/sshd - rune -0 cscli collections remove crowdsecurity/sshd - assert_stderr --partial 'Removed crowdsecurity/sshd' - - rune -0 cscli 
collections remove crowdsecurity/sshd --purge - assert_stderr --partial 'Removed source file [crowdsecurity/sshd]' - - rune -0 cscli collections remove crowdsecurity/sshd - assert_stderr --partial 'removing crowdsecurity/sshd: not installed -- no need to remove' - - rune -0 cscli collections remove crowdsecurity/sshd --purge --debug - assert_stderr --partial 'removing crowdsecurity/sshd: not downloaded -- no need to remove' - refute_stderr --partial 'Removed source file [crowdsecurity/sshd]' - - # install, then remove, check files - rune -0 cscli collections install crowdsecurity/sshd - assert_file_exists "$CONFIG_DIR/collections/sshd.yaml" - rune -0 cscli collections remove crowdsecurity/sshd - assert_file_not_exists "$CONFIG_DIR/collections/sshd.yaml" - - # delete is an alias for remove - rune -0 cscli collections install crowdsecurity/sshd - assert_file_exists "$CONFIG_DIR/collections/sshd.yaml" - rune -0 cscli collections delete crowdsecurity/sshd - assert_file_not_exists "$CONFIG_DIR/collections/sshd.yaml" - - # purge - assert_file_exists "$HUB_DIR/collections/crowdsecurity/sshd.yaml" - rune -0 cscli collections remove crowdsecurity/sshd --purge - assert_file_not_exists "$HUB_DIR/collections/crowdsecurity/sshd.yaml" - - rune -0 cscli collections install crowdsecurity/sshd crowdsecurity/smb - - # --all - rune -0 cscli collections list -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" - - rune -0 cscli collections remove --all - - rune -0 cscli collections list -o raw - rune -1 grep -vc 'name,status,version,description' <(output) - assert_output "0" -} - -@test "cscli collections remove --force" { - # remove a collections that belongs to a collection - rune -0 cscli collections install crowdsecurity/linux - rune -0 cscli collections remove crowdsecurity/sshd - assert_stderr --partial "crowdsecurity/sshd belongs to collections: [crowdsecurity/linux]" - assert_stderr --partial "Run 'sudo cscli collections remove 
crowdsecurity/sshd --force' if you want to force remove this collection" -} - -@test "cscli collections upgrade" { - rune -1 cscli collections upgrade - assert_stderr --partial "specify at least one collection to upgrade or '--all'" - rune -1 cscli collections upgrade blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in collections" - rune -0 cscli collections remove crowdsecurity/exim --purge - rune -1 cscli collections upgrade crowdsecurity/exim - assert_stderr --partial "can't upgrade crowdsecurity/exim: not installed" - rune -0 cscli collections install crowdsecurity/exim --download-only - rune -1 cscli collections upgrade crowdsecurity/exim - assert_stderr --partial "can't upgrade crowdsecurity/exim: downloaded but not installed" - - # hash of the string "v0.0" - sha256_0_0="dfebecf42784a31aa3d009dbcec0c657154a034b45f49cf22a895373f6dbf63d" - - # add version 0.0 to all collections - new_hub=$(jq --arg DIGEST "$sha256_0_0" <"$INDEX_PATH" '.collections |= with_entries(.value.versions["0.0"] = {"digest": $DIGEST, "deprecated": false})') - echo "$new_hub" >"$INDEX_PATH" - - rune -0 cscli collections install crowdsecurity/sshd - - echo "v0.0" > "$CONFIG_DIR/collections/sshd.yaml" - rune -0 cscli collections inspect crowdsecurity/sshd -o json - rune -0 jq -e '.local_version=="0.0"' <(output) - - # upgrade - rune -0 cscli collections upgrade crowdsecurity/sshd - rune -0 cscli collections inspect crowdsecurity/sshd -o json - rune -0 jq -e '.local_version==.version' <(output) - - # taint - echo "dirty" >"$CONFIG_DIR/collections/sshd.yaml" - # XXX: should return error - rune -0 cscli collections upgrade crowdsecurity/sshd - assert_stderr --partial "crowdsecurity/sshd is tainted, --force to overwrite" - rune -0 cscli collections inspect crowdsecurity/sshd -o json - rune -0 jq -e '.local_version=="?"' <(output) - - # force upgrade with taint - rune -0 cscli collections upgrade crowdsecurity/sshd --force - rune -0 cscli collections inspect 
crowdsecurity/sshd -o json - rune -0 jq -e '.local_version==.version' <(output) - - # multiple items - rune -0 cscli collections install crowdsecurity/smb - echo "v0.0" >"$CONFIG_DIR/collections/sshd.yaml" - echo "v0.0" >"$CONFIG_DIR/collections/smb.yaml" - rune -0 cscli collections list -o json - rune -0 jq -e '[.collections[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli collections upgrade crowdsecurity/sshd crowdsecurity/smb - rune -0 cscli collections list -o json - rune -0 jq -e 'any(.collections[].local_version; .=="0.0") | not' <(output) - - # upgrade all - echo "v0.0" >"$CONFIG_DIR/collections/sshd.yaml" - echo "v0.0" >"$CONFIG_DIR/collections/smb.yaml" - rune -0 cscli collections list -o json - rune -0 jq -e '[.collections[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli collections upgrade --all - rune -0 cscli collections list -o json - rune -0 jq -e 'any(.collections[].local_version; .=="0.0") | not' <(output) -} diff --git a/test/bats/20_hub_collections_dep.bats b/test/bats/20_hub_collections_dep.bats index 673b812dc0d..94a984709a8 100644 --- a/test/bats/20_hub_collections_dep.bats +++ b/test/bats/20_hub_collections_dep.bats @@ -20,7 +20,6 @@ setup() { load "../lib/setup.sh" load "../lib/bats-file/load.bash" ./instance-data load - hub_strip_index } teardown() { @@ -84,18 +83,32 @@ teardown() { assert_stderr --partial "crowdsecurity/smb is tainted, use '--force' to remove" } +@test "cscli collections inspect (dependencies)" { + rune -0 cscli collections install crowdsecurity/smb + + # The inspect command must show the dependencies of the local or older version. 
+ echo "{'collections': ['crowdsecurity/sshd']}" >"$CONFIG_DIR/collections/smb.yaml" + + rune -0 cscli collections inspect crowdsecurity/smb --no-metrics -o json + rune -0 jq -e '.collections' <(output) + assert_json '["crowdsecurity/sshd"]' +} + @test "cscli collections (dependencies II: the revenge)" { rune -0 cscli collections install crowdsecurity/wireguard baudneo/gotify rune -0 cscli collections remove crowdsecurity/wireguard - assert_stderr --partial "crowdsecurity/syslog-logs was not removed because it also belongs to baudneo/gotify" + assert_output --regexp 'disabling collections:crowdsecurity/wireguard' + refute_output --regexp 'disabling parsers:crowdsecurity/syslog-logs' rune -0 cscli collections inspect crowdsecurity/wireguard -o json rune -0 jq -e '.installed==false' <(output) + rune -0 cscli parsers inspect crowdsecurity/syslog-logs -o json + rune -0 jq -e '.installed==true' <(output) } @test "cscli collections (dependencies III: origins)" { # it is perfectly fine to remove an item belonging to a collection that we are removing anyway - # inject a dependency: sshd requires the syslog-logs parsers, but linux does too + # inject a direct dependency: sshd requires the syslog-logs parsers, but linux does too hub_dep=$(jq <"$INDEX_PATH" '. 
* {collections:{"crowdsecurity/sshd":{parsers:["crowdsecurity/syslog-logs"]}}}') echo "$hub_dep" >"$INDEX_PATH" @@ -108,11 +121,8 @@ teardown() { # removing linux should remove syslog-logs even though sshd depends on it rune -0 cscli collections remove crowdsecurity/linux - refute_stderr --partial "crowdsecurity/syslog-logs was not removed" - # we must also consider indirect dependencies - refute_stderr --partial "crowdsecurity/ssh-bf was not removed" - rune -0 cscli parsers list -o json - rune -0 jq -e '.parsers | length == 0' <(output) + rune -0 cscli hub list -o json + rune -0 jq -e 'add | length == 0' <(output) } @test "cscli collections (dependencies IV: looper)" { diff --git a/test/bats/20_hub_items.bats b/test/bats/20_hub_items.bats index 4b390c90ed4..4ddaf387488 100644 --- a/test/bats/20_hub_items.bats +++ b/test/bats/20_hub_items.bats @@ -22,7 +22,6 @@ setup() { load "../lib/setup.sh" load "../lib/bats-file/load.bash" ./instance-data load - hub_strip_index } teardown() { @@ -80,10 +79,9 @@ teardown() { echo "$new_hub" >"$INDEX_PATH" rune -0 cscli collections install crowdsecurity/sshd - rune -1 cscli collections inspect crowdsecurity/sshd --no-metrics -o json - # XXX: we are on the verbose side here... - rune -0 jq -r ".msg" <(stderr) - assert_output --regexp "failed to read Hub index: failed to sync hub items: failed to scan .*: while syncing collections sshd.yaml: 1.2.3.4: Invalid Semantic Version. 
Run 'sudo cscli hub update' to download the index again" + rune -1 cscli collections inspect crowdsecurity/sshd --no-metrics + # XXX: it would be better to trigger this during parse, not sync + assert_stderr "Error: failed to sync $HUB_DIR: while syncing collections sshd.yaml: 1.2.3.4: Invalid Semantic Version" } @test "removing or purging an item already removed by hand" { @@ -92,20 +90,15 @@ teardown() { rune -0 jq -r '.local_path' <(output) rune -0 rm "$(output)" - rune -0 cscli parsers remove crowdsecurity/syslog-logs --debug - assert_stderr --partial "removing crowdsecurity/syslog-logs: not installed -- no need to remove" + rune -0 cscli parsers remove crowdsecurity/syslog-logs + assert_output "Nothing to do." rune -0 cscli parsers inspect crowdsecurity/syslog-logs -o json rune -0 jq -r '.path' <(output) rune -0 rm "$HUB_DIR/$(output)" - rune -0 cscli parsers remove crowdsecurity/syslog-logs --purge --debug - assert_stderr --partial "removing crowdsecurity/syslog-logs: not downloaded -- no need to remove" - - rune -0 cscli parsers remove crowdsecurity/linux --all --error --purge --force - rune -0 cscli collections remove crowdsecurity/linux --all --error --purge --force - refute_output - refute_stderr + rune -0 cscli parsers remove crowdsecurity/syslog-logs --purge + assert_output "Nothing to do." 
} @test "a local item is not tainted" { @@ -122,7 +115,7 @@ teardown() { # and not from hub update rune -0 cscli hub update - assert_stderr --partial "collection crowdsecurity/sshd is tainted" + assert_stderr --partial "collection crowdsecurity/sshd is tainted by local changes" refute_stderr --partial "collection foobar.yaml is tainted" } @@ -151,25 +144,42 @@ teardown() { @test "a local item cannot be downloaded by cscli" { rune -0 mkdir -p "$CONFIG_DIR/collections" rune -0 touch "$CONFIG_DIR/collections/foobar.yaml" - rune -1 cscli collections install foobar.yaml - assert_stderr --partial "foobar.yaml is local, can't download" - rune -1 cscli collections install foobar.yaml --force - assert_stderr --partial "foobar.yaml is local, can't download" + rune -0 cscli collections install foobar.yaml + assert_output --partial "Nothing to do." + rune -0 cscli collections install foobar.yaml --force + assert_output --partial "Nothing to do." + rune -0 cscli collections install --download-only foobar.yaml + assert_output --partial "Nothing to do." 
} @test "a local item cannot be removed by cscli" { - rune -0 mkdir -p "$CONFIG_DIR/collections" - rune -0 touch "$CONFIG_DIR/collections/foobar.yaml" - rune -0 cscli collections remove foobar.yaml - assert_stderr --partial "foobar.yaml is a local item, please delete manually" - rune -0 cscli collections remove foobar.yaml --purge - assert_stderr --partial "foobar.yaml is a local item, please delete manually" - rune -0 cscli collections remove foobar.yaml --force - assert_stderr --partial "foobar.yaml is a local item, please delete manually" - rune -0 cscli collections remove --all - assert_stderr --partial "foobar.yaml is a local item, please delete manually" - rune -0 cscli collections remove --all --purge - assert_stderr --partial "foobar.yaml is a local item, please delete manually" + rune -0 mkdir -p "$CONFIG_DIR/scenarios" + rune -0 touch "$CONFIG_DIR/scenarios/foobar.yaml" + rune -0 cscli scenarios remove foobar.yaml + assert_output - <<-EOT + WARN scenarios:foobar.yaml is a local item, please delete manually + Nothing to do. + EOT + rune -0 cscli scenarios remove foobar.yaml --purge + assert_output - <<-EOT + WARN scenarios:foobar.yaml is a local item, please delete manually + Nothing to do. + EOT + rune -0 cscli scenarios remove foobar.yaml --force + assert_output - <<-EOT + WARN scenarios:foobar.yaml is a local item, please delete manually + Nothing to do. 
+ EOT + + rune -0 cscli scenarios install crowdsecurity/ssh-bf + + rune -0 cscli scenarios remove --all + assert_line "WARN scenarios:foobar.yaml is a local item, please delete manually" + assert_line "disabling scenarios:crowdsecurity/ssh-bf" + + rune -0 cscli scenarios remove --all --purge + assert_line "WARN scenarios:foobar.yaml is a local item, please delete manually" + assert_line "purging scenarios:crowdsecurity/ssh-bf" } @test "a dangling link is reported with a warning" { @@ -182,6 +192,16 @@ teardown() { assert_json '[]' } +@test "replacing a symlink with a regular file makes a local item" { + rune -0 cscli parsers install crowdsecurity/caddy-logs + rune -0 rm "$CONFIG_DIR/parsers/s01-parse/caddy-logs.yaml" + rune -0 cp "$HUB_DIR/parsers/s01-parse/crowdsecurity/caddy-logs.yaml" "$CONFIG_DIR/parsers/s01-parse/caddy-logs.yaml" + rune -0 cscli hub list + rune -0 cscli parsers inspect crowdsecurity/caddy-logs -o json + rune -0 jq -e '[.tainted,.local,.local_version==false,true,"?"]' <(output) + refute_stderr +} + @test "tainted hub file, not enabled, install --force should repair" { rune -0 cscli scenarios install crowdsecurity/ssh-bf rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o json diff --git a/test/bats/20_hub_parsers.bats b/test/bats/20_hub_parsers.bats deleted file mode 100644 index 791b1a2177f..00000000000 --- a/test/bats/20_hub_parsers.bats +++ /dev/null @@ -1,383 +0,0 @@ -#!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: - -set -u - -setup_file() { - load "../lib/setup_file.sh" - ./instance-data load - HUB_DIR=$(config_get '.config_paths.hub_dir') - export HUB_DIR - INDEX_PATH=$(config_get '.config_paths.index_path') - export INDEX_PATH - CONFIG_DIR=$(config_get '.config_paths.config_dir') - export CONFIG_DIR -} - -teardown_file() { - load "../lib/teardown_file.sh" -} - -setup() { - load "../lib/setup.sh" - load "../lib/bats-file/load.bash" - ./instance-data load - hub_strip_index -} - -teardown() { - ./instance-crowdsec 
stop -} - -#---------- - -@test "cscli parsers list" { - hub_purge_all - - # no items - rune -0 cscli parsers list - assert_output --partial "PARSERS" - rune -0 cscli parsers list -o json - assert_json '{parsers:[]}' - rune -0 cscli parsers list -o raw - assert_output 'name,status,version,description' - - # some items - rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/windows-auth - - rune -0 cscli parsers list - assert_output --partial crowdsecurity/whitelists - assert_output --partial crowdsecurity/windows-auth - rune -0 grep -c enabled <(output) - assert_output "2" - - rune -0 cscli parsers list -o json - assert_output --partial crowdsecurity/whitelists - assert_output --partial crowdsecurity/windows-auth - rune -0 jq '.parsers | length' <(output) - assert_output "2" - - rune -0 cscli parsers list -o raw - assert_output --partial crowdsecurity/whitelists - assert_output --partial crowdsecurity/windows-auth - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" -} - -@test "cscli parsers list -a" { - expected=$(jq <"$INDEX_PATH" -r '.parsers | length') - - rune -0 cscli parsers list -a - rune -0 grep -c disabled <(output) - assert_output "$expected" - - rune -0 cscli parsers list -o json -a - rune -0 jq '.parsers | length' <(output) - assert_output "$expected" - - rune -0 cscli parsers list -o raw -a - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "$expected" - - # the list should be the same in all formats, and sorted (not case sensitive) - - list_raw=$(cscli parsers list -o raw -a | tail -n +2 | cut -d, -f1) - list_human=$(cscli parsers list -o human -a | tail -n +6 | head -n -1 | cut -d' ' -f2) - list_json=$(cscli parsers list -o json -a | jq -r '.parsers[].name') - - rune -0 sort -f <<<"$list_raw" - assert_output "$list_raw" - - assert_equal "$list_raw" "$list_json" - assert_equal "$list_raw" "$list_human" -} - -@test "cscli parsers list [parser]..." 
{ - # non-existent - rune -1 cscli parsers install foo/bar - assert_stderr --partial "can't find 'foo/bar' in parsers" - - # not installed - rune -0 cscli parsers list crowdsecurity/whitelists - assert_output --regexp 'crowdsecurity/whitelists.*disabled' - - # install two items - rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/windows-auth - - # list an installed item - rune -0 cscli parsers list crowdsecurity/whitelists - assert_output --regexp "crowdsecurity/whitelists.*enabled" - refute_output --partial "crowdsecurity/windows-auth" - - # list multiple installed and non installed items - rune -0 cscli parsers list crowdsecurity/whitelists crowdsecurity/windows-auth crowdsecurity/traefik-logs - assert_output --partial "crowdsecurity/whitelists" - assert_output --partial "crowdsecurity/windows-auth" - assert_output --partial "crowdsecurity/traefik-logs" - - rune -0 cscli parsers list crowdsecurity/whitelists -o json - rune -0 jq '.parsers | length' <(output) - assert_output "1" - rune -0 cscli parsers list crowdsecurity/whitelists crowdsecurity/windows-auth crowdsecurity/traefik-logs -o json - rune -0 jq '.parsers | length' <(output) - assert_output "3" - - rune -0 cscli parsers list crowdsecurity/whitelists -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "1" - rune -0 cscli parsers list crowdsecurity/whitelists crowdsecurity/windows-auth crowdsecurity/traefik-logs -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "3" -} - -@test "cscli parsers install" { - rune -1 cscli parsers install - assert_stderr --partial 'requires at least 1 arg(s), only received 0' - - # not in hub - rune -1 cscli parsers install crowdsecurity/blahblah - assert_stderr --partial "can't find 'crowdsecurity/blahblah' in parsers" - - # simple install - rune -0 cscli parsers install crowdsecurity/whitelists - rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics - assert_output --partial 
'crowdsecurity/whitelists' - assert_output --partial 'installed: true' - - # autocorrect - rune -1 cscli parsers install crowdsecurity/sshd-logz - assert_stderr --partial "can't find 'crowdsecurity/sshd-logz' in parsers, did you mean 'crowdsecurity/sshd-logs'?" - - # install multiple - rune -0 cscli parsers install crowdsecurity/pgsql-logs crowdsecurity/postfix-logs - rune -0 cscli parsers inspect crowdsecurity/pgsql-logs --no-metrics - assert_output --partial 'crowdsecurity/pgsql-logs' - assert_output --partial 'installed: true' - rune -0 cscli parsers inspect crowdsecurity/postfix-logs --no-metrics - assert_output --partial 'crowdsecurity/postfix-logs' - assert_output --partial 'installed: true' -} - -@test "cscli parsers install (file location and download-only)" { - rune -0 cscli parsers install crowdsecurity/whitelists --download-only - rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics - assert_output --partial 'crowdsecurity/whitelists' - assert_output --partial 'installed: false' - assert_file_exists "$HUB_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" - assert_file_not_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - - rune -0 cscli parsers install crowdsecurity/whitelists - rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics - assert_output --partial 'installed: true' - assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" -} - -@test "cscli parsers install --force (tainted)" { - rune -0 cscli parsers install crowdsecurity/whitelists - echo "dirty" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - - rune -1 cscli parsers install crowdsecurity/whitelists - assert_stderr --partial "error while installing 'crowdsecurity/whitelists': while enabling crowdsecurity/whitelists: crowdsecurity/whitelists is tainted, won't overwrite unless --force" - - rune -0 cscli parsers install crowdsecurity/whitelists --force - assert_stderr --partial "Enabled crowdsecurity/whitelists" -} - -@test "cscli parsers 
install --ignore (skip on errors)" { - rune -1 cscli parsers install foo/bar crowdsecurity/whitelists - assert_stderr --partial "can't find 'foo/bar' in parsers" - refute_stderr --partial "Enabled parsers: crowdsecurity/whitelists" - - rune -0 cscli parsers install foo/bar crowdsecurity/whitelists --ignore - assert_stderr --partial "can't find 'foo/bar' in parsers" - assert_stderr --partial "Enabled parsers: crowdsecurity/whitelists" -} - -@test "cscli parsers inspect" { - rune -1 cscli parsers inspect - assert_stderr --partial 'requires at least 1 arg(s), only received 0' - # required for metrics - ./instance-crowdsec start - - rune -1 cscli parsers inspect blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in parsers" - - # one item - rune -0 cscli parsers inspect crowdsecurity/sshd-logs --no-metrics - assert_line 'type: parsers' - assert_line 'stage: s01-parse' - assert_line 'name: crowdsecurity/sshd-logs' - assert_line 'author: crowdsecurity' - assert_line 'path: parsers/s01-parse/crowdsecurity/sshd-logs.yaml' - assert_line 'installed: false' - refute_line --partial 'Current metrics:' - - # one item, with metrics - rune -0 cscli parsers inspect crowdsecurity/sshd-logs - assert_line --partial 'Current metrics:' - - # one item, json - rune -0 cscli parsers inspect crowdsecurity/sshd-logs -o json - rune -0 jq -c '[.type, .stage, .name, .author, .path, .installed]' <(output) - assert_json '["parsers","s01-parse","crowdsecurity/sshd-logs","crowdsecurity","parsers/s01-parse/crowdsecurity/sshd-logs.yaml",false]' - - # one item, raw - rune -0 cscli parsers inspect crowdsecurity/sshd-logs -o raw - assert_line 'type: parsers' - assert_line 'name: crowdsecurity/sshd-logs' - assert_line 'stage: s01-parse' - assert_line 'author: crowdsecurity' - assert_line 'path: parsers/s01-parse/crowdsecurity/sshd-logs.yaml' - assert_line 'installed: false' - refute_line --partial 'Current metrics:' - - # multiple items - rune -0 cscli parsers inspect 
crowdsecurity/sshd-logs crowdsecurity/whitelists --no-metrics - assert_output --partial 'crowdsecurity/sshd-logs' - assert_output --partial 'crowdsecurity/whitelists' - rune -1 grep -c 'Current metrics:' <(output) - assert_output "0" - - # multiple items, with metrics - rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists - rune -0 grep -c 'Current metrics:' <(output) - assert_output "2" - - # multiple items, json - rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists -o json - rune -0 jq -sc '[.[] | [.type, .stage, .name, .author, .path, .installed]]' <(output) - assert_json '[["parsers","s01-parse","crowdsecurity/sshd-logs","crowdsecurity","parsers/s01-parse/crowdsecurity/sshd-logs.yaml",false],["parsers","s02-enrich","crowdsecurity/whitelists","crowdsecurity","parsers/s02-enrich/crowdsecurity/whitelists.yaml",false]]' - - # multiple items, raw - rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists -o raw - assert_output --partial 'crowdsecurity/sshd-logs' - assert_output --partial 'crowdsecurity/whitelists' - rune -1 grep -c 'Current metrics:' <(output) - assert_output "0" -} - -@test "cscli parsers remove" { - rune -1 cscli parsers remove - assert_stderr --partial "specify at least one parser to remove or '--all'" - rune -1 cscli parsers remove blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in parsers" - - rune -0 cscli parsers install crowdsecurity/whitelists --download-only - rune -0 cscli parsers remove crowdsecurity/whitelists - assert_stderr --partial "removing crowdsecurity/whitelists: not installed -- no need to remove" - - rune -0 cscli parsers install crowdsecurity/whitelists - rune -0 cscli parsers remove crowdsecurity/whitelists - assert_stderr --partial "Removed crowdsecurity/whitelists" - - rune -0 cscli parsers remove crowdsecurity/whitelists --purge - assert_stderr --partial 'Removed source file [crowdsecurity/whitelists]' - - rune -0 cscli 
parsers remove crowdsecurity/whitelists - assert_stderr --partial "removing crowdsecurity/whitelists: not installed -- no need to remove" - - rune -0 cscli parsers remove crowdsecurity/whitelists --purge --debug - assert_stderr --partial 'removing crowdsecurity/whitelists: not downloaded -- no need to remove' - refute_stderr --partial 'Removed source file [crowdsecurity/whitelists]' - - # install, then remove, check files - rune -0 cscli parsers install crowdsecurity/whitelists - assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - rune -0 cscli parsers remove crowdsecurity/whitelists - assert_file_not_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - - # delete is an alias for remove - rune -0 cscli parsers install crowdsecurity/whitelists - assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - rune -0 cscli parsers delete crowdsecurity/whitelists - assert_file_not_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - - # purge - assert_file_exists "$HUB_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" - rune -0 cscli parsers remove crowdsecurity/whitelists --purge - assert_file_not_exists "$HUB_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" - - rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/windows-auth - - # --all - rune -0 cscli parsers list -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" - - rune -0 cscli parsers remove --all - - rune -0 cscli parsers list -o raw - rune -1 grep -vc 'name,status,version,description' <(output) - assert_output "0" -} - -@test "cscli parsers remove --force" { - # remove a parser that belongs to a collection - rune -0 cscli collections install crowdsecurity/sshd - rune -0 cscli parsers remove crowdsecurity/sshd-logs - assert_stderr --partial "crowdsecurity/sshd-logs belongs to collections: [crowdsecurity/sshd]" - assert_stderr --partial "Run 'sudo cscli parsers remove crowdsecurity/sshd-logs --force' if you want 
to force remove this parser" -} - -@test "cscli parsers upgrade" { - rune -1 cscli parsers upgrade - assert_stderr --partial "specify at least one parser to upgrade or '--all'" - rune -1 cscli parsers upgrade blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in parsers" - rune -0 cscli parsers remove crowdsecurity/pam-logs --purge - rune -1 cscli parsers upgrade crowdsecurity/pam-logs - assert_stderr --partial "can't upgrade crowdsecurity/pam-logs: not installed" - rune -0 cscli parsers install crowdsecurity/pam-logs --download-only - rune -1 cscli parsers upgrade crowdsecurity/pam-logs - assert_stderr --partial "can't upgrade crowdsecurity/pam-logs: downloaded but not installed" - - # hash of the string "v0.0" - sha256_0_0="dfebecf42784a31aa3d009dbcec0c657154a034b45f49cf22a895373f6dbf63d" - - # add version 0.0 to all parsers - new_hub=$(jq --arg DIGEST "$sha256_0_0" <"$INDEX_PATH" '.parsers |= with_entries(.value.versions["0.0"] = {"digest": $DIGEST, "deprecated": false})') - echo "$new_hub" >"$INDEX_PATH" - - rune -0 cscli parsers install crowdsecurity/whitelists - - echo "v0.0" > "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - rune -0 cscli parsers inspect crowdsecurity/whitelists -o json - rune -0 jq -e '.local_version=="0.0"' <(output) - - # upgrade - rune -0 cscli parsers upgrade crowdsecurity/whitelists - rune -0 cscli parsers inspect crowdsecurity/whitelists -o json - rune -0 jq -e '.local_version==.version' <(output) - - # taint - echo "dirty" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - # XXX: should return error - rune -0 cscli parsers upgrade crowdsecurity/whitelists - assert_stderr --partial "crowdsecurity/whitelists is tainted, --force to overwrite" - rune -0 cscli parsers inspect crowdsecurity/whitelists -o json - rune -0 jq -e '.local_version=="?"' <(output) - - # force upgrade with taint - rune -0 cscli parsers upgrade crowdsecurity/whitelists --force - rune -0 cscli parsers inspect crowdsecurity/whitelists -o 
json - rune -0 jq -e '.local_version==.version' <(output) - - # multiple items - rune -0 cscli parsers install crowdsecurity/windows-auth - echo "v0.0" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - echo "v0.0" >"$CONFIG_DIR/parsers/s01-parse/windows-auth.yaml" - rune -0 cscli parsers list -o json - rune -0 jq -e '[.parsers[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli parsers upgrade crowdsecurity/whitelists crowdsecurity/windows-auth - rune -0 cscli parsers list -o json - rune -0 jq -e 'any(.parsers[].local_version; .=="0.0") | not' <(output) - - # upgrade all - echo "v0.0" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" - echo "v0.0" >"$CONFIG_DIR/parsers/s01-parse/windows-auth.yaml" - rune -0 cscli parsers list -o json - rune -0 jq -e '[.parsers[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli parsers upgrade --all - rune -0 cscli parsers list -o json - rune -0 jq -e 'any(.parsers[].local_version; .=="0.0") | not' <(output) -} diff --git a/test/bats/20_hub_postoverflows.bats b/test/bats/20_hub_postoverflows.bats deleted file mode 100644 index 37337b08caa..00000000000 --- a/test/bats/20_hub_postoverflows.bats +++ /dev/null @@ -1,383 +0,0 @@ -#!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: - -set -u - -setup_file() { - load "../lib/setup_file.sh" - ./instance-data load - HUB_DIR=$(config_get '.config_paths.hub_dir') - export HUB_DIR - INDEX_PATH=$(config_get '.config_paths.index_path') - export INDEX_PATH - CONFIG_DIR=$(config_get '.config_paths.config_dir') - export CONFIG_DIR -} - -teardown_file() { - load "../lib/teardown_file.sh" -} - -setup() { - load "../lib/setup.sh" - load "../lib/bats-file/load.bash" - ./instance-data load - hub_strip_index -} - -teardown() { - ./instance-crowdsec stop -} - -#---------- - -@test "cscli postoverflows list" { - hub_purge_all - - # no items - rune -0 cscli postoverflows list - assert_output --partial "POSTOVERFLOWS" - rune -0 cscli postoverflows list -o json - assert_json 
'{postoverflows:[]}' - rune -0 cscli postoverflows list -o raw - assert_output 'name,status,version,description' - - # some items - rune -0 cscli postoverflows install crowdsecurity/rdns crowdsecurity/cdn-whitelist - - rune -0 cscli postoverflows list - assert_output --partial crowdsecurity/rdns - assert_output --partial crowdsecurity/cdn-whitelist - rune -0 grep -c enabled <(output) - assert_output "2" - - rune -0 cscli postoverflows list -o json - assert_output --partial crowdsecurity/rdns - assert_output --partial crowdsecurity/cdn-whitelist - rune -0 jq '.postoverflows | length' <(output) - assert_output "2" - - rune -0 cscli postoverflows list -o raw - assert_output --partial crowdsecurity/rdns - assert_output --partial crowdsecurity/cdn-whitelist - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" -} - -@test "cscli postoverflows list -a" { - expected=$(jq <"$INDEX_PATH" -r '.postoverflows | length') - - rune -0 cscli postoverflows list -a - rune -0 grep -c disabled <(output) - assert_output "$expected" - - rune -0 cscli postoverflows list -o json -a - rune -0 jq '.postoverflows | length' <(output) - assert_output "$expected" - - rune -0 cscli postoverflows list -o raw -a - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "$expected" - - # the list should be the same in all formats, and sorted (not case sensitive) - - list_raw=$(cscli postoverflows list -o raw -a | tail -n +2 | cut -d, -f1) - list_human=$(cscli postoverflows list -o human -a | tail -n +6 | head -n -1 | cut -d' ' -f2) - list_json=$(cscli postoverflows list -o json -a | jq -r '.postoverflows[].name') - - rune -0 sort -f <<<"$list_raw" - assert_output "$list_raw" - - assert_equal "$list_raw" "$list_json" - assert_equal "$list_raw" "$list_human" -} - -@test "cscli postoverflows list [postoverflow]..." 
{ - # non-existent - rune -1 cscli postoverflows install foo/bar - assert_stderr --partial "can't find 'foo/bar' in postoverflows" - - # not installed - rune -0 cscli postoverflows list crowdsecurity/rdns - assert_output --regexp 'crowdsecurity/rdns.*disabled' - - # install two items - rune -0 cscli postoverflows install crowdsecurity/rdns crowdsecurity/cdn-whitelist - - # list an installed item - rune -0 cscli postoverflows list crowdsecurity/rdns - assert_output --regexp "crowdsecurity/rdns.*enabled" - refute_output --partial "crowdsecurity/cdn-whitelist" - - # list multiple installed and non installed items - rune -0 cscli postoverflows list crowdsecurity/rdns crowdsecurity/cdn-whitelist crowdsecurity/ipv6_to_range - assert_output --partial "crowdsecurity/rdns" - assert_output --partial "crowdsecurity/cdn-whitelist" - assert_output --partial "crowdsecurity/ipv6_to_range" - - rune -0 cscli postoverflows list crowdsecurity/rdns -o json - rune -0 jq '.postoverflows | length' <(output) - assert_output "1" - rune -0 cscli postoverflows list crowdsecurity/rdns crowdsecurity/cdn-whitelist crowdsecurity/ipv6_to_range -o json - rune -0 jq '.postoverflows | length' <(output) - assert_output "3" - - rune -0 cscli postoverflows list crowdsecurity/rdns -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "1" - rune -0 cscli postoverflows list crowdsecurity/rdns crowdsecurity/cdn-whitelist crowdsecurity/ipv6_to_range -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "3" -} - -@test "cscli postoverflows install" { - rune -1 cscli postoverflows install - assert_stderr --partial 'requires at least 1 arg(s), only received 0' - - # not in hub - rune -1 cscli postoverflows install crowdsecurity/blahblah - assert_stderr --partial "can't find 'crowdsecurity/blahblah' in postoverflows" - - # simple install - rune -0 cscli postoverflows install crowdsecurity/rdns - rune -0 cscli postoverflows inspect 
crowdsecurity/rdns --no-metrics - assert_output --partial 'crowdsecurity/rdns' - assert_output --partial 'installed: true' - - # autocorrect - rune -1 cscli postoverflows install crowdsecurity/rdnf - assert_stderr --partial "can't find 'crowdsecurity/rdnf' in postoverflows, did you mean 'crowdsecurity/rdns'?" - - # install multiple - rune -0 cscli postoverflows install crowdsecurity/rdns crowdsecurity/cdn-whitelist - rune -0 cscli postoverflows inspect crowdsecurity/rdns --no-metrics - assert_output --partial 'crowdsecurity/rdns' - assert_output --partial 'installed: true' - rune -0 cscli postoverflows inspect crowdsecurity/cdn-whitelist --no-metrics - assert_output --partial 'crowdsecurity/cdn-whitelist' - assert_output --partial 'installed: true' -} - -@test "cscli postoverflows install (file location and download-only)" { - rune -0 cscli postoverflows install crowdsecurity/rdns --download-only - rune -0 cscli postoverflows inspect crowdsecurity/rdns --no-metrics - assert_output --partial 'crowdsecurity/rdns' - assert_output --partial 'installed: false' - assert_file_exists "$HUB_DIR/postoverflows/s00-enrich/crowdsecurity/rdns.yaml" - assert_file_not_exists "$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - - rune -0 cscli postoverflows install crowdsecurity/rdns - rune -0 cscli postoverflows inspect crowdsecurity/rdns --no-metrics - assert_output --partial 'installed: true' - assert_file_exists "$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" -} - -@test "cscli postoverflows install --force (tainted)" { - rune -0 cscli postoverflows install crowdsecurity/rdns - echo "dirty" >"$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - - rune -1 cscli postoverflows install crowdsecurity/rdns - assert_stderr --partial "error while installing 'crowdsecurity/rdns': while enabling crowdsecurity/rdns: crowdsecurity/rdns is tainted, won't overwrite unless --force" - - rune -0 cscli postoverflows install crowdsecurity/rdns --force - assert_stderr --partial "Enabled 
crowdsecurity/rdns" -} - -@test "cscli postoverflow install --ignore (skip on errors)" { - rune -1 cscli postoverflows install foo/bar crowdsecurity/rdns - assert_stderr --partial "can't find 'foo/bar' in postoverflows" - refute_stderr --partial "Enabled postoverflows: crowdsecurity/rdns" - - rune -0 cscli postoverflows install foo/bar crowdsecurity/rdns --ignore - assert_stderr --partial "can't find 'foo/bar' in postoverflows" - assert_stderr --partial "Enabled postoverflows: crowdsecurity/rdns" -} - -@test "cscli postoverflows inspect" { - rune -1 cscli postoverflows inspect - assert_stderr --partial 'requires at least 1 arg(s), only received 0' - # required for metrics - ./instance-crowdsec start - - rune -1 cscli postoverflows inspect blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in postoverflows" - - # one item - rune -0 cscli postoverflows inspect crowdsecurity/rdns --no-metrics - assert_line 'type: postoverflows' - assert_line 'stage: s00-enrich' - assert_line 'name: crowdsecurity/rdns' - assert_line 'author: crowdsecurity' - assert_line 'path: postoverflows/s00-enrich/crowdsecurity/rdns.yaml' - assert_line 'installed: false' - refute_line --partial 'Current metrics:' - - # one item, with metrics - rune -0 cscli postoverflows inspect crowdsecurity/rdns - assert_line --partial 'Current metrics:' - - # one item, json - rune -0 cscli postoverflows inspect crowdsecurity/rdns -o json - rune -0 jq -c '[.type, .stage, .name, .author, .path, .installed]' <(output) - assert_json '["postoverflows","s00-enrich","crowdsecurity/rdns","crowdsecurity","postoverflows/s00-enrich/crowdsecurity/rdns.yaml",false]' - - # one item, raw - rune -0 cscli postoverflows inspect crowdsecurity/rdns -o raw - assert_line 'type: postoverflows' - assert_line 'name: crowdsecurity/rdns' - assert_line 'stage: s00-enrich' - assert_line 'author: crowdsecurity' - assert_line 'path: postoverflows/s00-enrich/crowdsecurity/rdns.yaml' - assert_line 'installed: false' - 
refute_line --partial 'Current metrics:' - - # multiple items - rune -0 cscli postoverflows inspect crowdsecurity/rdns crowdsecurity/cdn-whitelist --no-metrics - assert_output --partial 'crowdsecurity/rdns' - assert_output --partial 'crowdsecurity/cdn-whitelist' - rune -1 grep -c 'Current metrics:' <(output) - assert_output "0" - - # multiple items, with metrics - rune -0 cscli postoverflows inspect crowdsecurity/rdns crowdsecurity/cdn-whitelist - rune -0 grep -c 'Current metrics:' <(output) - assert_output "2" - - # multiple items, json - rune -0 cscli postoverflows inspect crowdsecurity/rdns crowdsecurity/cdn-whitelist -o json - rune -0 jq -sc '[.[] | [.type, .stage, .name, .author, .path, .installed]]' <(output) - assert_json '[["postoverflows","s00-enrich","crowdsecurity/rdns","crowdsecurity","postoverflows/s00-enrich/crowdsecurity/rdns.yaml",false],["postoverflows","s01-whitelist","crowdsecurity/cdn-whitelist","crowdsecurity","postoverflows/s01-whitelist/crowdsecurity/cdn-whitelist.yaml",false]]' - - # multiple items, raw - rune -0 cscli postoverflows inspect crowdsecurity/rdns crowdsecurity/cdn-whitelist -o raw - assert_output --partial 'crowdsecurity/rdns' - assert_output --partial 'crowdsecurity/cdn-whitelist' - run -1 grep -c 'Current metrics:' <(output) - assert_output "0" -} - -@test "cscli postoverflows remove" { - rune -1 cscli postoverflows remove - assert_stderr --partial "specify at least one postoverflow to remove or '--all'" - rune -1 cscli postoverflows remove blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in postoverflows" - - rune -0 cscli postoverflows install crowdsecurity/rdns --download-only - rune -0 cscli postoverflows remove crowdsecurity/rdns - assert_stderr --partial "removing crowdsecurity/rdns: not installed -- no need to remove" - - rune -0 cscli postoverflows install crowdsecurity/rdns - rune -0 cscli postoverflows remove crowdsecurity/rdns - assert_stderr --partial 'Removed crowdsecurity/rdns' - - rune 
-0 cscli postoverflows remove crowdsecurity/rdns --purge - assert_stderr --partial 'Removed source file [crowdsecurity/rdns]' - - rune -0 cscli postoverflows remove crowdsecurity/rdns - assert_stderr --partial 'removing crowdsecurity/rdns: not installed -- no need to remove' - - rune -0 cscli postoverflows remove crowdsecurity/rdns --purge --debug - assert_stderr --partial 'removing crowdsecurity/rdns: not downloaded -- no need to remove' - refute_stderr --partial 'Removed source file [crowdsecurity/rdns]' - - # install, then remove, check files - rune -0 cscli postoverflows install crowdsecurity/rdns - assert_file_exists "$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - rune -0 cscli postoverflows remove crowdsecurity/rdns - assert_file_not_exists "$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - - # delete is an alias for remove - rune -0 cscli postoverflows install crowdsecurity/rdns - assert_file_exists "$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - rune -0 cscli postoverflows delete crowdsecurity/rdns - assert_file_not_exists "$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - - # purge - assert_file_exists "$HUB_DIR/postoverflows/s00-enrich/crowdsecurity/rdns.yaml" - rune -0 cscli postoverflows remove crowdsecurity/rdns --purge - assert_file_not_exists "$HUB_DIR/postoverflows/s00-enrich/crowdsecurity/rdns.yaml" - - rune -0 cscli postoverflows install crowdsecurity/rdns crowdsecurity/cdn-whitelist - - # --all - rune -0 cscli postoverflows list -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" - - rune -0 cscli postoverflows remove --all - - rune -0 cscli postoverflows list -o raw - rune -1 grep -vc 'name,status,version,description' <(output) - assert_output "0" -} - -@test "cscli postoverflows remove --force" { - # remove a postoverflow that belongs to a collection - rune -0 cscli collections install crowdsecurity/auditd - rune -0 cscli postoverflows remove crowdsecurity/auditd-whitelisted-process - assert_stderr 
--partial "crowdsecurity/auditd-whitelisted-process belongs to collections: [crowdsecurity/auditd]" - assert_stderr --partial "Run 'sudo cscli postoverflows remove crowdsecurity/auditd-whitelisted-process --force' if you want to force remove this postoverflow" -} - -@test "cscli postoverflows upgrade" { - rune -1 cscli postoverflows upgrade - assert_stderr --partial "specify at least one postoverflow to upgrade or '--all'" - rune -1 cscli postoverflows upgrade blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in postoverflows" - rune -0 cscli postoverflows remove crowdsecurity/discord-crawler-whitelist --purge - rune -1 cscli postoverflows upgrade crowdsecurity/discord-crawler-whitelist - assert_stderr --partial "can't upgrade crowdsecurity/discord-crawler-whitelist: not installed" - rune -0 cscli postoverflows install crowdsecurity/discord-crawler-whitelist --download-only - rune -1 cscli postoverflows upgrade crowdsecurity/discord-crawler-whitelist - assert_stderr --partial "can't upgrade crowdsecurity/discord-crawler-whitelist: downloaded but not installed" - - # hash of the string "v0.0" - sha256_0_0="dfebecf42784a31aa3d009dbcec0c657154a034b45f49cf22a895373f6dbf63d" - - # add version 0.0 to all postoverflows - new_hub=$(jq --arg DIGEST "$sha256_0_0" <"$INDEX_PATH" '.postoverflows |= with_entries(.value.versions["0.0"] = {"digest": $DIGEST, "deprecated": false})') - echo "$new_hub" >"$INDEX_PATH" - - rune -0 cscli postoverflows install crowdsecurity/rdns - - echo "v0.0" > "$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - rune -0 cscli postoverflows inspect crowdsecurity/rdns -o json - rune -0 jq -e '.local_version=="0.0"' <(output) - - # upgrade - rune -0 cscli postoverflows upgrade crowdsecurity/rdns - rune -0 cscli postoverflows inspect crowdsecurity/rdns -o json - rune -0 jq -e '.local_version==.version' <(output) - - # taint - echo "dirty" >"$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - # XXX: should return error - rune -0 
cscli postoverflows upgrade crowdsecurity/rdns - assert_stderr --partial "crowdsecurity/rdns is tainted, --force to overwrite" - rune -0 cscli postoverflows inspect crowdsecurity/rdns -o json - rune -0 jq -e '.local_version=="?"' <(output) - - # force upgrade with taint - rune -0 cscli postoverflows upgrade crowdsecurity/rdns --force - rune -0 cscli postoverflows inspect crowdsecurity/rdns -o json - rune -0 jq -e '.local_version==.version' <(output) - - # multiple items - rune -0 cscli postoverflows install crowdsecurity/cdn-whitelist - echo "v0.0" >"$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - echo "v0.0" >"$CONFIG_DIR/postoverflows/s01-whitelist/cdn-whitelist.yaml" - rune -0 cscli postoverflows list -o json - rune -0 jq -e '[.postoverflows[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli postoverflows upgrade crowdsecurity/rdns crowdsecurity/cdn-whitelist - rune -0 cscli postoverflows list -o json - rune -0 jq -e 'any(.postoverflows[].local_version; .=="0.0") | not' <(output) - - # upgrade all - echo "v0.0" >"$CONFIG_DIR/postoverflows/s00-enrich/rdns.yaml" - echo "v0.0" >"$CONFIG_DIR/postoverflows/s01-whitelist/cdn-whitelist.yaml" - rune -0 cscli postoverflows list -o json - rune -0 jq -e '[.postoverflows[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli postoverflows upgrade --all - rune -0 cscli postoverflows list -o json - rune -0 jq -e 'any(.postoverflows[].local_version; .=="0.0") | not' <(output) -} diff --git a/test/bats/20_hub_scenarios.bats b/test/bats/20_hub_scenarios.bats deleted file mode 100644 index 3ab3d944c93..00000000000 --- a/test/bats/20_hub_scenarios.bats +++ /dev/null @@ -1,382 +0,0 @@ -#!/usr/bin/env bats -# vim: ft=bats:list:ts=8:sts=4:sw=4:et:ai:si: - -set -u - -setup_file() { - load "../lib/setup_file.sh" - ./instance-data load - HUB_DIR=$(config_get '.config_paths.hub_dir') - export HUB_DIR - INDEX_PATH=$(config_get '.config_paths.index_path') - export INDEX_PATH - CONFIG_DIR=$(config_get 
'.config_paths.config_dir') - export CONFIG_DIR -} - -teardown_file() { - load "../lib/teardown_file.sh" -} - -setup() { - load "../lib/setup.sh" - load "../lib/bats-file/load.bash" - ./instance-data load - hub_strip_index -} - -teardown() { - ./instance-crowdsec stop -} - -#---------- - -@test "cscli scenarios list" { - hub_purge_all - - # no items - rune -0 cscli scenarios list - assert_output --partial "SCENARIOS" - rune -0 cscli scenarios list -o json - assert_json '{scenarios:[]}' - rune -0 cscli scenarios list -o raw - assert_output 'name,status,version,description' - - # some items - rune -0 cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/telnet-bf - - rune -0 cscli scenarios list - assert_output --partial crowdsecurity/ssh-bf - assert_output --partial crowdsecurity/telnet-bf - rune -0 grep -c enabled <(output) - assert_output "2" - - rune -0 cscli scenarios list -o json - assert_output --partial crowdsecurity/ssh-bf - assert_output --partial crowdsecurity/telnet-bf - rune -0 jq '.scenarios | length' <(output) - assert_output "2" - - rune -0 cscli scenarios list -o raw - assert_output --partial crowdsecurity/ssh-bf - assert_output --partial crowdsecurity/telnet-bf - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" -} - -@test "cscli scenarios list -a" { - expected=$(jq <"$INDEX_PATH" -r '.scenarios | length') - - rune -0 cscli scenarios list -a - rune -0 grep -c disabled <(output) - assert_output "$expected" - - rune -0 cscli scenarios list -o json -a - rune -0 jq '.scenarios | length' <(output) - assert_output "$expected" - - rune -0 cscli scenarios list -o raw -a - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "$expected" - - # the list should be the same in all formats, and sorted (not case sensitive) - - list_raw=$(cscli scenarios list -o raw -a | tail -n +2 | cut -d, -f1) - list_human=$(cscli scenarios list -o human -a | tail -n +6 | head -n -1 | cut -d' ' -f2) - list_json=$(cscli 
scenarios list -o json -a | jq -r '.scenarios[].name') - - rune -0 sort -f <<<"$list_raw" - assert_output "$list_raw" - - assert_equal "$list_raw" "$list_json" - assert_equal "$list_raw" "$list_human" -} - -@test "cscli scenarios list [scenario]..." { - # non-existent - rune -1 cscli scenario install foo/bar - assert_stderr --partial "can't find 'foo/bar' in scenarios" - - # not installed - rune -0 cscli scenarios list crowdsecurity/ssh-bf - assert_output --regexp 'crowdsecurity/ssh-bf.*disabled' - - # install two items - rune -0 cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/telnet-bf - - # list an installed item - rune -0 cscli scenarios list crowdsecurity/ssh-bf - assert_output --regexp "crowdsecurity/ssh-bf.*enabled" - refute_output --partial "crowdsecurity/telnet-bf" - - # list multiple installed and non installed items - rune -0 cscli scenarios list crowdsecurity/ssh-bf crowdsecurity/telnet-bf crowdsecurity/aws-bf crowdsecurity/aws-bf - assert_output --partial "crowdsecurity/ssh-bf" - assert_output --partial "crowdsecurity/telnet-bf" - assert_output --partial "crowdsecurity/aws-bf" - - rune -0 cscli scenarios list crowdsecurity/ssh-bf -o json - rune -0 jq '.scenarios | length' <(output) - assert_output "1" - rune -0 cscli scenarios list crowdsecurity/ssh-bf crowdsecurity/telnet-bf crowdsecurity/aws-bf -o json - rune -0 jq '.scenarios | length' <(output) - assert_output "3" - - rune -0 cscli scenarios list crowdsecurity/ssh-bf -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "1" - rune -0 cscli scenarios list crowdsecurity/ssh-bf crowdsecurity/telnet-bf crowdsecurity/aws-bf -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "3" -} - -@test "cscli scenarios install" { - rune -1 cscli scenarios install - assert_stderr --partial 'requires at least 1 arg(s), only received 0' - - # not in hub - rune -1 cscli scenarios install crowdsecurity/blahblah - assert_stderr --partial "can't 
find 'crowdsecurity/blahblah' in scenarios" - - # simple install - rune -0 cscli scenarios install crowdsecurity/ssh-bf - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf --no-metrics - assert_output --partial 'crowdsecurity/ssh-bf' - assert_output --partial 'installed: true' - - # autocorrect - rune -1 cscli scenarios install crowdsecurity/ssh-tf - assert_stderr --partial "can't find 'crowdsecurity/ssh-tf' in scenarios, did you mean 'crowdsecurity/ssh-bf'?" - - # install multiple - rune -0 cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/telnet-bf - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf --no-metrics - assert_output --partial 'crowdsecurity/ssh-bf' - assert_output --partial 'installed: true' - rune -0 cscli scenarios inspect crowdsecurity/telnet-bf --no-metrics - assert_output --partial 'crowdsecurity/telnet-bf' - assert_output --partial 'installed: true' -} - -@test "cscli scenarios install (file location and download-only)" { - # simple install - rune -0 cscli scenarios install crowdsecurity/ssh-bf --download-only - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf --no-metrics - assert_output --partial 'crowdsecurity/ssh-bf' - assert_output --partial 'installed: false' - assert_file_exists "$HUB_DIR/scenarios/crowdsecurity/ssh-bf.yaml" - assert_file_not_exists "$CONFIG_DIR/scenarios/ssh-bf.yaml" - - rune -0 cscli scenarios install crowdsecurity/ssh-bf - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf --no-metrics - assert_output --partial 'installed: true' - assert_file_exists "$CONFIG_DIR/scenarios/ssh-bf.yaml" -} - -@test "cscli scenarios install --force (tainted)" { - rune -0 cscli scenarios install crowdsecurity/ssh-bf - echo "dirty" >"$CONFIG_DIR/scenarios/ssh-bf.yaml" - - rune -1 cscli scenarios install crowdsecurity/ssh-bf - assert_stderr --partial "error while installing 'crowdsecurity/ssh-bf': while enabling crowdsecurity/ssh-bf: crowdsecurity/ssh-bf is tainted, won't overwrite unless --force" - - rune -0 cscli 
scenarios install crowdsecurity/ssh-bf --force - assert_stderr --partial "Enabled crowdsecurity/ssh-bf" -} - -@test "cscli scenarios install --ignore (skip on errors)" { - rune -1 cscli scenarios install foo/bar crowdsecurity/ssh-bf - assert_stderr --partial "can't find 'foo/bar' in scenarios" - refute_stderr --partial "Enabled scenarios: crowdsecurity/ssh-bf" - - rune -0 cscli scenarios install foo/bar crowdsecurity/ssh-bf --ignore - assert_stderr --partial "can't find 'foo/bar' in scenarios" - assert_stderr --partial "Enabled scenarios: crowdsecurity/ssh-bf" -} - -@test "cscli scenarios inspect" { - rune -1 cscli scenarios inspect - assert_stderr --partial 'requires at least 1 arg(s), only received 0' - # required for metrics - ./instance-crowdsec start - - rune -1 cscli scenarios inspect blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in scenarios" - - # one item - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf --no-metrics - assert_line 'type: scenarios' - assert_line 'name: crowdsecurity/ssh-bf' - assert_line 'author: crowdsecurity' - assert_line 'path: scenarios/crowdsecurity/ssh-bf.yaml' - assert_line 'installed: false' - refute_line --partial 'Current metrics:' - - # one item, with metrics - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf - assert_line --partial 'Current metrics:' - - # one item, json - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o json - rune -0 jq -c '[.type, .name, .author, .path, .installed]' <(output) - assert_json '["scenarios","crowdsecurity/ssh-bf","crowdsecurity","scenarios/crowdsecurity/ssh-bf.yaml",false]' - - # one item, raw - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o raw - assert_line 'type: scenarios' - assert_line 'name: crowdsecurity/ssh-bf' - assert_line 'author: crowdsecurity' - assert_line 'path: scenarios/crowdsecurity/ssh-bf.yaml' - assert_line 'installed: false' - refute_line --partial 'Current metrics:' - - # multiple items - rune -0 cscli scenarios 
inspect crowdsecurity/ssh-bf crowdsecurity/telnet-bf --no-metrics - assert_output --partial 'crowdsecurity/ssh-bf' - assert_output --partial 'crowdsecurity/telnet-bf' - rune -1 grep -c 'Current metrics:' <(output) - assert_output "0" - - # multiple items, with metrics - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf crowdsecurity/telnet-bf - rune -0 grep -c 'Current metrics:' <(output) - assert_output "2" - - # multiple items, json - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf crowdsecurity/telnet-bf -o json - rune -0 jq -sc '[.[] | [.type, .name, .author, .path, .installed]]' <(output) - assert_json '[["scenarios","crowdsecurity/ssh-bf","crowdsecurity","scenarios/crowdsecurity/ssh-bf.yaml",false],["scenarios","crowdsecurity/telnet-bf","crowdsecurity","scenarios/crowdsecurity/telnet-bf.yaml",false]]' - - # multiple items, raw - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf crowdsecurity/telnet-bf -o raw - assert_output --partial 'crowdsecurity/ssh-bf' - assert_output --partial 'crowdsecurity/telnet-bf' - run -1 grep -c 'Current metrics:' <(output) - assert_output "0" -} - -@test "cscli scenarios remove" { - rune -1 cscli scenarios remove - assert_stderr --partial "specify at least one scenario to remove or '--all'" - rune -1 cscli scenarios remove blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in scenarios" - - rune -0 cscli scenarios install crowdsecurity/ssh-bf --download-only - rune -0 cscli scenarios remove crowdsecurity/ssh-bf - assert_stderr --partial "removing crowdsecurity/ssh-bf: not installed -- no need to remove" - - rune -0 cscli scenarios install crowdsecurity/ssh-bf - rune -0 cscli scenarios remove crowdsecurity/ssh-bf - assert_stderr --partial "Removed crowdsecurity/ssh-bf" - - rune -0 cscli scenarios remove crowdsecurity/ssh-bf --purge - assert_stderr --partial 'Removed source file [crowdsecurity/ssh-bf]' - - rune -0 cscli scenarios remove crowdsecurity/ssh-bf - assert_stderr --partial "removing 
crowdsecurity/ssh-bf: not installed -- no need to remove" - - rune -0 cscli scenarios remove crowdsecurity/ssh-bf --purge --debug - assert_stderr --partial 'removing crowdsecurity/ssh-bf: not downloaded -- no need to remove' - refute_stderr --partial 'Removed source file [crowdsecurity/ssh-bf]' - - # install, then remove, check files - rune -0 cscli scenarios install crowdsecurity/ssh-bf - assert_file_exists "$CONFIG_DIR/scenarios/ssh-bf.yaml" - rune -0 cscli scenarios remove crowdsecurity/ssh-bf - assert_file_not_exists "$CONFIG_DIR/scenarios/ssh-bf.yaml" - - # delete is an alias for remove - rune -0 cscli scenarios install crowdsecurity/ssh-bf - assert_file_exists "$CONFIG_DIR/scenarios/ssh-bf.yaml" - rune -0 cscli scenarios delete crowdsecurity/ssh-bf - assert_file_not_exists "$CONFIG_DIR/scenarios/ssh-bf.yaml" - - # purge - assert_file_exists "$HUB_DIR/scenarios/crowdsecurity/ssh-bf.yaml" - rune -0 cscli scenarios remove crowdsecurity/ssh-bf --purge - assert_file_not_exists "$HUB_DIR/scenarios/crowdsecurity/ssh-bf.yaml" - - rune -0 cscli scenarios install crowdsecurity/ssh-bf crowdsecurity/telnet-bf - - # --all - rune -0 cscli scenarios list -o raw - rune -0 grep -vc 'name,status,version,description' <(output) - assert_output "2" - - rune -0 cscli scenarios remove --all - - rune -0 cscli scenarios list -o raw - rune -1 grep -vc 'name,status,version,description' <(output) - assert_output "0" -} - -@test "cscli scenarios remove --force" { - # remove a scenario that belongs to a collection - rune -0 cscli collections install crowdsecurity/sshd - rune -0 cscli scenarios remove crowdsecurity/ssh-bf - assert_stderr --partial "crowdsecurity/ssh-bf belongs to collections: [crowdsecurity/sshd]" - assert_stderr --partial "Run 'sudo cscli scenarios remove crowdsecurity/ssh-bf --force' if you want to force remove this scenario" -} - -@test "cscli scenarios upgrade" { - rune -1 cscli scenarios upgrade - assert_stderr --partial "specify at least one scenario to upgrade or 
'--all'" - rune -1 cscli scenarios upgrade blahblah/blahblah - assert_stderr --partial "can't find 'blahblah/blahblah' in scenarios" - rune -0 cscli scenarios remove crowdsecurity/vsftpd-bf --purge - rune -1 cscli scenarios upgrade crowdsecurity/vsftpd-bf - assert_stderr --partial "can't upgrade crowdsecurity/vsftpd-bf: not installed" - rune -0 cscli scenarios install crowdsecurity/vsftpd-bf --download-only - rune -1 cscli scenarios upgrade crowdsecurity/vsftpd-bf - assert_stderr --partial "can't upgrade crowdsecurity/vsftpd-bf: downloaded but not installed" - - # hash of the string "v0.0" - sha256_0_0="dfebecf42784a31aa3d009dbcec0c657154a034b45f49cf22a895373f6dbf63d" - - # add version 0.0 to all scenarios - new_hub=$(jq --arg DIGEST "$sha256_0_0" <"$INDEX_PATH" '.scenarios |= with_entries(.value.versions["0.0"] = {"digest": $DIGEST, "deprecated": false})') - echo "$new_hub" >"$INDEX_PATH" - - rune -0 cscli scenarios install crowdsecurity/ssh-bf - - echo "v0.0" > "$CONFIG_DIR/scenarios/ssh-bf.yaml" - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o json - rune -0 jq -e '.local_version=="0.0"' <(output) - - # upgrade - rune -0 cscli scenarios upgrade crowdsecurity/ssh-bf - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o json - rune -0 jq -e '.local_version==.version' <(output) - - # taint - echo "dirty" >"$CONFIG_DIR/scenarios/ssh-bf.yaml" - # XXX: should return error - rune -0 cscli scenarios upgrade crowdsecurity/ssh-bf - assert_stderr --partial "crowdsecurity/ssh-bf is tainted, --force to overwrite" - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o json - rune -0 jq -e '.local_version=="?"' <(output) - - # force upgrade with taint - rune -0 cscli scenarios upgrade crowdsecurity/ssh-bf --force - rune -0 cscli scenarios inspect crowdsecurity/ssh-bf -o json - rune -0 jq -e '.local_version==.version' <(output) - - # multiple items - rune -0 cscli scenarios install crowdsecurity/telnet-bf - echo "v0.0" >"$CONFIG_DIR/scenarios/ssh-bf.yaml" - echo 
"v0.0" >"$CONFIG_DIR/scenarios/telnet-bf.yaml" - rune -0 cscli scenarios list -o json - rune -0 jq -e '[.scenarios[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli scenarios upgrade crowdsecurity/ssh-bf crowdsecurity/telnet-bf - rune -0 cscli scenarios list -o json - rune -0 jq -e 'any(.scenarios[].local_version; .=="0.0") | not' <(output) - - # upgrade all - echo "v0.0" >"$CONFIG_DIR/scenarios/ssh-bf.yaml" - echo "v0.0" >"$CONFIG_DIR/scenarios/telnet-bf.yaml" - rune -0 cscli scenarios list -o json - rune -0 jq -e '[.scenarios[].local_version]==["0.0","0.0"]' <(output) - rune -0 cscli scenarios upgrade --all - rune -0 cscli scenarios list -o json - rune -0 jq -e 'any(.scenarios[].local_version; .=="0.0") | not' <(output) -} diff --git a/test/bats/30_machines.bats b/test/bats/30_machines.bats index d4cce67d0b0..3d73bd096ae 100644 --- a/test/bats/30_machines.bats +++ b/test/bats/30_machines.bats @@ -30,9 +30,8 @@ teardown() { } @test "don't overwrite local credentials by default" { - rune -1 cscli machines add local -a -o json - rune -0 jq -r '.msg' <(stderr) - assert_output --partial 'already exists: please remove it, use "--force" or specify a different file with "-f"' + rune -1 cscli machines add local -a + assert_stderr --partial 'already exists: please remove it, use "--force" or specify a different file with "-f"' rune -0 cscli machines add local -a --force assert_stderr --partial "Machine 'local' successfully added to the local API." 
} diff --git a/test/bats/80_alerts.bats b/test/bats/80_alerts.bats index 6d84c1a1fce..f01e918925c 100644 --- a/test/bats/80_alerts.bats +++ b/test/bats/80_alerts.bats @@ -89,7 +89,7 @@ teardown() { assert_line --regexp "^ - AS *: *$" assert_line --regexp "^ - Begin *: .*$" assert_line --regexp "^ - End *: .*$" - assert_line --regexp "^ - Active Decisions *:$" + assert_line --regexp "^\| Active Decisions *\|$" assert_line --regexp "^.* ID .* scope:value .* action .* expiration .* created_at .*$" assert_line --regexp "^.* Ip:10.20.30.40 .* ban .*$" diff --git a/test/bats/90_decisions.bats b/test/bats/90_decisions.bats index 8601414db48..3c3ab9987ca 100644 --- a/test/bats/90_decisions.bats +++ b/test/bats/90_decisions.bats @@ -31,11 +31,7 @@ teardown() { @test "'decisions add' requires parameters" { rune -1 cscli decisions add - assert_stderr --partial "missing arguments, a value is required (--ip, --range or --scope and --value)" - - rune -1 cscli decisions add -o json - rune -0 jq -c '[ .level, .msg]' <(stderr | grep "^{") - assert_output '["fatal","missing arguments, a value is required (--ip, --range or --scope and --value)"]' + assert_stderr "Error: missing arguments, a value is required (--ip, --range or --scope and --value)" } @test "cscli decisions list, with and without --machine" { @@ -61,16 +57,13 @@ teardown() { @test "cscli decisions list, incorrect parameters" { rune -1 cscli decisions list --until toto - assert_stderr --partial 'unable to retrieve decisions: performing request: API error: while parsing duration: time: invalid duration \"toto\"' - rune -1 cscli decisions list --until toto -o json - rune -0 jq -c '[.level, .msg]' <(stderr | grep "^{") - assert_output '["fatal","unable to retrieve decisions: performing request: API error: while parsing duration: time: invalid duration \"toto\""]' + assert_stderr 'Error: unable to retrieve decisions: performing request: API error: while parsing duration: time: invalid duration "toto"' } @test "cscli 
decisions import" { # required input rune -1 cscli decisions import - assert_stderr --partial 'required flag(s) \"input\" not set"' + assert_stderr 'Error: required flag(s) "input" not set' # unsupported format rune -1 cscli decisions import -i - <<<'value\n5.6.7.8' --format xml @@ -172,7 +165,7 @@ teardown() { EOT assert_stderr --partial 'Parsing values' assert_stderr --partial 'Imported 1 decisions' - assert_file_contains "$LOGFILE" "invalid addr/range 'whatever': invalid address" + assert_file_contains "$LOGFILE" "invalid addr/range 'whatever': invalid ip address 'whatever'" rune -0 cscli decisions list -a -o json assert_json '[]' @@ -189,7 +182,7 @@ teardown() { EOT assert_stderr --partial 'Parsing values' assert_stderr --partial 'Imported 3 decisions' - assert_file_contains "$LOGFILE" "invalid addr/range 'bad-apple': invalid address" + assert_file_contains "$LOGFILE" "invalid addr/range 'bad-apple': invalid ip address 'bad-apple'" rune -0 cscli decisions list -a -o json rune -0 jq -r '.[0].decisions | length' <(output) diff --git a/test/bats/crowdsec-acquisition.bats b/test/bats/crowdsec-acquisition.bats new file mode 100644 index 00000000000..1a92624b4c4 --- /dev/null +++ b/test/bats/crowdsec-acquisition.bats @@ -0,0 +1,78 @@ +#!/usr/bin/env bats + +set -u + +setup_file() { + load "../lib/setup_file.sh" +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load + ACQUIS_DIR=$(config_get '.crowdsec_service.acquisition_dir') + mkdir -p "$ACQUIS_DIR" +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "malformed acqusition file" { + cat >"$ACQUIS_DIR/file.yaml" <<-EOT + filename: + - /path/to/file.log + labels: + type: syslog + EOT + + rune -1 "$CROWDSEC" -t + assert_stderr --partial "crowdsec init: while loading acquisition config: while configuring datasource of type file from $ACQUIS_DIR/file.yaml (position 0): cannot parse FileAcquisition 
configuration: yaml: unmarshal errors:" +} + +@test "datasource type detection" { + config_set '.common.log_level="debug" | .common.log_media="stdout"' + + # for backward compatibility, a missing source type is not a problem if it can be detected by the presence of other fields + + cat >"$ACQUIS_DIR/file.yaml" <<-EOT + filename: /path/to/file.log + labels: + type: syslog + --- + filenames: + - /path/to/file.log + labels: + type: syslog + EOT + + cat >"$ACQUIS_DIR"/journal.yaml <<-EOT + journalctl_filter: + - "_SYSTEMD_UNIT=ssh.service" + labels: + type: syslog + EOT + + # However, a wrong source type will raise a brow. + # This is currently not a fatal error because it has been tolerated in the past. + + cat >"$ACQUIS_DIR"/bad.yaml <<-EOT + source: docker + journalctl_filter: + - "_SYSTEMD_UNIT=ssh.service" + labels: + type: syslog + EOT + + rune -0 "$CROWDSEC" -t + assert_stderr --partial "datasource type missing in $ACQUIS_DIR/file.yaml (position 0): detected 'source=file'" + assert_stderr --partial "datasource type missing in $ACQUIS_DIR/file.yaml (position 1): detected 'source=file'" + assert_stderr --partial "datasource type missing in $ACQUIS_DIR/journal.yaml (position 0): detected 'source=journalctl'" + assert_stderr --partial "datasource type mismatch in $ACQUIS_DIR/bad.yaml (position 0): found 'docker' but should probably be 'journalctl'" +} diff --git a/test/bats/cscli-hubtype-inspect.bats b/test/bats/cscli-hubtype-inspect.bats new file mode 100644 index 00000000000..9c96aadb3ad --- /dev/null +++ b/test/bats/cscli-hubtype-inspect.bats @@ -0,0 +1,93 @@ +#!/usr/bin/env bats + +# Generic tests for the command "cscli inspect". +# +# Behavior that is specific to a hubtype should be tested in a separate file. 
+ +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + HUB_DIR=$(config_get '.config_paths.hub_dir') + export HUB_DIR + INDEX_PATH=$(config_get '.config_paths.index_path') + export INDEX_PATH + CONFIG_DIR=$(config_get '.config_paths.config_dir') + export CONFIG_DIR +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "cscli parsers inspect" { + rune -1 cscli parsers inspect + assert_stderr --partial 'requires at least 1 arg(s), only received 0' + # required for metrics + ./instance-crowdsec start + + rune -1 cscli parsers inspect blahblah/blahblah + assert_stderr --partial "can't find 'blahblah/blahblah' in parsers" + + # one item + rune -0 cscli parsers inspect crowdsecurity/sshd-logs --no-metrics + assert_line 'type: parsers' + assert_line 'name: crowdsecurity/sshd-logs' + assert_line 'path: parsers/s01-parse/crowdsecurity/sshd-logs.yaml' + assert_line 'installed: false' + refute_line --partial 'Current metrics:' + + # one item, with metrics + rune -0 cscli parsers inspect crowdsecurity/sshd-logs + assert_line --partial 'Current metrics:' + + # one item, json + rune -0 cscli parsers inspect crowdsecurity/sshd-logs -o json + rune -0 jq -c '[.type, .name, .path, .installed]' <(output) + assert_json '["parsers","crowdsecurity/sshd-logs","parsers/s01-parse/crowdsecurity/sshd-logs.yaml",false]' + + # one item, raw + rune -0 cscli parsers inspect crowdsecurity/sshd-logs -o raw + assert_line 'type: parsers' + assert_line 'name: crowdsecurity/sshd-logs' + assert_line 'path: parsers/s01-parse/crowdsecurity/sshd-logs.yaml' + assert_line 'installed: false' + refute_line --partial 'Current metrics:' + + # multiple items + rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists --no-metrics + assert_output --partial 'crowdsecurity/sshd-logs' + 
assert_output --partial 'crowdsecurity/whitelists' + rune -1 grep -c 'Current metrics:' <(output) + assert_output "0" + + # multiple items, with metrics + rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists + rune -0 grep -c 'Current metrics:' <(output) + assert_output "2" + + # multiple items, json + rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists -o json + rune -0 jq -sc '[.[] | [.type, .name, .path, .installed]]' <(output) + assert_json '[["parsers","crowdsecurity/sshd-logs","parsers/s01-parse/crowdsecurity/sshd-logs.yaml",false],["parsers","crowdsecurity/whitelists","parsers/s02-enrich/crowdsecurity/whitelists.yaml",false]]' + + # multiple items, raw + rune -0 cscli parsers inspect crowdsecurity/sshd-logs crowdsecurity/whitelists -o raw + assert_output --partial 'crowdsecurity/sshd-logs' + assert_output --partial 'crowdsecurity/whitelists' + rune -1 grep -c 'Current metrics:' <(output) + assert_output "0" +} diff --git a/test/bats/cscli-hubtype-install.bats b/test/bats/cscli-hubtype-install.bats new file mode 100644 index 00000000000..58c16dd968d --- /dev/null +++ b/test/bats/cscli-hubtype-install.bats @@ -0,0 +1,301 @@ +#!/usr/bin/env bats + +# Generic tests for the command "cscli install". +# +# Behavior that is specific to a hubtype should be tested in a separate file. 
+ +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + HUB_DIR=$(config_get '.config_paths.hub_dir') + export HUB_DIR +# INDEX_PATH=$(config_get '.config_paths.index_path') +# export INDEX_PATH + CONFIG_DIR=$(config_get '.config_paths.config_dir') + export CONFIG_DIR +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load + # make sure the hub is empty + hub_purge_all +} + +teardown() { + # most tests don't need the service, but we ensure it's stopped + ./instance-crowdsec stop +} + +#---------- + +@test "cscli install (no argument)" { + rune -1 cscli parsers install + refute_output + assert_stderr --partial 'requires at least 1 arg(s), only received 0' +} + +@test "cscli install (aliased)" { + rune -1 cscli parser install + refute_output + assert_stderr --partial 'requires at least 1 arg(s), only received 0' +} + +@test "install an item (non-existent)" { + rune -1 cscli parsers install foo/bar + assert_stderr --partial "can't find 'foo/bar' in parsers" +} + +@test "install an item (dry run)" { + rune -0 cscli parsers install crowdsecurity/whitelists --dry-run + assert_output - --regexp <<-EOT + Action plan: + 📥 download + parsers: crowdsecurity/whitelists \([0-9]+.[0-9]+\) + ✅ enable + parsers: crowdsecurity/whitelists + + Dry run, no action taken. + EOT + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) + assert_file_not_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" +} + +@test "install an item (dry-run, de-duplicate commands)" { + rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/whitelists --dry-run --output raw + assert_output - --regexp <<-EOT + Action plan: + 📥 download parsers:crowdsecurity/whitelists \([0-9]+.[0-9]+\) + ✅ enable parsers:crowdsecurity/whitelists + + Dry run, no action taken. 
+ EOT + refute_stderr +} + +@test "install an item" { + rune -0 cscli parsers install crowdsecurity/whitelists + assert_output - <<-EOT + downloading parsers:crowdsecurity/whitelists + enabling parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==true' <(output) + assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" +} + +@test "install an item (autocorrect)" { + rune -1 cscli parsers install crowdsecurity/whatelists + assert_stderr --partial "can't find 'crowdsecurity/whatelists' in parsers, did you mean 'crowdsecurity/whitelists'?" + refute_output +} + +@test "install an item (download only)" { + assert_file_not_exists "$HUB_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" + rune -0 cscli parsers install crowdsecurity/whitelists --download-only + assert_output - <<-EOT + downloading parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) + assert_file_exists "$HUB_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" +} + +@test "install an item (already installed)" { + rune -0 cscli parsers install crowdsecurity/whitelists + rune -0 cscli parsers install crowdsecurity/whitelists --dry-run + assert_output "Nothing to do." + refute_stderr + rune -0 cscli parsers install crowdsecurity/whitelists + assert_output "Nothing to do." + refute_stderr +} + +@test "install an item (force is no-op if not tainted)" { + rune -0 cscli parsers install crowdsecurity/whitelists + rune -0 cscli parsers install crowdsecurity/whitelists + assert_output "Nothing to do." + refute_stderr + rune -0 cscli parsers install crowdsecurity/whitelists --force + assert_output "Nothing to do." 
+ refute_stderr +} + +@test "install an item (tainted, requires --force)" { + rune -0 cscli parsers install crowdsecurity/whitelists + echo "dirty" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + + rune -0 cscli parsers install crowdsecurity/whitelists --dry-run + assert_output - --stderr <<-EOT + WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite + Nothing to do. + EOT + refute_stderr + + # XXX should this fail with status 1 instead? + rune -0 cscli parsers install crowdsecurity/whitelists + assert_output - <<-EOT + WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite + Nothing to do. + EOT + refute_stderr + + rune -0 cscli parsers install crowdsecurity/whitelists --force + assert_output - <<-EOT + downloading parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==true' <(output) +} + +@test "install multiple items" { + rune -0 cscli parsers install crowdsecurity/pgsql-logs crowdsecurity/postfix-logs + rune -0 cscli parsers inspect crowdsecurity/pgsql-logs --no-metrics -o json + rune -0 jq -e '.installed==true' <(output) + rune -0 cscli parsers inspect crowdsecurity/postfix-logs --no-metrics -o json + rune -0 jq -e '.installed==true' <(output) +} + +@test "install multiple items (some already installed)" { + rune -0 cscli parsers install crowdsecurity/pgsql-logs + rune -0 cscli parsers install crowdsecurity/pgsql-logs crowdsecurity/postfix-logs --dry-run + assert_output - --regexp <<-EOT + Action plan: + 📥 download + parsers: crowdsecurity/postfix-logs \([0-9]+.[0-9]+\) + ✅ enable + parsers: crowdsecurity/postfix-logs + + Dry run, no action taken. + EOT + refute_stderr +} + +@test "install one or multiple items (ignore errors)" { + rune -0 cscli parsers install foo/bar --ignore + assert_stderr --partial "can't find 'foo/bar' in parsers" + assert_output "Nothing to do." 
+ + rune -0 cscli parsers install crowdsecurity/whitelists + echo "dirty" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + # XXX: this is not testing '--ignore' anymore; TODO find a better error to ignore + # and maybe re-evaluate the --ignore flag + rune -0 cscli parsers install crowdsecurity/whitelists --ignore + assert_output - <<-EOT + WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite + Nothing to do. + EOT + refute_stderr + + # error on one item, should still install the others + rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/pgsql-logs --ignore + refute_stderr + assert_output - <<-EOT + WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite + downloading parsers:crowdsecurity/pgsql-logs + enabling parsers:crowdsecurity/pgsql-logs + + $RELOAD_MESSAGE + EOT + rune -0 cscli parsers inspect crowdsecurity/pgsql-logs --no-metrics -o json + rune -0 jq -e '.installed==true' <(output) +} + +@test "override part of a collection with local items" { + # A collection will use a local item to fulfil a dependency provided it has + # the correct name field. 
+ + mkdir -p "$CONFIG_DIR/parsers/s01-parse" + echo "name: crowdsecurity/sshd-logs" > "$CONFIG_DIR/parsers/s01-parse/sshd-logs.yaml" + rune -0 cscli parsers list -o json + rune -0 jq -c '.parsers[] | [.name,.status]' <(output) + assert_json '["crowdsecurity/sshd-logs","enabled,local"]' + + # attempt to install from hub + rune -0 cscli parsers install crowdsecurity/sshd-logs + assert_line 'parsers:crowdsecurity/sshd-logs - not downloading local item' + rune -0 cscli parsers list -o json + rune -0 jq -c '.parsers[] | [.name,.status]' <(output) + assert_json '["crowdsecurity/sshd-logs","enabled,local"]' + + # attempt to install from a collection + rune -0 cscli collections install crowdsecurity/sshd + assert_line 'parsers:crowdsecurity/sshd-logs - not downloading local item' + + # verify it installed the rest of the collection + assert_line 'enabling contexts:crowdsecurity/bf_base' + assert_line 'enabling collections:crowdsecurity/sshd' + + # remove them + rune -0 cscli collections delete crowdsecurity/sshd --force --purge + rune -0 rm "$CONFIG_DIR/parsers/s01-parse/sshd-logs.yaml" + + # do the same with a different file name + echo "name: crowdsecurity/sshd-logs" > "$CONFIG_DIR/parsers/s01-parse/something.yaml" + rune -0 cscli parsers list -o json + rune -0 jq -c '.parsers[] | [.name,.status]' <(output) + assert_json '["crowdsecurity/sshd-logs","enabled,local"]' + + # attempt to install from hub + rune -0 cscli parsers install crowdsecurity/sshd-logs + assert_line 'parsers:crowdsecurity/sshd-logs - not downloading local item' + + # attempt to install from a collection + rune -0 cscli collections install crowdsecurity/sshd + assert_line 'parsers:crowdsecurity/sshd-logs - not downloading local item' + + # verify it installed the rest of the collection + assert_line 'enabling contexts:crowdsecurity/bf_base' + assert_line 'enabling collections:crowdsecurity/sshd' +} + +@test "a local item can override an official one, if it's not installed" { + mkdir -p 
"$CONFIG_DIR/parsers/s02-enrich" + rune -0 cscli parsers install crowdsecurity/whitelists --download-only + echo "name: crowdsecurity/whitelists" > "$CONFIG_DIR/parsers/s02-enrich/hi.yaml" + # no warning + rune -0 cscli parsers list + refute_stderr + rune -0 cscli parsers list -o json + rune -0 jq -e '.installed,.local==true,true' <(output) +} + +@test "conflicting item names: local and non local - the local one has priority" { + mkdir -p "$CONFIG_DIR/parsers/s02-enrich" + rune -0 cscli parsers install crowdsecurity/whitelists + echo "name: crowdsecurity/whitelists" > "$CONFIG_DIR/parsers/s02-enrich/hi.yaml" + rune -0 cscli parsers list -o json + rune -0 jq -e '.installed,.local==true,true' <(output) + rune -0 cscli parsers list + assert_stderr --partial "multiple parsers named crowdsecurity/whitelists: ignoring $CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" +} + +@test "conflicting item names: both local, the last one wins" { + mkdir -p "$CONFIG_DIR/parsers/s02-enrich" + echo "name: crowdsecurity/whitelists" > "$CONFIG_DIR/parsers/s02-enrich/one.yaml" + echo "name: crowdsecurity/whitelists" > "$CONFIG_DIR/parsers/s02-enrich/two.yaml" + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + rune -0 jq -r '.local_path' <(output) + assert_output --partial "/parsers/s02-enrich/two.yaml" + rune -0 cscli parsers list + assert_stderr --partial "multiple parsers named crowdsecurity/whitelists: ignoring $CONFIG_DIR/parsers/s02-enrich/one.yaml" +} diff --git a/test/bats/cscli-hubtype-list.bats b/test/bats/cscli-hubtype-list.bats new file mode 100644 index 00000000000..14113650c74 --- /dev/null +++ b/test/bats/cscli-hubtype-list.bats @@ -0,0 +1,130 @@ +#!/usr/bin/env bats + +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + HUB_DIR=$(config_get '.config_paths.hub_dir') + export HUB_DIR + INDEX_PATH=$(config_get '.config_paths.index_path') + export INDEX_PATH + CONFIG_DIR=$(config_get '.config_paths.config_dir') + export CONFIG_DIR +} 
+ +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "cscli parsers list" { + hub_purge_all + + # no items + rune -0 cscli parsers list + assert_output --partial "PARSERS" + rune -0 cscli parsers list -o json + assert_json '{parsers:[]}' + rune -0 cscli parsers list -o raw + assert_output 'name,status,version,description' + + # some items + rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/windows-auth + + rune -0 cscli parsers list + assert_output --partial crowdsecurity/whitelists + assert_output --partial crowdsecurity/windows-auth + rune -0 grep -c enabled <(output) + assert_output "2" + + rune -0 cscli parsers list -o json + assert_output --partial crowdsecurity/whitelists + assert_output --partial crowdsecurity/windows-auth + rune -0 jq '.parsers | length' <(output) + assert_output "2" + + rune -0 cscli parsers list -o raw + assert_output --partial crowdsecurity/whitelists + assert_output --partial crowdsecurity/windows-auth + rune -0 grep -vc 'name,status,version,description' <(output) + assert_output "2" +} + +@test "cscli parsers list -a" { + expected=$(jq <"$INDEX_PATH" -r '.parsers | length') + + rune -0 cscli parsers list -a + rune -0 grep -c disabled <(output) + assert_output "$expected" + + rune -0 cscli parsers list -o json -a + rune -0 jq '.parsers | length' <(output) + assert_output "$expected" + + rune -0 cscli parsers list -o raw -a + rune -0 grep -vc 'name,status,version,description' <(output) + assert_output "$expected" + + # the list should be the same in all formats, and sorted (not case sensitive) + + list_raw=$(cscli parsers list -o raw -a | tail -n +2 | cut -d, -f1) + list_human=$(cscli parsers list -o human -a | tail -n +6 | head -n -1 | cut -d' ' -f2) + list_json=$(cscli parsers list -o json -a | jq -r '.parsers[].name') + + # use python to sort 
because it handles "_" like go + rune -0 python3 -c 'import sys; print("".join(sorted(sys.stdin.readlines(), key=str.casefold)), end="")' <<<"$list_raw" + assert_output "$list_raw" + + assert_equal "$list_raw" "$list_json" + assert_equal "$list_raw" "$list_human" +} + +@test "cscli parsers list [parser]..." { + # non-existent + rune -1 cscli parsers install foo/bar + assert_stderr --partial "can't find 'foo/bar' in parsers" + + # not installed + rune -0 cscli parsers list crowdsecurity/whitelists + assert_output --regexp 'crowdsecurity/whitelists.*disabled' + + # install two items + rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/windows-auth + + # list an installed item + rune -0 cscli parsers list crowdsecurity/whitelists + assert_output --regexp "crowdsecurity/whitelists.*enabled" + refute_output --partial "crowdsecurity/windows-auth" + + # list multiple installed and non installed items + rune -0 cscli parsers list crowdsecurity/whitelists crowdsecurity/windows-auth crowdsecurity/traefik-logs + assert_output --partial "crowdsecurity/whitelists" + assert_output --partial "crowdsecurity/windows-auth" + assert_output --partial "crowdsecurity/traefik-logs" + + rune -0 cscli parsers list crowdsecurity/whitelists -o json + rune -0 jq '.parsers | length' <(output) + assert_output "1" + rune -0 cscli parsers list crowdsecurity/whitelists crowdsecurity/windows-auth crowdsecurity/traefik-logs -o json + rune -0 jq '.parsers | length' <(output) + assert_output "3" + + rune -0 cscli parsers list crowdsecurity/whitelists -o raw + rune -0 grep -vc 'name,status,version,description' <(output) + assert_output "1" + rune -0 cscli parsers list crowdsecurity/whitelists crowdsecurity/windows-auth crowdsecurity/traefik-logs -o raw + rune -0 grep -vc 'name,status,version,description' <(output) + assert_output "3" +} diff --git a/test/bats/cscli-hubtype-remove.bats b/test/bats/cscli-hubtype-remove.bats new file mode 100644 index 00000000000..32db8efe788 --- 
/dev/null +++ b/test/bats/cscli-hubtype-remove.bats @@ -0,0 +1,245 @@ +#!/usr/bin/env bats + +# Generic tests for the command "cscli remove". +# +# Behavior that is specific to a hubtype should be tested in a separate file. + + +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + HUB_DIR=$(config_get '.config_paths.hub_dir') + export HUB_DIR +# INDEX_PATH=$(config_get '.config_paths.index_path') +# export INDEX_PATH + CONFIG_DIR=$(config_get '.config_paths.config_dir') + export CONFIG_DIR +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load + # make sure the hub is empty + hub_purge_all +} + +teardown() { + # most tests don't need the service, but we ensure it's stopped + ./instance-crowdsec stop +} + +#---------- + +@test "cscli remove (no argument)" { + rune -1 cscli parsers remove + refute_output + assert_stderr --partial "specify at least one parser to remove or '--all'" +} + +@test "cscli remove (aliased)" { + rune -1 cscli parser remove + refute_output + assert_stderr --partial "specify at least one parser to remove or '--all'" +} + +@test "cscli delete (alias of remove)" { + rune -1 cscli parsers delete + refute_output + assert_stderr --partial "specify at least one parser to remove or '--all'" +} + +@test "remove an item (non-existent)" { + rune -1 cscli parsers remove foo/bar + refute_output + assert_stderr --partial "can't find 'foo/bar' in parsers" +} + +@test "remove an item (not downloaded)" { + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.downloaded==false' <(output) + + rune -0 cscli parsers remove crowdsecurity/whitelists --dry-run + assert_output "Nothing to do." + refute_stderr + rune -0 cscli parsers remove crowdsecurity/whitelists + assert_output "Nothing to do." 
+ refute_stderr + rune -0 cscli parsers remove crowdsecurity/whitelists --force + assert_output "Nothing to do." + refute_stderr + rune -0 cscli parsers remove crowdsecurity/whitelists --purge + assert_output "Nothing to do." + refute_stderr +} + +@test "remove an item (not installed)" { + rune -0 cscli parsers install crowdsecurity/whitelists --download-only + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) + + rune -0 cscli parsers remove crowdsecurity/whitelists --dry-run + assert_output "Nothing to do." + refute_stderr + rune -0 cscli parsers remove crowdsecurity/whitelists + assert_output "Nothing to do." + refute_stderr + rune -0 cscli parsers remove crowdsecurity/whitelists --force + assert_output "Nothing to do." + refute_stderr + rune -0 cscli parsers remove crowdsecurity/whitelists --purge + assert_output --partial "purging parsers:crowdsecurity/whitelists" +} + +@test "remove an item (dry run)" { + rune -0 cscli parsers install crowdsecurity/whitelists + assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + + rune -0 cscli parsers remove crowdsecurity/whitelists --dry-run + assert_output - --regexp <<-EOT + Action plan: + ❌ disable + parsers: crowdsecurity/whitelists + + Dry run, no action taken. 
+ EOT + refute_stderr + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==true' <(output) + assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" +} + +@test "remove an item" { + rune -0 cscli parsers install crowdsecurity/whitelists + assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + + rune -0 cscli parsers remove crowdsecurity/whitelists + assert_output - <<-EOT + disabling parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) + assert_file_not_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + assert_file_exists "$HUB_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" +} + +@test "remove an item (purge)" { + rune -0 cscli parsers install crowdsecurity/whitelists + assert_file_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + + rune -0 cscli parsers remove crowdsecurity/whitelists --purge + assert_output - <<-EOT + disabling parsers:crowdsecurity/whitelists + purging parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.downloaded==false' <(output) + assert_file_not_exists "$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + assert_file_not_exists "$HUB_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" +} + +@test "remove multiple items" { + rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/windows-auth + rune -0 cscli parsers remove crowdsecurity/whitelists crowdsecurity/windows-auth --dry-run + assert_output - --regexp <<-EOT + Action plan: + ❌ disable + parsers: crowdsecurity/whitelists, crowdsecurity/windows-auth + + Dry run, no action taken. 
+ EOT + refute_stderr + + rune -0 cscli parsers remove crowdsecurity/whitelists crowdsecurity/windows-auth + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) + rune -0 cscli parsers inspect crowdsecurity/windows-auth --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) +} + +@test "remove all items of a same type" { + rune -0 cscli parsers install crowdsecurity/whitelists crowdsecurity/windows-auth + + rune -1 cscli parsers remove crowdsecurity/whitelists --all + assert_stderr "Error: can't specify items and '--all' at the same time" + + rune -0 cscli parsers remove --all --dry-run + assert_output - --regexp <<-EOT + Action plan: + ❌ disable + parsers: crowdsecurity/whitelists, crowdsecurity/windows-auth + + Dry run, no action taken. + EOT + refute_stderr + + rune -0 cscli parsers remove --all + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) + rune -0 cscli parsers inspect crowdsecurity/windows-auth --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) +} + +@test "remove an item (tainted, requires --force)" { + rune -0 cscli parsers install crowdsecurity/whitelists + echo "dirty" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + + rune -1 cscli parsers remove crowdsecurity/whitelists --dry-run + assert_stderr --partial "crowdsecurity/whitelists is tainted, use '--force' to remove" + refute_output + + rune -1 cscli parsers remove crowdsecurity/whitelists + assert_stderr --partial "crowdsecurity/whitelists is tainted, use '--force' to remove" + refute_output + + rune -0 cscli parsers remove crowdsecurity/whitelists --force + assert_output - <<-EOT + disabling parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + rune -0 cscli parsers inspect crowdsecurity/whitelists --no-metrics -o json + rune -0 jq -e '.installed==false' <(output) + assert_file_not_exists 
"$CONFIG_DIR/parsers/s02-enrich/crowdsecurity/whitelists.yaml" +} + +@test "remove an item that belongs to a collection (requires --force)" { + rune -0 cscli collections install crowdsecurity/sshd + # XXX: should exit with 1? + rune -0 cscli parsers remove crowdsecurity/sshd-logs + assert_output "Nothing to do." + assert_stderr --partial "crowdsecurity/sshd-logs belongs to collections: [crowdsecurity/sshd]" + assert_stderr --partial "Run 'sudo cscli parsers remove crowdsecurity/sshd-logs --force' if you want to force remove this parser" + assert_file_exists "$CONFIG_DIR/parsers/s01-parse/sshd-logs.yaml" + + rune -0 cscli parsers remove crowdsecurity/sshd-logs --force + assert_output - <<-EOT + disabling parsers:crowdsecurity/sshd-logs + + $RELOAD_MESSAGE + EOT + refute_stderr + assert_file_not_exists "$CONFIG_DIR/parsers/s01-parse/sshd-logs.yaml" +} + +@test "remove an item (autocomplete)" { + rune -0 cscli parsers install crowdsecurity/whitelists + rune -0 cscli __complete parsers remove crowd + assert_stderr --partial '[Debug] parsers: [crowdsecurity/whitelists]' + assert_output --partial 'crowdsecurity/whitelists' +} diff --git a/test/bats/cscli-hubtype-upgrade.bats b/test/bats/cscli-hubtype-upgrade.bats new file mode 100644 index 00000000000..4244e611cf6 --- /dev/null +++ b/test/bats/cscli-hubtype-upgrade.bats @@ -0,0 +1,253 @@ +#!/usr/bin/env bats + +# Generic tests for the upgrade of hub items and data files. +# +# Commands under test: +# cscli upgrade +# +# This file should test behavior that can be applied to all types. 
+ +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + HUB_DIR=$(config_get '.config_paths.hub_dir') + export HUB_DIR + INDEX_PATH=$(config_get '.config_paths.index_path') + export INDEX_PATH + CONFIG_DIR=$(config_get '.config_paths.config_dir') + export CONFIG_DIR +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load + # make sure the hub is empty + hub_purge_all +} + +teardown() { + # most tests don't need the service, but we ensure it's stopped + ./instance-crowdsec stop +} + +hub_inject_v0() { + # add a version 0.0 to all parsers + + # hash of the string "v0.0" + sha256_0_0="daa1832414a685d69269e0ae15024b908f4602db45f9900e9c6e7f204af207c0" + + new_hub=$(jq --arg DIGEST "$sha256_0_0" <"$INDEX_PATH" '.parsers |= with_entries(.value.versions["0.0"] = {"digest": $DIGEST, "deprecated": false})') + echo "$new_hub" >"$INDEX_PATH" +} + +install_v0() { + local hubtype=$1 + shift + local item_name=$1 + shift + + cscli "$hubtype" install "$item_name" + printf "%s" "v0.0" > "$(jq -r '.local_path' <(cscli "$hubtype" inspect "$item_name" --no-metrics -o json))" +} + +#---------- + +@test "cscli upgrade (no argument)" { + rune -1 cscli parsers upgrade + refute_output + assert_stderr --partial "specify at least one parser to upgrade or '--all'" +} + +@test "cscli upgrade (aliased)" { + rune -1 cscli parser upgrade + refute_output + assert_stderr --partial "specify at least one parser to upgrade or '--all'" +} + +@test "upgrade an item (non-existent)" { + rune -1 cscli parsers upgrade foo/bar + assert_stderr --partial "can't find 'foo/bar' in parsers" +} + +@test "upgrade an item (non installed)" { + rune -0 cscli parsers upgrade crowdsecurity/whitelists + assert_output - <<-EOT + downloading parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers install crowdsecurity/whitelists --download-only + rune -0 
cscli parsers upgrade crowdsecurity/whitelists + assert_output 'Nothing to do.' + refute_stderr +} + +@test "upgrade an item (up-to-date)" { + rune -0 cscli parsers install crowdsecurity/whitelists + rune -0 cscli parsers upgrade crowdsecurity/whitelists --dry-run + assert_output 'Nothing to do.' + rune -0 cscli parsers upgrade crowdsecurity/whitelists + assert_output 'Nothing to do.' +} + +@test "upgrade an item (dry run)" { + hub_inject_v0 + install_v0 parsers crowdsecurity/whitelists + latest=$(get_latest_version parsers crowdsecurity/whitelists) + + rune -0 cscli parsers upgrade crowdsecurity/whitelists --dry-run + assert_output - <<-EOT + Action plan: + 📥 download + parsers: crowdsecurity/whitelists (0.0 -> $latest) + + Dry run, no action taken. + EOT + refute_stderr +} + +get_latest_version() { + local hubtype=$1 + shift + local item_name=$1 + shift + + cscli "$hubtype" inspect "$item_name" -o json | jq -r '.version' +} + +@test "upgrade an item" { + hub_inject_v0 + install_v0 parsers crowdsecurity/whitelists + + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + rune -0 jq -e '.local_version=="0.0"' <(output) + + rune -0 cscli parsers upgrade crowdsecurity/whitelists + assert_output - <<-EOT + downloading parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + + # the version is now the latest + rune -0 jq -e '.local_version==.version' <(output) +} + +@test "upgrade an item (tainted, requires --force)" { + rune -0 cscli parsers install crowdsecurity/whitelists + echo "dirty" >"$CONFIG_DIR/parsers/s02-enrich/whitelists.yaml" + + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + rune -0 jq -e '.local_version=="?"' <(output) + + rune -0 cscli parsers upgrade crowdsecurity/whitelists --dry-run + assert_output - <<-EOT + WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite + Nothing to do. 
+ EOT + refute_stderr + + rune -0 cscli parsers upgrade crowdsecurity/whitelists + assert_output - <<-EOT + WARN parsers:crowdsecurity/whitelists is tainted, use '--force' to overwrite + Nothing to do. + EOT + refute_stderr + + rune -0 cscli parsers upgrade crowdsecurity/whitelists --force + assert_output - <<-EOT + downloading parsers:crowdsecurity/whitelists + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + rune -0 jq -e '.local_version==.version' <(output) +} + +@test "upgrade multiple items" { + hub_inject_v0 + + install_v0 parsers crowdsecurity/whitelists + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + rune -0 jq -e '.local_version=="0.0"' <(output) + latest_whitelists=$(get_latest_version parsers crowdsecurity/whitelists) + + install_v0 parsers crowdsecurity/sshd-logs + rune -0 cscli parsers inspect crowdsecurity/sshd-logs -o json + rune -0 jq -e '.local_version=="0.0"' <(output) + latest_sshd=$(get_latest_version parsers crowdsecurity/sshd-logs) + + rune -0 cscli parsers upgrade crowdsecurity/whitelists crowdsecurity/sshd-logs --dry-run + assert_output - <<-EOT + Action plan: + 📥 download + parsers: crowdsecurity/sshd-logs (0.0 -> $latest_sshd), crowdsecurity/whitelists (0.0 -> $latest_whitelists) + + Dry run, no action taken. 
+ EOT + refute_stderr + + rune -0 cscli parsers upgrade crowdsecurity/whitelists crowdsecurity/sshd-logs + assert_output - <<-EOT + downloading parsers:crowdsecurity/whitelists + downloading parsers:crowdsecurity/sshd-logs + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + rune -0 jq -e '.local_version==.version' <(output) + + rune -0 cscli parsers inspect crowdsecurity/sshd-logs -o json + rune -0 jq -e '.local_version==.version' <(output) +} + +@test "upgrade all items of the same type" { + hub_inject_v0 + + install_v0 parsers crowdsecurity/whitelists + install_v0 parsers crowdsecurity/sshd-logs + install_v0 parsers crowdsecurity/windows-auth + + rune -0 cscli parsers upgrade --all + assert_output - <<-EOT + downloading parsers:crowdsecurity/sshd-logs + downloading parsers:crowdsecurity/whitelists + downloading parsers:crowdsecurity/windows-auth + + $RELOAD_MESSAGE + EOT + refute_stderr + + rune -0 cscli parsers inspect crowdsecurity/whitelists -o json + rune -0 jq -e '.local_version==.version' <(output) + + rune -0 cscli parsers inspect crowdsecurity/sshd-logs -o json + rune -0 jq -e '.local_version==.version' <(output) + + rune -0 cscli parsers inspect crowdsecurity/windows-auth -o json + rune -0 jq -e '.local_version==.version' <(output) +} + +@test "upgrade an item (autocomplete)" { + rune -0 cscli parsers install crowdsecurity/whitelists + rune -0 cscli __complete parsers upgrade crowd + assert_stderr --partial '[Debug] parsers: [crowdsecurity/whitelists]' + assert_output --partial 'crowdsecurity/whitelists' +} + diff --git a/test/bats/cscli-parsers.bats b/test/bats/cscli-parsers.bats new file mode 100644 index 00000000000..6ff138e9fd8 --- /dev/null +++ b/test/bats/cscli-parsers.bats @@ -0,0 +1,44 @@ +#!/usr/bin/env bats + +# Tests for the "cscli parsers" behavior that is not covered by cscli-hubtype-*.bats + +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + 
HUB_DIR=$(config_get '.config_paths.hub_dir') + export HUB_DIR + INDEX_PATH=$(config_get '.config_paths.index_path') + export INDEX_PATH + CONFIG_DIR=$(config_get '.config_paths.config_dir') + export CONFIG_DIR +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "cscli parsers inspect (includes the stage attribute)" { + rune -0 cscli parsers inspect crowdsecurity/sshd-logs --no-metrics -o human + assert_line 'stage: s01-parse' + + rune -0 cscli parsers inspect crowdsecurity/sshd-logs --no-metrics -o raw + assert_line 'stage: s01-parse' + + rune -0 cscli parsers inspect crowdsecurity/sshd-logs --no-metrics -o json + rune -0 jq -r '.stage' <(output) + assert_output 's01-parse' +} diff --git a/test/bats/cscli-postoverflows.bats b/test/bats/cscli-postoverflows.bats new file mode 100644 index 00000000000..979ee81defb --- /dev/null +++ b/test/bats/cscli-postoverflows.bats @@ -0,0 +1,44 @@ +#!/usr/bin/env bats + +# Tests for the "cscli postoverflows" behavior that is not covered by cscli-hubtype-*.bats + +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + HUB_DIR=$(config_get '.config_paths.hub_dir') + export HUB_DIR + INDEX_PATH=$(config_get '.config_paths.index_path') + export INDEX_PATH + CONFIG_DIR=$(config_get '.config_paths.config_dir') + export CONFIG_DIR +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "cscli postoverflows inspect (includes the stage attribute)" { + rune -0 cscli postoverflows inspect crowdsecurity/rdns --no-metrics -o human + assert_line 'stage: s00-enrich' + + rune -0 cscli postoverflows inspect crowdsecurity/rdns --no-metrics -o raw + assert_line 'stage: 
s00-enrich' + + rune -0 cscli postoverflows inspect crowdsecurity/rdns --no-metrics -o json + rune -0 jq -r '.stage' <(output) + assert_output 's00-enrich' +} diff --git a/test/bats/hub-index.bats b/test/bats/hub-index.bats new file mode 100644 index 00000000000..a609974d67a --- /dev/null +++ b/test/bats/hub-index.bats @@ -0,0 +1,357 @@ +#!/usr/bin/env bats + +set -u + +setup_file() { + load "../lib/setup_file.sh" + ./instance-data load + INDEX_PATH=$(config_get '.config_paths.index_path') + export INDEX_PATH +} + +teardown_file() { + load "../lib/teardown_file.sh" +} + +setup() { + load "../lib/setup.sh" + load "../lib/bats-file/load.bash" + ./instance-data load +} + +teardown() { + ./instance-crowdsec stop +} + +#---------- + +@test "malformed index - null item" { + yq -o json >"$INDEX_PATH" <<-'EOF' + parsers: + author/pars1: + EOF + + rune -1 cscli hub list + assert_stderr --partial "invalid hub index: parsers:author/pars1 has no index metadata." +} + +@test "malformed index - no download path" { + yq -o json >"$INDEX_PATH" <<-'EOF' + parsers: + author/pars1: + version: "0.0" + versions: + 0.0: + digest: daa1832414a685d69269e0ae15024b908f4602db45f9900e9c6e7f204af207c0 + EOF + + rune -1 cscli hub list + assert_stderr --partial "invalid hub index: parsers:author/pars1 has no download path." +} + +@test "malformed parser - no stage" { + # Installing a parser requires a stage directory + yq -o json >"$INDEX_PATH" <<-'EOF' + parsers: + author/pars1: + path: parsers/s01-parse/author/pars1.yaml + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + EOF + + rune -1 cscli hub list -o raw + assert_stderr --partial "invalid hub index: parsers:author/pars1 has no stage." 
+} + +@test "malformed parser - short path" { + # Installing a parser requires a stage directory + yq -o json >"$INDEX_PATH" <<-'EOF' + parsers: + author/pars1: + path: parsers/s01-parse/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + EOF + + rune -0 cscli hub list -o raw + rune -0 cscli parsers install author/pars1 + rune -0 cscli hub list + # XXX here the item is installed but won't work, we only have a warning + assert_stderr --partial 'Ignoring file' + assert_stderr --partial 'path is too short' +} + +@test "malformed item - not yaml" { + # Installing an item requires reading the list of data files + yq -o json >"$INDEX_PATH" <<-'EOF' + parsers: + author/pars1: + path: parsers/s01-parse/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: daa1832414a685d69269e0ae15024b908f4602db45f9900e9c6e7f204af207c0 + content: "v0.0" + EOF + + rune -0 cscli hub list -o raw + rune -1 cscli parsers install author/pars1 + assert_stderr --partial 'unmarshal errors' +} + +@test "malformed item - hash mismatch" { + yq -o json >"$INDEX_PATH" <<-'EOF' + parsers: + author/pars1: + path: parsers/s01-parse/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: "0000000000000000000000000000000000000000000000000000000000000000" + content: "v0.0" + EOF + + rune -0 cscli hub list -o raw + rune -1 cscli parsers install author/pars1 + assert_stderr --partial 'parsers:author/pars1: hash mismatch: expected 0000000000000000000000000000000000000000000000000000000000000000, got daa1832414a685d69269e0ae15024b908f4602db45f9900e9c6e7f204af207c0.' 
+} + +@test "install minimal item" { + yq -o json >"$INDEX_PATH" <<-'EOF' + parsers: + author/pars1: + path: parsers/s01-parse/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + EOF + + rune -0 cscli hub list -o raw + rune -0 cscli parsers install author/pars1 + assert_line "downloading parsers:author/pars1" + assert_line "enabling parsers:author/pars1" + rune -0 cscli hub list +} + +@test "replace an item in a collection update" { + # A new version of coll1 will uninstall pars1 and install pars2. + yq -o json >"$INDEX_PATH" <<-'EOF' + collections: + author/coll1: + path: collections/author/coll1.yaml + version: "0.0" + versions: + 0.0: + digest: 801e11865f8fdf82a348e70fe3f568af190715c40a176e058da2ad21ff5e20be + content: "{'parsers': ['author/pars1']}" + parsers: + - author/pars1 + parsers: + author/pars1: + path: parsers/s01-parse/author/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + author/pars2: + path: parsers/s01-parse/author/pars2.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + EOF + + rune -0 cscli hub list + rune -0 cscli collections install author/coll1 + + yq -o json >"$INDEX_PATH" <<-'EOF' + collections: + author/coll1: + path: collections/author/coll1.yaml + version: "0.1" + versions: + 0.0: + digest: 801e11865f8fdf82a348e70fe3f568af190715c40a176e058da2ad21ff5e20be + 0.1: + digest: f3c535c2d01abec5aadbb5ce03c357a478d91b116410c9fee288e073cd34c0dd + content: "{'parsers': ['author/pars2']}" + parsers: + - author/pars2 + parsers: + author/pars1: + path: parsers/s01-parse/author/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + 
author/pars2: + path: parsers/s01-parse/author/pars2.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + EOF + + rune -0 cscli hub list -o raw + rune -0 cscli collections upgrade author/coll1 + assert_output - <<-EOT + downloading parsers:author/pars2 + enabling parsers:author/pars2 + disabling parsers:author/pars1 + downloading collections:author/coll1 + + $RELOAD_MESSAGE + EOT + + rune -0 cscli hub list -o raw + assert_output - <<-EOT + name,status,version,description,type + author/pars2,enabled,0.0,,parsers + author/coll1,enabled,0.1,,collections + EOT +} + +@test "replace an outdated item only if it's not used elsewhere" { + # XXX + skip "not implemented" + # A new version of coll1 will uninstall pars1 and install pars2. + # Pars3 will not be uninstalled because it's still required by coll2. + yq -o json >"$INDEX_PATH" <<-'EOF' + collections: + author/coll1: + path: collections/author/coll1.yaml + version: "0.0" + versions: + 0.0: + digest: 0c397c7b3e19d730578932fdc260c53f39bd2488fad87207ab6b7e4dc315b067 + content: "{'parsers': ['author/pars1', 'author/pars3']}" + parsers: + - author/pars1 + - author/pars3 + author/coll2: + path: collections/author/coll2.yaml + version: "0.0" + versions: + 0.0: + digest: 96df483ff697d4d214792b135a3ba5ddaca0ebfd856e7da89215926394ac4001 + content: "{'parsers': ['author/pars3']}" + parsers: + - author/pars3 + parsers: + author/pars1: + path: parsers/s01-parse/author/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + author/pars2: + path: parsers/s01-parse/author/pars2.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + author/pars3: + path: parsers/s01-parse/author/pars3.yaml + stage: s01-parse + version: "0.0" + versions: + 
0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + EOF + + rune -0 cscli hub list + rune -0 cscli collections install author/coll1 author/coll2 + + yq -o json >"$INDEX_PATH" <<-'EOF' + collections: + author/coll1: + path: collections/author/coll1.yaml + version: "0.1" + versions: + 0.0: + digest: 0c397c7b3e19d730578932fdc260c53f39bd2488fad87207ab6b7e4dc315b067 + 0.1: + digest: f3c535c2d01abec5aadbb5ce03c357a478d91b116410c9fee288e073cd34c0dd + content: "{'parsers': ['author/pars2']}" + parsers: + - author/pars2 + author/coll2: + path: collections/author/coll2.yaml + version: "0.0" + versions: + 0.0: + digest: 96df483ff697d4d214792b135a3ba5ddaca0ebfd856e7da89215926394ac4001 + content: "{'parsers': ['author/pars3']}" + parsers: + - author/pars3 + parsers: + author/pars1: + path: parsers/s01-parse/author/pars1.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + author/pars2: + path: parsers/s01-parse/author/pars2.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + author/pars3: + path: parsers/s01-parse/author/pars3.yaml + stage: s01-parse + version: "0.0" + versions: + 0.0: + digest: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a + content: "{}" + EOF + + rune -0 cscli hub list -o raw + rune -0 cscli collections upgrade author/coll1 + assert_output - <<-EOT + downloading parsers:author/pars2 + enabling parsers:author/pars2 + disabling parsers:author/pars1 + downloading collections:author/coll1 + + $RELOAD_MESSAGE + EOT + + rune -0 cscli hub list -o raw + assert_output - <<-EOT + name,status,version,description,type + author/pars2,enabled,0.0,,parsers + author/pars3,enabled,0.0,,parsers + author/coll1,enabled,0.1,,collections + EOT +} diff --git a/test/bin/remove-all-hub-items 
b/test/bin/remove-all-hub-items index 981602b775a..b5d611782ff 100755 --- a/test/bin/remove-all-hub-items +++ b/test/bin/remove-all-hub-items @@ -14,7 +14,7 @@ echo "Pre-downloading Hub content..." types=$("$CSCLI" hub types -o raw) for itemtype in $types; do - "$CSCLI" "$itemtype" remove --all --force + "$CSCLI" "$itemtype" remove --all --force --purge --yes done echo " done." diff --git a/test/lib/config/config-local b/test/lib/config/config-local index 3e3c806b616..4f3ec7cc2ae 100755 --- a/test/lib/config/config-local +++ b/test/lib/config/config-local @@ -117,7 +117,7 @@ make_init_data() { "$CSCLI" --warning hub update --with-content # preload some content and data files - "$CSCLI" collections install crowdsecurity/linux --download-only + "$CSCLI" collections install crowdsecurity/linux --download-only --yes # sub-items did not respect --download-only ./bin/remove-all-hub-items diff --git a/test/lib/setup_file.sh b/test/lib/setup_file.sh index 39a084596e2..902edc5de82 100755 --- a/test/lib/setup_file.sh +++ b/test/lib/setup_file.sh @@ -260,16 +260,6 @@ hub_purge_all() { } export -f hub_purge_all -# remove unused data from the index, to make sure we don't rely on it in any way -hub_strip_index() { - local INDEX - INDEX=$(config_get .config_paths.index_path) - local hub_min - hub_min=$(jq <"$INDEX" 'del(..|.long_description?) | del(..|.deprecated?) | del (..|.labels?)') - echo "$hub_min" >"$INDEX" -} -export -f hub_strip_index - # remove color and style sequences from stdin plaintext() { sed -E 's/\x1B\[[0-9;]*[JKmsu]//g' @@ -340,3 +330,17 @@ lp-get-token() { echo "$resp" | yq -r '.token' } export -f lp-get-token + +case $(uname) in + "Linux") + # shellcheck disable=SC2089 + RELOAD_MESSAGE="Run 'sudo systemctl reload crowdsec' for the new configuration to be effective." + ;; + *) + # shellcheck disable=SC2089 + RELOAD_MESSAGE="Run 'sudo service crowdsec reload' for the new configuration to be effective." 
+ ;; +esac + +# shellcheck disable=SC2090 +export RELOAD_MESSAGE diff --git a/test/localstack/docker-compose.yml b/test/localstack/docker-compose.yml index f58f3c7f263..9f0a690353b 100644 --- a/test/localstack/docker-compose.yml +++ b/test/localstack/docker-compose.yml @@ -15,7 +15,6 @@ services: AWS_HOST: localstack DEBUG: "" KINESYS_ERROR_PROBABILITY: "" - DOCKER_HOST: "unix://var/run/docker.sock" LOCALSTACK_HOST: "localstack" AWS_REGION: "us-east-1" diff --git a/wizard.sh b/wizard.sh index 6e215365f6c..2d3260fc22f 100755 --- a/wizard.sh +++ b/wizard.sh @@ -21,11 +21,8 @@ DOCKER_MODE="false" CROWDSEC_LIB_DIR="/var/lib/crowdsec" CROWDSEC_USR_DIR="/usr/local/lib/crowdsec" CROWDSEC_DATA_DIR="${CROWDSEC_LIB_DIR}/data" -CROWDSEC_DB_PATH="${CROWDSEC_DATA_DIR}/crowdsec.db" CROWDSEC_PATH="/etc/crowdsec" CROWDSEC_CONFIG_PATH="${CROWDSEC_PATH}" -CROWDSEC_LOG_FILE="/var/log/crowdsec.log" -LAPI_LOG_FILE="/var/log/crowdsec_api.log" CROWDSEC_PLUGIN_DIR="${CROWDSEC_USR_DIR}/plugins" CROWDSEC_CONSOLE_DIR="${CROWDSEC_PATH}/console" @@ -35,8 +32,6 @@ CSCLI_BIN="./cmd/crowdsec-cli/cscli" CLIENT_SECRETS="local_api_credentials.yaml" LAPI_SECRETS="online_api_credentials.yaml" -CONSOLE_FILE="console.yaml" - BIN_INSTALL_PATH="/usr/local/bin" CROWDSEC_BIN_INSTALLED="${BIN_INSTALL_PATH}/crowdsec" @@ -91,9 +86,6 @@ SENTINEL_PLUGIN_CONFIG="./cmd/notification-sentinel/sentinel.yaml" FILE_PLUGIN_CONFIG="./cmd/notification-file/file.yaml" -BACKUP_DIR=$(mktemp -d) -rm -rf -- "$BACKUP_DIR" - log_info() { msg=$1 date=$(date "+%Y-%m-%d %H:%M:%S") @@ -262,20 +254,26 @@ install_collection() { fi done + local YES="" + if [[ ${SILENT} == "false" ]]; then COLLECTION_TO_INSTALL=($(whiptail --separate-output --ok-button Continue --title "Crowdsec collections" --checklist "Available collections in crowdsec, try to pick one that fits your profile. Collections contains parsers and scenarios to protect your system." 20 120 10 "${HMENU[@]}" 3>&1 1>&2 2>&3)) if [ $? 
-eq 1 ]; then log_err "user bailed out at collection selection" exit 1; fi; + else + YES="--yes" fi; for collection in "${COLLECTION_TO_INSTALL[@]}"; do log_info "Installing collection '${collection}'" - ${CSCLI_BIN_INSTALLED} collections install "${collection}" --error + # shellcheck disable=SC2248 + ${CSCLI_BIN_INSTALLED} collections install "${collection}" --error ${YES} done - ${CSCLI_BIN_INSTALLED} parsers install "crowdsecurity/whitelists" --error + # shellcheck disable=SC2248 + ${CSCLI_BIN_INSTALLED} parsers install "crowdsecurity/whitelists" --error ${YES} if [[ ${SILENT} == "false" ]]; then whiptail --msgbox "Out of safety, I installed a parser called 'crowdsecurity/whitelists'. This one will prevent private IP addresses from being banned, feel free to remove it any time." 20 50 fi @@ -420,22 +418,19 @@ install_crowdsec() { mkdir -p "${CROWDSEC_CONFIG_PATH}/contexts" || exit mkdir -p "${CROWDSEC_CONSOLE_DIR}" || exit - # tmp - mkdir -p /tmp/data mkdir -p /etc/crowdsec/hub/ - install -v -m 600 -D "./config/${CLIENT_SECRETS}" "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 600 -D "./config/${LAPI_SECRETS}" "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - - ## end tmp - install -v -m 600 -D ./config/config.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 644 -D ./config/dev.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 644 -D ./config/user.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 644 -D ./config/acquis.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 644 -D ./config/profiles.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 644 -D ./config/simulation.yaml "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 644 -D ./config/"${CONSOLE_FILE}" "${CROWDSEC_CONFIG_PATH}" 1> /dev/null || exit - install -v -m 644 -D ./config/context.yaml "${CROWDSEC_CONSOLE_DIR}" 1> /dev/null || exit + # Don't overwrite existing files + [[ ! 
-f "${CROWDSEC_CONFIG_PATH}/${CLIENT_SECRETS}" ]] && install -v -m 600 -D "./config/${CLIENT_SECRETS}" "${CROWDSEC_CONFIG_PATH}" >/dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/${LAPI_SECRETS}" ]] && install -v -m 600 -D "./config/${LAPI_SECRETS}" "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/config.yaml" ]] && install -v -m 600 -D ./config/config.yaml "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/dev.yaml" ]] && install -v -m 644 -D ./config/dev.yaml "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/user.yaml" ]] && install -v -m 644 -D ./config/user.yaml "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/acquis.yaml" ]] && install -v -m 644 -D ./config/acquis.yaml "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/profiles.yaml" ]] && install -v -m 644 -D ./config/profiles.yaml "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/simulation.yaml" ]] && install -v -m 644 -D ./config/simulation.yaml "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/console.yaml" ]] && install -v -m 644 -D ./config/console.yaml "${CROWDSEC_CONFIG_PATH}" > /dev/null || exit + [[ ! -f "${CROWDSEC_CONFIG_PATH}/context.yaml" ]] && install -v -m 644 -D ./config/context.yaml "${CROWDSEC_CONSOLE_DIR}" > /dev/null || exit DATA=${CROWDSEC_DATA_DIR} CFG=${CROWDSEC_CONFIG_PATH} envsubst '$CFG $DATA' < ./config/user.yaml > ${CROWDSEC_CONFIG_PATH}"/user.yaml" || log_fatal "unable to generate user configuration file" if [[ ${DOCKER_MODE} == "false" ]]; then @@ -465,23 +460,12 @@ update_full() { log_err "Cscli binary '$CSCLI_BIN' not found. 
Please build it with 'make build'" && exit fi - log_info "Backing up existing configuration" - ${CSCLI_BIN_INSTALLED} config backup ${BACKUP_DIR} - log_info "Saving default database content if exist" - if [[ -f "/var/lib/crowdsec/data/crowdsec.db" ]]; then - cp /var/lib/crowdsec/data/crowdsec.db ${BACKUP_DIR}/crowdsec.db - fi - log_info "Cleanup existing crowdsec configuration" + log_info "Removing old binaries" uninstall_crowdsec log_info "Installing crowdsec" install_crowdsec - log_info "Restoring configuration" + log_info "Updating hub" ${CSCLI_BIN_INSTALLED} hub update - ${CSCLI_BIN_INSTALLED} config restore ${BACKUP_DIR} - log_info "Restoring saved database if exist" - if [[ -f "${BACKUP_DIR}/crowdsec.db" ]]; then - cp ${BACKUP_DIR}/crowdsec.db /var/lib/crowdsec/data/crowdsec.db - fi log_info "Finished, restarting" systemctl restart crowdsec || log_fatal "Failed to restart crowdsec" } @@ -559,15 +543,6 @@ uninstall_crowdsec() { ${CSCLI_BIN} dashboard remove -f -y >/dev/null delete_bins - # tmp - rm -rf /tmp/data/ - ## end tmp - - find /etc/crowdsec -maxdepth 1 -mindepth 1 | grep -v "bouncer" | xargs rm -rf || echo "" - rm -f ${CROWDSEC_LOG_FILE} || echo "" - rm -f ${LAPI_LOG_FILE} || echo "" - rm -f ${CROWDSEC_DB_PATH} || echo "" - rm -rf ${CROWDSEC_LIB_DIR} || echo "" rm -rf ${CROWDSEC_USR_DIR} || echo "" rm -f ${SYSTEMD_PATH_FILE} || echo "" log_info "crowdsec successfully uninstalled" @@ -759,12 +734,11 @@ usage() { echo " ./wizard.sh --unattended Install in unattended mode, no question will be asked and defaults will be followed" echo " ./wizard.sh --docker-mode Will install crowdsec without systemd and generate random machine-id" echo " ./wizard.sh -n|--noop Do nothing" - - exit 0 } if [[ $# -eq 0 ]]; then -usage + usage + exit 0 fi while [[ $# -gt 0 ]]