diff --git a/CHANGELOG.md b/CHANGELOG.md
index 063c9683..75e6ea8b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -12,12 +12,16 @@
 # 2.x.x
 
 ## Breaking changes
+- Add support for Elasticsearch 8, remove support for Elasticsearch 6 and below - [#744](https://github.com/jertel/elastalert2/pull/744) - @ferozsalam, @jertel, and @nsano-rururu
+  WARNING! Read the [ES 8 upgrade notes](https://elastalert2.readthedocs.io/en/latest/recipes/faq.html#does-elastalert-2-support-elasticsearch-8) BEFORE upgrading your cluster to Elasticsearch 8. Failure to do so can result in your cluster no longer starting and being unable to roll back to 7.x.
+- Kibana dashboard integration has been removed, as it was only supported on older versions of Elasticsearch and Kibana. Per the above breaking change, those older versions are no longer supported by ElastAlert 2.
 - Dockerfile refactor so that app home and user home are the same directory (/opt/elastalert/). Previously, app home was /opt/elastalert/ and user home was /opt/elastalert/elastalert; now both are /opt/elastalert/ - [#656](https://github.com/jertel/elastalert2/pull/656)
 
 ## New features
 - [MS Teams] Kibana Discover URL and Facts - [#660](https://github.com/jertel/elastalert2/pull/660) - @thib12
 - Add support for Kibana 7.17 for Kibana Discover - [#695](https://github.com/jertel/elastalert2/pull/695) - @nsano-rururu
 - Added a fixed name metric_agg_value to MetricAggregationRule match_body - [#697](https://github.com/jertel/elastalert2/pull/697) - @iamxeph
+
 ## Other changes
 - Load Jinja template when loading an alert - [#654](https://github.com/jertel/elastalert2/pull/654) - @thib12
 - tox 3.24.4 to 3.24.5 - [#655](https://github.com/jertel/elastalert2/pull/655) - @nsano-rururu
diff --git a/chart/elastalert2/values.yaml b/chart/elastalert2/values.yaml
index 85f2cb55..2893f8e1 100644
--- a/chart/elastalert2/values.yaml
+++ b/chart/elastalert2/values.yaml
@@ -79,7 +79,6 @@ extraConfigOptions: {}
 # # Options to propagate to all rules, e.g. a common slack_webhook_url or kibana_url
 # # Please note at the time of implementing this value, it will not work for required_locals
 # # Which MUST be set at the rule level, these are: ['alert', 'type', 'name', 'index']
-# generate_kibana_link: true
 # kibana_url: https://kibana.yourdomain.com
 # slack_webhook_url: dummy
diff --git a/docs/source/elastalert.rst b/docs/source/elastalert.rst
index 0bbcb125..fa9a2ad5 100755
--- a/docs/source/elastalert.rst
+++ b/docs/source/elastalert.rst
@@ -66,7 +66,7 @@ Additional rule types and alerts can be easily imported or written. (See :ref:`W
 
 In addition to this basic usage, there are many other features that make alerts more useful:
 
-- Alerts link to Kibana dashboards
+- Alerts link to Kibana Discover searches
 - Aggregate counts for arbitrary fields
 - Combine alerts into periodic reports
 - Separate alerts by using a unique key field
diff --git a/docs/source/recipes/faq-md.md b/docs/source/recipes/faq-md.md
index 9a3019db..71a8460f 100644
--- a/docs/source/recipes/faq-md.md
+++ b/docs/source/recipes/faq-md.md
@@ -219,8 +219,7 @@
 rounded to a single timestamp.
 
 If you are using ``query_key`` (a single key, not multiple keys), you can use
 ``use_terms_query``. This will make ElastAlert 2 perform a terms aggregation to get the counts for each value of a certain
-field. Both ``use_terms_query`` and ``use_count_query`` also require ``doc_type`` to be set to the
-``_type`` of the documents. They may not be compatible with all rule types.
+field. This option may not be compatible with all rule types.
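+
+For illustration, a minimal sketch of a frequency rule using this option might look like the following (the rule name, index, and field names here are hypothetical):
+
+```yaml
+name: Frequent events per user
+type: frequency
+index: logs-*
+num_events: 100
+timeframe:
+  hours: 1
+# count each username separately, via a terms aggregation
+query_key: username
+use_terms_query: true
+terms_size: 50
+alert: debug
+```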
 
 Can I perform aggregations?
 ==========
 
@@ -436,21 +435,20 @@ filter:
 
 Does ElastAlert 2 support Elasticsearch 8?
 ===========
 
-Support for Elasticsearch 8 is a work in progress. It is currently possible to
-load ElastAlert 2 against a _fresh_ installation of Elasticsearch (i.e. one where
-no previous ElastAlert instance has been running) without any extra steps.
+ElastAlert 2 supports Elasticsearch 8.
 
 To upgrade an existing ElastAlert 2 installation to Elasticsearch 8, the
-following manual steps are required:
+following manual steps are required (note the important WARNING below):
 
 * Shut down ElastAlert 2.
-* Delete or rename the old `elastalert*` indices. See Elasticsearch
-  documentation for instructions on how to delete via the API.
+* Delete the old `elastalert*` indices. See [Elasticsearch
+  documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-delete-index.html)
+  for instructions on how to delete via the API, or use the Kibana Index Management interface.
+* Upgrade the Elastic cluster to Elasticsearch 8.
 * If NOT running ElastAlert 2 via Docker or Kubernetes, run
   elastalert-create-index to create the new indices. This is not needed
   when running via a container, since the container always attempts to create
   the indices at startup if they're not yet created.
 * Restart ElastAlert 2.
 
-However, *at this point it is not guaranteed that features which used to work
-on Elasticsearch 7 will still work*.
+WARNING: Failure to remove the old ElastAlert indices can result in a non-working Elasticsearch cluster. This is because the ElastAlert indices contain deprecated features, and the Elasticsearch 8 upgrade logic is currently flawed and does not correctly handle this situation. The Elasticsearch GitHub repository contains [more information](https://github.com/elastic/elasticsearch/issues/84199) on this problem.
\ No newline at end of file diff --git a/docs/source/recipes/writing_filters.rst b/docs/source/recipes/writing_filters.rst index 5d7690b5..0fe1ae20 100644 --- a/docs/source/recipes/writing_filters.rst +++ b/docs/source/recipes/writing_filters.rst @@ -100,7 +100,7 @@ For ranges on fields:: Negation, and, or ***************** -For Elasticsearch 2.X, any of the filters can be embedded in ``not``, ``and``, and ``or``:: +Any of the filters can be embedded in ``not``, ``and``, and ``or``:: filter: - or: diff --git a/docs/source/ruletypes.rst b/docs/source/ruletypes.rst index 31089b6d..296e9d11 100644 --- a/docs/source/ruletypes.rst +++ b/docs/source/ruletypes.rst @@ -60,22 +60,12 @@ Rule Configuration Cheat Sheet +--------------------------------------------------------------+ | | ``description`` (string, default empty string) | | +--------------------------------------------------------------+ | -| ``generate_kibana_link`` (boolean, default False) | | -+--------------------------------------------------------------+ | -| ``use_kibana_dashboard`` (string, no default) | | -+--------------------------------------------------------------+ | | ``kibana_url`` (string, default from es_host) | | +--------------------------------------------------------------+ | | ``kibana_username`` (string, no default) | | +--------------------------------------------------------------+ | | ``kibana_password`` (string, no default) | | +--------------------------------------------------------------+ | -| ``use_kibana4_dashboard`` (string, no default) | | -+--------------------------------------------------------------+ | -| ``kibana4_start_timedelta`` (time, default: 10 min) | | -+--------------------------------------------------------------+ | -| ``kibana4_end_timedelta`` (time, default: 10 min) | | -+--------------------------------------------------------------+ | | ``generate_kibana_discover_url`` (boolean, default False) | | +--------------------------------------------------------------+ | | ``shorten_kibana_discover_url`` (boolean, default False) | | @@ -177,13 +167,9 @@ Rule Configuration Cheat Sheet | ``attach_related`` (boolean, default False) | | | | | Opt | | | | | | | | +-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ |``use_count_query`` (boolean, default False) | | | | | Opt | Opt | Opt | | | | | | -| | | | | | | | | | | | | | -|``doc_type`` (string, no default) | | | | | | | | | | | | | +-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ |``use_terms_query`` (boolean, default False) | | | | | Opt | Opt | | Opt | | | | | | | | | | | | | | | | | | | -|``doc_type`` (string, no default) | | | | | | | | | | | | | -| | | | | | | | | | | | | | |``query_key`` (string or list, no default) | | | | | | | | | | | | | | | | | | | | | | | | | | | |``terms_size`` (int, default 50) | | | | | | | | | | | | | @@ -224,8 +210,6 @@ Rule Configuration Cheat Sheet | | | | | | | | | | | | | | |([min|max|avg|sum|cardinality|value_count|percentiles])| | | | | | | | | | | | | +-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+ -|``doc_type`` (string, no default) | | | | | 
| | | | | Req | Req | Req |
-+-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+
 |``metric_agg_script`` (no default) | | | | | | | | | | Opt | Opt | |
 +-------------------------------------------------------+--------+-----------+-----------+--------+-----------+-------+----------+--------+-----------+------------------+-----------------+----------------+
 |``percentile_range`` ++required if percentiles is used | | | | | | | | | | Req++ | Req++ | |
@@ -596,27 +580,13 @@ description
 
 ``description``: text describing the purpose of rule. (Optional, string, default empty string) Can be referenced in custom alerters to provide context as to why a rule might trigger.
 
-generate_kibana_link
-^^^^^^^^^^^^^^^^^^^^
-
-``generate_kibana_link``: This option is for Kibana 3 only.
-If true, ElastAlert 2 will generate a temporary Kibana dashboard and include a link to it in alerts. The dashboard
-consists of an events over time graph and a table with ``include`` fields selected in the table. If the rule uses ``query_key``, the
-dashboard will also contain a filter for the ``query_key`` of the alert. The dashboard schema will
-be uploaded to the kibana-int index as a temporary dashboard. (Optional, boolean, default False)
-
 kibana_url
 ^^^^^^^^^^
 
 ``kibana_url``: The base url of the Kibana application. If not specified, a URL will be constructed using ``es_host`` and ``es_port``.
 
-This value will be used if one of the following conditions are met:
-
-- ``generate_kibana_link`` is true
-- ``use_kibana_dashboard`` is true
-- ``use_kibana4_dashboard`` is true
-- ``generate_kibana_discover_url`` is true and ``kibana_discover_app_url`` is a relative path
+This value will be used if ``generate_kibana_discover_url`` is true and ``kibana_discover_app_url`` is a relative path.
 
 (Optional, string, default ``http://<es_host>:<es_port>/_plugin/kibana/``)
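+
+For illustration, a hypothetical combination of these options could look like the following sketch (the URLs are made up)::
+
+    generate_kibana_discover_url: true
+    kibana_discover_app_url: app/discover#/
+    kibana_url: https://kibana.example.com/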
 
@@ -636,38 +606,6 @@
 
 This value is only used if ``shorten_kibana_discover_url`` is true.
 
 (Optional, string, no default)
 
-use_kibana_dashboard
-^^^^^^^^^^^^^^^^^^^^
-
-``use_kibana_dashboard``: The name of a Kibana 3 dashboard to link to. Instead of generating a dashboard from a template,
-ElastAlert 2 can use an existing dashboard. It will set the time range on the dashboard to around the match time,
-upload it as a temporary dashboard, add a filter to the ``query_key`` of the alert if applicable,
-and put the url to the dashboard in the alert. (Optional, string, no default)
-
-use_kibana4_dashboard
-^^^^^^^^^^^^^^^^^^^^^
-
-``use_kibana4_dashboard``: A link to a Kibana 4 dashboard. For example, "https://kibana.example.com/#/dashboard/My-Dashboard".
-This will set the time setting on the dashboard from the match time minus the timeframe, to 10 minutes after the match time.
-Note that this does not support filtering by ``query_key`` like Kibana 3. This value can use `$VAR` and `${VAR}` references
-to expand environment variables.
-
-kibana4_start_timedelta
-^^^^^^^^^^^^^^^^^^^^^^^
-
-``kibana4_start_timedelta``: Defaults to 10 minutes. This option allows you to specify the start time for the generated kibana4 dashboard.
-This value is added in front of the event. For example,
-
-``kibana4_start_timedelta: minutes: 2``
-
-kibana4_end_timedelta
-^^^^^^^^^^^^^^^^^^^^^
-
-``kibana4_end_timedelta``: Defaults to 10 minutes. This option allows you to specify the end time for the generated kibana4 dashboard.
-This value is added in back of the event.
-
-``kibana4_end_timedelta: minutes: 2``
-
 generate_kibana_discover_url
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -735,9 +673,8 @@ kibana_discover_version
 
 The currently supported versions of Kibana Discover are:
 
-- `5.6`
-- `6.0`, `6.1`, `6.2`, `6.3`, `6.4`, `6.5`, `6.6`, `6.7`, `6.8`
 - `7.0`, `7.1`, `7.2`, `7.3`, `7.4`, `7.5`, `7.6`, `7.7`, `7.8`, `7.9`, `7.10`, `7.11`, `7.12`, `7.13`, `7.14`, `7.15`, `7.16`, `7.17`
+- `8.0`
 
 ``kibana_discover_version: '7.15'``
@@ -747,6 +684,8 @@ kibana_discover_index_pattern_id
 
 ``kibana_discover_index_pattern_id``: The id of the index pattern to link to in the Kibana Discover application.
 These ids are usually generated and can be found in the url of the index pattern management page, or by exporting its saved object.
 
+In this documentation, all references to "index pattern" also cover the equivalent concept in Kibana 8, which is called a "data view".
+
 Example export of an index pattern's saved object:
 
 .. code-block:: text
@@ -1106,12 +1045,10 @@ Optional:
 
 ``use_count_query``: If true, ElastAlert 2 will poll Elasticsearch using the count api, and not download all of the matching documents. This is useful if you care only about numbers and not the actual data. It should also be used if you expect a large number of query hits, in the order
-of tens of thousands or more. ``doc_type`` must be set to use this.
-
-``doc_type``: Specify the ``_type`` of document to search for. This must be present if ``use_count_query`` or ``use_terms_query`` is set.
+of tens of thousands or more.
 
 ``use_terms_query``: If true, ElastAlert 2 will make an aggregation query against Elasticsearch to get counts of documents matching
-each unique value of ``query_key``. This must be used with ``query_key`` and ``doc_type``. This will only return a maximum of ``terms_size``,
+each unique value of ``query_key``. This must be used with ``query_key``. This will only return a maximum of ``terms_size``,
 default 50, unique terms.
 
 ``terms_size``: When used with ``use_terms_query``, this is the maximum number of terms returned per query. Default is 50.
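+
+As a hypothetical sketch, a rule that only needs the number of matching documents, rather than the documents themselves, could enable the count api like so::
+
+    use_count_query: true
+
+or, to count each value of a key field separately (the field name is illustrative)::
+
+    use_terms_query: true
+    query_key: username
+    terms_size: 50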
@@ -1241,12 +1178,10 @@ cause alerts. Baseline is established after ``timeframe`` has elapsed twice sinc
 
 ``use_count_query``: If true, ElastAlert 2 will poll Elasticsearch using the count api, and not download all of the matching documents. This is useful if you care only about numbers and not the actual data. It should also be used if you expect a large number of query hits, in the order
-of tens of thousands or more. ``doc_type`` must be set to use this.
-
-``doc_type``: Specify the ``_type`` of document to search for. This must be present if ``use_count_query`` or ``use_terms_query`` is set.
+of tens of thousands or more.
 
 ``use_terms_query``: If true, ElastAlert 2 will make an aggregation query against Elasticsearch to get counts of documents matching
-each unique value of ``query_key``. This must be used with ``query_key`` and ``doc_type``. This will only return a maximum of ``terms_size``,
+each unique value of ``query_key``. This must be used with ``query_key``. This will only return a maximum of ``terms_size``,
 default 50, unique terms.
 
 ``terms_size``: When used with ``use_terms_query``, this is the maximum number of terms returned per query. Default is 50.
 
@@ -1268,12 +1203,10 @@ Optional:
 
 ``use_count_query``: If true, ElastAlert 2 will poll Elasticsearch using the count api, and not download all of the matching documents. This is useful if you care only about numbers and not the actual data. It should also be used if you expect a large number of query hits, in the order
-of tens of thousands or more. ``doc_type`` must be set to use this.
-
-``doc_type``: Specify the ``_type`` of document to search for. This must be present if ``use_count_query`` or ``use_terms_query`` is set.
+of tens of thousands or more.
 
 ``use_terms_query``: If true, ElastAlert 2 will make an aggregation query against Elasticsearch to get counts of documents matching
-each unique value of ``query_key``. This must be used with ``query_key`` and ``doc_type``. This will only return a maximum of ``terms_size``,
+each unique value of ``query_key``. This must be used with ``query_key``. This will only return a maximum of ``terms_size``,
 default 50, unique terms.
 
 ``terms_size``: When used with ``use_terms_query``, this is the maximum number of terms returned per query. Default is 50.
 
@@ -1370,8 +1303,6 @@ supported by the specified aggregation type. If using a scripted field via ``me
 
 .. note:: When Metric Aggregation has a match, match_body includes an aggregated value that triggered the match so that you can use that on an alert. The value is named based on ``metric_agg_key`` and ``metric_agg_type``. For example, if you set ``metric_agg_key`` to 'system.cpu.total.norm.pct' and ``metric_agg_type`` to 'avg', the name of the value is 'metric_system.cpu.total.norm.pct_avg'. Because of this naming rule, you might face conflicts with the jinja2 template, and when that happens, you can also use 'metric_agg_value' from match_body instead.
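+
+For instance, assuming Jinja templating is enabled for the alert text via ``alert_text_type: alert_text_jinja``, a hypothetical alert body could reference the fixed name instead of the generated one:
+
+.. code-block:: yaml
+
+    alert_text_type: alert_text_jinja
+    alert_text: "Average CPU was {{ metric_agg_value }}"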
 
-``doc_type``: Specify the ``_type`` of document to search for.
-
 This rule also requires at least one of the two following options:
 
 ``max_threshold``: If the calculated metric value is greater than this number, an alert will be triggered. This threshold is exclusive.
 
@@ -1477,9 +1408,7 @@ This rule requires:
 
 ``match_bucket_filter``: ES filter DSL. This defines a filter for the match bucket, which should match a subset of the documents returned by the main query filter.
 
-``doc_type``: Specify the ``_type`` of document to search for.
-
-This rule also requires at least one of the two following options:
+This rule also requires at least one of the two following options:
 
 ``min_percentage``: If the percentage of matching documents is less than this number, an alert will be triggered.
 
diff --git a/docs/source/running_elastalert.rst b/docs/source/running_elastalert.rst
index 143d4eee..e7c8cba7 100644
--- a/docs/source/running_elastalert.rst
+++ b/docs/source/running_elastalert.rst
@@ -204,7 +204,7 @@ As a Python package
 
 Requirements
 ------------
 
-- Elasticsearch 6.x, 7.x.
+- Elasticsearch 7.x or 8.x.
 - ISO8601 or Unix timestamped data
 - Python 3.10. Requires OpenSSL 1.1.1 or newer.
 - pip
diff --git a/elastalert/__init__.py b/elastalert/__init__.py
index 34cb0dbc..13c3559f 100644
--- a/elastalert/__init__.py
+++ b/elastalert/__init__.py
@@ -63,32 +63,6 @@ def es_version(self):
             time.sleep(3)
         return self._es_version
 
-    def is_atleastfive(self):
-        """
-        Returns True when the Elasticsearch server version >= 5
-        """
-        return int(self.es_version.split(".")[0]) >= 5
-
-    def is_atleastsix(self):
-        """
-        Returns True when the Elasticsearch server version >= 6
-        """
-        return int(self.es_version.split(".")[0]) >= 6
-
-    def is_atleastsixtwo(self):
-        """
-        Returns True when the Elasticsearch server version >= 6.2
-        """
-        major, minor = list(map(int, self.es_version.split(".")[:2]))
-        return major > 6 or (major == 6 and minor >= 2)
-
-    def is_atleastsixsix(self):
-        """
-        Returns True when the Elasticsearch server version >= 6.6
-        """
-        major, minor = list(map(int, self.es_version.split(".")[:2]))
-        return major > 6 or (major == 6 and minor >= 6)
-
     def is_atleastseven(self):
         """
         Returns True when the Elasticsearch server version >= 7
@@ -103,12 +77,7 @@ def is_atleasteight(self):
 
     def resolve_writeback_index(self, writeback_index, doc_type):
-        """ In ES6, you cannot have multiple _types per index,
-        therefore we use self.writeback_index as the prefix for the actual
-        index name, based on doc_type. """
-        if not self.is_atleastsix():
-            return writeback_index
-        elif doc_type == 'silence':
+        if doc_type == 'silence':
             return writeback_index + '_silence'
         elif doc_type == 'past_elastalert':
             return writeback_index + '_past'
@@ -117,157 +86,3 @@
         elif doc_type == 'elastalert_error':
             return writeback_index + '_error'
         return writeback_index
-
-    @query_params(
-        "_source",
-        "_source_exclude",
-        "_source_excludes",
-        "_source_include",
-        "_source_includes",
-        "allow_no_indices",
-        "allow_partial_search_results",
-        "analyze_wildcard",
-        "analyzer",
-        "batched_reduce_size",
-        "default_operator",
-        "df",
-        "docvalue_fields",
-        "expand_wildcards",
-        "explain",
-        "from_",
-        "ignore_unavailable",
-        "lenient",
-        "max_concurrent_shard_requests",
-        "pre_filter_shard_size",
-        "preference",
-        "q",
-        "rest_total_hits_as_int",
-        "request_cache",
-        "routing",
-        "scroll",
-        "search_type",
-        "seq_no_primary_term",
-        "size",
-        "sort",
-        "stats",
-        "stored_fields",
-        "suggest_field",
-        "suggest_mode",
-        "suggest_size",
-        "suggest_text",
-        "terminate_after",
-        "timeout",
-        "track_scores",
-        "track_total_hits",
-        "typed_keys",
-        "version",
-    )
-    def deprecated_search(self, index=None, doc_type=None, body=None, params=None):
-        """
-        Execute a search query and get back search hits that match the query.
-        ``_
-        :arg index: A list of index names to search, or a string containing a
-            comma-separated list of index names to search; use `_all`
-            or empty string to perform the operation on all indices
-        :arg doc_type: A comma-separated list of document types to search; leave
-            empty to perform the operation on all types
-        :arg body: The search definition using the Query DSL
-        :arg _source: True or false to return the _source field or not, or a
-            list of fields to return
-        :arg _source_exclude: A list of fields to exclude from the returned
-            _source field
-        :arg _source_include: A list of fields to extract and return from the
-            _source field
-        :arg allow_no_indices: Whether to ignore if a wildcard indices
-            expression resolves into no concrete indices.
(This includes `_all` - string or when no indices have been specified) - :arg allow_partial_search_results: Set to false to return an overall - failure if the request would produce partial results. Defaults to - True, which will allow partial results in the case of timeouts or - partial failures - :arg analyze_wildcard: Specify whether wildcard and prefix queries - should be analyzed (default: false) - :arg analyzer: The analyzer to use for the query string - :arg batched_reduce_size: The number of shard results that should be - reduced at once on the coordinating node. This value should be used - as a protection mechanism to reduce the memory overhead per search - request if the potential number of shards in the request can be - large., default 512 - :arg default_operator: The default operator for query string query (AND - or OR), default 'OR', valid choices are: 'AND', 'OR' - :arg df: The field to use as default where no field prefix is given in - the query string - :arg docvalue_fields: A comma-separated list of fields to return as the - docvalue representation of a field for each hit - :arg expand_wildcards: Whether to expand wildcard expression to concrete - indices that are open, closed or both., default 'open', valid - choices are: 'open', 'closed', 'none', 'all' - :arg explain: Specify whether to return detailed information about score - computation as part of a hit - :arg from\\_: Starting offset (default: 0) - :arg ignore_unavailable: Whether specified concrete indices should be - ignored when unavailable (missing or closed) - :arg lenient: Specify whether format-based query failures (such as - providing text to a numeric field) should be ignored - :arg max_concurrent_shard_requests: The number of concurrent shard - requests this search executes concurrently. This value should be - used to limit the impact of the search on the cluster in order to - limit the number of concurrent shard requests, default 'The default - grows with the number of nodes in the cluster but is at most 256.' - :arg pre_filter_shard_size: A threshold that enforces a pre-filter - roundtrip to prefilter search shards based on query rewriting if - the number of shards the search request expands to exceeds the - threshold. This filter roundtrip can limit the number of shards - significantly if for instance a shard can not match any documents - based on it's rewrite method ie. if date filters are mandatory to - match but the shard bounds and the query are disjoint., default 128 - :arg preference: Specify the node or shard the operation should be - performed on (default: random) - :arg q: Query in the Lucene query string syntax - :arg rest_total_hits_as_int: This parameter is used to restore the total hits as a number - in the response. 
This param is added version 6.x to handle mixed cluster queries where nodes - are in multiple versions (7.0 and 6.latest) - :arg request_cache: Specify if request cache should be used for this - request or not, defaults to index level setting - :arg routing: A comma-separated list of specific routing values - :arg scroll: Specify how long a consistent view of the index should be - maintained for scrolled search - :arg search_type: Search operation type, valid choices are: - 'query_then_fetch', 'dfs_query_then_fetch' - :arg size: Number of hits to return (default: 10) - :arg sort: A comma-separated list of : pairs - :arg stats: Specific 'tag' of the request for logging and statistical - purposes - :arg stored_fields: A comma-separated list of stored fields to return as - part of a hit - :arg suggest_field: Specify which field to use for suggestions - :arg suggest_mode: Specify suggest mode, default 'missing', valid - choices are: 'missing', 'popular', 'always' - :arg suggest_size: How many suggestions to return in response - :arg suggest_text: The source text for which the suggestions should be - returned - :arg terminate_after: The maximum number of documents to collect for - each shard, upon reaching which the query execution will terminate - early. - :arg timeout: Explicit operation timeout - :arg track_scores: Whether to calculate and return scores even if they - are not used for sorting - :arg track_total_hits: Indicate if the number of documents that match - the query should be tracked - :arg typed_keys: Specify whether aggregation and suggester names should - be prefixed by their respective types in the response - :arg version: Specify whether to return document version as part of a - hit - """ - # from is a reserved word so it cannot be used, use from_ instead - if "from_" in params: - params["from"] = params.pop("from_") - - if not index: - index = "_all" - res = self.transport.perform_request( - "GET", _make_path(index, doc_type, "_search"), params=params, body=body - ) - if type(res) == list or type(res) == tuple: - return res[1] - return res diff --git a/elastalert/create_index.py b/elastalert/create_index.py index 97eba9ee..82e077e0 100644 --- a/elastalert/create_index.py +++ b/elastalert/create_index.py @@ -31,10 +31,11 @@ def create_index_mappings(es_client, ea_index, recreate=False, old_ea_index=None es_index_mappings = {} if is_atleasteight(esversion): es_index_mappings = read_es_index_mappings() - elif is_atleastsix(esversion): - es_index_mappings = read_es_index_mappings(6) - else: - es_index_mappings = read_es_index_mappings(5) + elif is_atleastseven(esversion): + es_index_mappings = read_es_index_mappings(7) + else: + print('FATAL - Unsupported Elasticsearch version: ' + esversion + '. Aborting.') + exit(1) es_index = IndicesClient(es_client) if not recreate: @@ -43,7 +44,7 @@ def create_index_mappings(es_client, ea_index, recreate=False, old_ea_index=None return None # (Re-)Create indices. 
- if is_atleastsix(esversion): + if is_atleastseven(esversion): index_names = ( ea_index, ea_index + '_status', @@ -79,8 +80,6 @@ def create_index_mappings(es_client, ea_index, recreate=False, old_ea_index=None es_client.indices.put_mapping(index=ea_index + '_past', body=es_index_mappings['past_elastalert']) elif is_atleastseven(esversion): - # TODO remove doc_type completely when elasicsearch client allows doc_type=None - # doc_type is a deprecated feature and will be completely removed in Elasicsearch 8 es_client.indices.put_mapping(index=ea_index, doc_type='_doc', body=es_index_mappings['elastalert'], include_type_name=True) es_client.indices.put_mapping(index=ea_index + '_status', doc_type='_doc', @@ -91,39 +90,6 @@ def create_index_mappings(es_client, ea_index, recreate=False, old_ea_index=None body=es_index_mappings['elastalert_error'], include_type_name=True) es_client.indices.put_mapping(index=ea_index + '_past', doc_type='_doc', body=es_index_mappings['past_elastalert'], include_type_name=True) - elif is_atleastsixtwo(esversion): - es_client.indices.put_mapping(index=ea_index, doc_type='_doc', - body=es_index_mappings['elastalert']) - es_client.indices.put_mapping(index=ea_index + '_status', doc_type='_doc', - body=es_index_mappings['elastalert_status']) - es_client.indices.put_mapping(index=ea_index + '_silence', doc_type='_doc', - body=es_index_mappings['silence']) - es_client.indices.put_mapping(index=ea_index + '_error', doc_type='_doc', - body=es_index_mappings['elastalert_error']) - es_client.indices.put_mapping(index=ea_index + '_past', doc_type='_doc', - body=es_index_mappings['past_elastalert']) - elif is_atleastsix(esversion): - es_client.indices.put_mapping(index=ea_index, doc_type='elastalert', - body=es_index_mappings['elastalert']) - es_client.indices.put_mapping(index=ea_index + '_status', doc_type='elastalert_status', - body=es_index_mappings['elastalert_status']) - es_client.indices.put_mapping(index=ea_index + '_silence', doc_type='silence', - body=es_index_mappings['silence']) - es_client.indices.put_mapping(index=ea_index + '_error', doc_type='elastalert_error', - body=es_index_mappings['elastalert_error']) - es_client.indices.put_mapping(index=ea_index + '_past', doc_type='past_elastalert', - body=es_index_mappings['past_elastalert']) - else: - es_client.indices.put_mapping(index=ea_index, doc_type='elastalert', - body=es_index_mappings['elastalert']) - es_client.indices.put_mapping(index=ea_index, doc_type='elastalert_status', - body=es_index_mappings['elastalert_status']) - es_client.indices.put_mapping(index=ea_index, doc_type='silence', - body=es_index_mappings['silence']) - es_client.indices.put_mapping(index=ea_index, doc_type='elastalert_error', - body=es_index_mappings['elastalert_error']) - es_client.indices.put_mapping(index=ea_index, doc_type='past_elastalert', - body=es_index_mappings['past_elastalert']) print('New index %s created' % ea_index) if old_ea_index: @@ -145,7 +111,7 @@ def read_es_index_mappings(es_version=8): } -def read_es_index_mapping(mapping, es_version=6): +def read_es_index_mapping(mapping, es_version=7): base_path = os.path.abspath(os.path.dirname(__file__)) mapping_path = 'es_mappings/{0}/{1}.json'.format(es_version, mapping) path = os.path.join(base_path, mapping_path) @@ -153,16 +119,6 @@ def read_es_index_mapping(mapping, es_version=6): print("Reading index mapping '{0}'".format(mapping_path)) return json.load(f) - -def is_atleastsix(es_version): - return int(es_version.split(".")[0]) >= 6 - - -def 
is_atleastsixtwo(es_version): - major, minor = list(map(int, es_version.split(".")[:2])) - return major > 6 or (major == 6 and minor >= 2) - - def is_atleastseven(es_version): return int(es_version.split(".")[0]) >= 7 diff --git a/elastalert/elastalert.py b/elastalert/elastalert.py index 6c419053..4d225057 100755 --- a/elastalert/elastalert.py +++ b/elastalert/elastalert.py @@ -29,7 +29,6 @@ from elasticsearch.exceptions import NotFoundError from elasticsearch.exceptions import TransportError -from elastalert import kibana from elastalert.alerters.debug import DebugAlerter from elastalert.config import load_conf from elastalert.enhancements import DropMatchException @@ -204,8 +203,7 @@ def get_index(rule, starttime=None, endtime=None): return index @staticmethod - def get_query(filters, starttime=None, endtime=None, sort=True, timestamp_field='@timestamp', to_ts_func=dt_to_ts, desc=False, - five=False): + def get_query(filters, starttime=None, endtime=None, sort=True, timestamp_field='@timestamp', to_ts_func=dt_to_ts, desc=False): """ Returns a query dict that will apply a list of filters, filter by start and end time, and sort results by timestamp. @@ -218,33 +216,34 @@ def get_query(filters, starttime=None, endtime=None, sort=True, timestamp_field= starttime = to_ts_func(starttime) endtime = to_ts_func(endtime) filters = copy.copy(filters) - es_filters = {'filter': {'bool': {'must': filters}}} + + # ElastAlert documentation still specifies an old way of writing filters + # This snippet of code converts it into the new standard + new_filters = [] + for es_filter in filters: + if es_filter.get('query'): + new_filters.append(es_filter['query']) + else: + new_filters.append(es_filter) + + es_filters = {'filter': {'bool': {'must': new_filters}}} if starttime and endtime: es_filters['filter']['bool']['must'].insert(0, {'range': {timestamp_field: {'gt': starttime, 'lte': endtime}}}) - if five: - query = {'query': {'bool': es_filters}} - else: - query = {'query': {'filtered': es_filters}} + query = {'query': {'bool': es_filters}} if sort: query['sort'] = [{timestamp_field: {'order': 'desc' if desc else 'asc'}}] return query - def get_terms_query(self, query, rule, size, field, five=False): + def get_terms_query(self, query, rule, size, field): """ Takes a query generated by get_query and outputs a aggregation query """ query_element = query['query'] if 'sort' in query_element: query_element.pop('sort') - if not five: - query_element['filtered'].update({'aggs': {'counts': {'terms': {'field': field, - 'size': size, - 'min_doc_count': rule.get('min_doc_count', 1)}}}}) - aggs_query = {'aggs': query_element} - else: - aggs_query = query - aggs_query['aggs'] = {'counts': {'terms': {'field': field, - 'size': size, - 'min_doc_count': rule.get('min_doc_count', 1)}}} + aggs_query = query + aggs_query['aggs'] = {'counts': {'terms': {'field': field, + 'size': size, + 'min_doc_count': rule.get('min_doc_count', 1)}}} return aggs_query def get_aggregation_query(self, query, rule, query_key, terms_size, timestamp_field='@timestamp'): @@ -260,7 +259,7 @@ def get_aggregation_query(self, query, rule, query_key, terms_size, timestamp_fi 'interval_aggs': { 'date_histogram': { 'field': timestamp_field, - 'interval': bucket_interval_period}, + 'fixed_interval': bucket_interval_period}, 'aggs': metric_agg_element } } @@ -275,12 +274,8 @@ def get_aggregation_query(self, query, rule, query_key, terms_size, timestamp_fi 'min_doc_count': rule.get('min_doc_count', 1)}, 'aggs': aggs_element}} - if not rule['five']: - 
query_element['filtered'].update({'aggs': aggs_element}) - aggs_query = {'aggs': query_element} - else: - aggs_query = query - aggs_query['aggs'] = aggs_element + aggs_query = query + aggs_query['aggs'] = aggs_element return aggs_query def get_index_start(self, index, timestamp_field='@timestamp'): @@ -291,12 +286,8 @@ def get_index_start(self, index, timestamp_field='@timestamp'): """ query = {'sort': {timestamp_field: {'order': 'asc'}}} try: - if self.thread_data.current_es.is_atleastsixsix(): - res = self.thread_data.current_es.search(index=index, size=1, body=query, + res = self.thread_data.current_es.search(index=index, size=1, body=query, _source_includes=[timestamp_field], ignore_unavailable=True) - else: - res = self.thread_data.current_es.search(index=index, size=1, body=query, _source_include=[timestamp_field], - ignore_unavailable=True) except ElasticsearchException as e: self.handle_error("Elasticsearch query error: %s" % (e), {'index': index, 'query': query}) return '1969-12-30T00:00:00Z' @@ -365,18 +356,11 @@ def get_hits(self, rule, starttime, endtime, index, scroll=False): endtime, timestamp_field=rule['timestamp_field'], to_ts_func=rule['dt_to_ts'], - five=rule['five'], ) - if self.thread_data.current_es.is_atleastsixsix(): - extra_args = {'_source_includes': rule['include']} - else: - extra_args = {'_source_include': rule['include']} + extra_args = {'_source_includes': rule['include']} scroll_keepalive = rule.get('scroll_keepalive', self.scroll_keepalive) if not rule.get('_source_enabled'): - if rule['five']: - query['stored_fields'] = rule['include'] - else: - query['fields'] = rule['include'] + query['stored_fields'] = rule['include'] extra_args = {} try: @@ -394,10 +378,7 @@ def get_hits(self, rule, starttime, endtime, index, scroll=False): if '_scroll_id' in res: rule['scroll_id'] = res['_scroll_id'] - if self.thread_data.current_es.is_atleastseven(): - self.thread_data.total_hits = int(res['hits']['total']['value']) - else: - self.thread_data.total_hits = int(res['hits']['total']) + self.thread_data.total_hits = int(res['hits']['total']['value']) if len(res.get('_shards', {}).get('failures', [])) > 0: try: @@ -433,9 +414,6 @@ def get_hits(self, rule, starttime, endtime, index, scroll=False): hits = self.process_hits(rule, hits) - # Record doc_type for use in get_top_counts - if 'doc_type' not in rule and len(hits): - rule['doc_type'] = hits[0]['_type'] return hits def get_hits_count(self, rule, starttime, endtime, index): @@ -455,24 +433,15 @@ def get_hits_count(self, rule, starttime, endtime, index): timestamp_field=rule['timestamp_field'], sort=False, to_ts_func=rule['dt_to_ts'], - five=rule['five'] ) es_client = self.thread_data.current_es try: - if es_client.is_atleastsixtwo(): - res = es_client.count( - index=index, - body=query, - ignore_unavailable=True - ) - else: - res = es_client.count( - index=index, - doc_type=rule['doc_type'], - body=query, - ignore_unavailable=True - ) + res = es_client.count( + index=index, + body=query, + ignore_unavailable=True + ) except ElasticsearchException as e: # Elasticsearch sometimes gives us GIGANTIC error messages # (so big that they will fill the entire terminal buffer) @@ -493,24 +462,20 @@ def get_hits_terms(self, rule, starttime, endtime, index, key, qk=None, size=Non rule_filter = copy.copy(rule['filter']) if qk: qk_list = qk.split(",") - end = None - if rule['five']: - end = '.keyword' - else: - end = '.raw' + end = '.keyword' if len(qk_list) == 1: qk = qk_list[0] filter_key = rule['query_key'] if 
rule.get('raw_count_keys', True) and not rule['query_key'].endswith(end): - filter_key = add_raw_postfix(filter_key, rule['five']) + filter_key = add_raw_postfix(filter_key) rule_filter.extend([{'term': {filter_key: qk}}]) else: filter_keys = rule['compound_query_key'] for i in range(len(filter_keys)): key_with_postfix = filter_keys[i] if rule.get('raw_count_keys', True) and not key.endswith(end): - key_with_postfix = add_raw_postfix(key_with_postfix, rule['five']) + key_with_postfix = add_raw_postfix(key_with_postfix) rule_filter.extend([{'term': {key_with_postfix: qk_list[i]}}]) base_query = self.get_query( @@ -520,24 +485,13 @@ def get_hits_terms(self, rule, starttime, endtime, index, key, qk=None, size=Non timestamp_field=rule['timestamp_field'], sort=False, to_ts_func=rule['dt_to_ts'], - five=rule['five'] ) if size is None: size = rule.get('terms_size', 50) - query = self.get_terms_query(base_query, rule, size, key, rule['five']) + query = self.get_terms_query(base_query, rule, size, key) try: - if not rule['five']: - res = self.thread_data.current_es.deprecated_search( - index=index, - doc_type=rule['doc_type'], - body=query, - search_type='count', - ignore_unavailable=True - ) - else: - res = self.thread_data.current_es.deprecated_search(index=index, doc_type=rule['doc_type'], - body=query, size=0, ignore_unavailable=True) + res = self.thread_data.current_es.search(index=index, body=query, size=0, ignore_unavailable=True) except ElasticsearchException as e: # Elasticsearch sometimes gives us GIGANTIC error messages # (so big that they will fill the entire terminal buffer) @@ -548,10 +502,7 @@ def get_hits_terms(self, rule, starttime, endtime, index, key, qk=None, size=Non if 'aggregations' not in res: return {} - if not rule['five']: - buckets = res['aggregations']['filtered']['counts']['buckets'] - else: - buckets = res['aggregations']['counts']['buckets'] + buckets = res['aggregations']['counts']['buckets'] self.thread_data.num_hits += len(buckets) lt = rule.get('use_local_time') elastalert_logger.info( @@ -570,23 +521,12 @@ def get_hits_aggregation(self, rule, starttime, endtime, index, query_key, term_ timestamp_field=rule['timestamp_field'], sort=False, to_ts_func=rule['dt_to_ts'], - five=rule['five'] ) if term_size is None: term_size = rule.get('terms_size', 50) query = self.get_aggregation_query(base_query, rule, query_key, term_size, rule['timestamp_field']) try: - if not rule['five']: - res = self.thread_data.current_es.deprecated_search( - index=index, - doc_type=rule.get('doc_type'), - body=query, - search_type='count', - ignore_unavailable=True - ) - else: - res = self.thread_data.current_es.deprecated_search(index=index, doc_type=rule.get('doc_type'), - body=query, size=0, ignore_unavailable=True) + res = self.thread_data.current_es.search(index=index, body=query, size=0, ignore_unavailable=True) except ElasticsearchException as e: if len(str(e)) > 1024: e = str(e)[:1024] + '... 
(%d characters removed)' % (len(str(e)) - 1024) @@ -594,15 +534,9 @@ def get_hits_aggregation(self, rule, starttime, endtime, index, query_key, term_ return None if 'aggregations' not in res: return {} - if not rule['five']: - payload = res['aggregations']['filtered'] - else: - payload = res['aggregations'] + payload = res['aggregations'] - if self.thread_data.current_es.is_atleastseven(): - self.thread_data.num_hits += res['hits']['total']['value'] - else: - self.thread_data.num_hits += res['hits']['total'] + self.thread_data.num_hits += res['hits']['total']['value'] return {endtime: payload} @@ -705,24 +639,14 @@ def get_starttime(self, rule): :return: A timestamp or None. """ sort = {'sort': {'@timestamp': {'order': 'desc'}}} - query = {'filter': {'term': {'rule_name': '%s' % (rule['name'])}}} - if self.writeback_es.is_atleastfive(): - query = {'query': {'bool': query}} + query = {'query': {'bool': {'filter': {'term': {'rule_name': '%s' % (rule['name'])}}}}} query.update(sort) try: doc_type = 'elastalert_status' index = self.writeback_es.resolve_writeback_index(self.writeback_index, doc_type) - if self.writeback_es.is_atleastsixtwo(): - if self.writeback_es.is_atleastsixsix(): - res = self.writeback_es.search(index=index, size=1, body=query, - _source_includes=['endtime', 'rule_name']) - else: - res = self.writeback_es.search(index=index, size=1, body=query, - _source_include=['endtime', 'rule_name']) - else: - res = self.writeback_es.deprecated_search(index=index, doc_type=doc_type, - size=1, body=query, _source_include=['endtime', 'rule_name']) + res = self.writeback_es.search(index=index, size=1, body=query, + _source_includes=['endtime', 'rule_name']) if res['hits']['hits']: endtime = ts_to_dt(res['hits']['hits'][0]['_source']['endtime']) @@ -871,10 +795,7 @@ def enhance_filter(self, rule): else: query = " OR ".join(additional_terms) query_str_filter = {'query_string': {'query': query}} - if self.writeback_es.is_atleastfive(): - filters.append(query_str_filter) - else: - filters.append({'query': query_str_filter}) + filters.append(query_str_filter) elastalert_logger.debug("Enhanced filter with {} terms: {}".format(listname, str(query_str_filter))) def get_elasticsearch_client(self, rule): @@ -1033,24 +954,14 @@ def init_rule(self, new_rule, new=True): if not new and self.scheduler.get_job(job_id=new_rule['name']): self.scheduler.remove_job(job_id=new_rule['name']) - try: - self.modify_rule_for_ES5(new_rule) - except TransportError as e: - elastalert_logger.warning('Error connecting to Elasticsearch for rule {}. 
' - 'The rule has been disabled.'.format(new_rule['name'])) - self.send_notification_email(exception=e, rule=new_rule) - return False - self.enhance_filter(new_rule) # Change top_count_keys to .raw if 'top_count_keys' in new_rule and new_rule.get('raw_count_keys', True): if self.string_multi_field_name: string_multi_field_name = self.string_multi_field_name - elif self.writeback_es.is_atleastfive(): - string_multi_field_name = '.keyword' else: - string_multi_field_name = '.raw' + string_multi_field_name = '.keyword' for i, key in enumerate(new_rule['top_count_keys']): if not key.endswith(string_multi_field_name): @@ -1095,25 +1006,6 @@ def init_rule(self, new_rule, new=True): return new_rule - @staticmethod - def modify_rule_for_ES5(new_rule): - # Get ES version per rule - rule_es = elasticsearch_client(new_rule) - if rule_es.is_atleastfive(): - new_rule['five'] = True - else: - new_rule['five'] = False - return - - # In ES5, filters starting with 'query' should have the top wrapper removed - new_filters = [] - for es_filter in new_rule.get('filter', []): - if es_filter.get('query'): - new_filters.append(es_filter['query']) - else: - new_filters.append(es_filter) - new_rule['filter'] = new_filters - def load_rule_changes(self): """ Using the modification times of rule config files, syncs the running rules to match the files in rules_folder by removing, adding or reloading rules. """ @@ -1400,119 +1292,6 @@ def sleep_for(self, duration): elastalert_logger.info("Sleeping for %s seconds" % (duration)) time.sleep(duration) - def generate_kibana4_db(self, rule, match): - ''' Creates a link for a kibana4 dashboard which has time set to the match. ''' - db_name = rule.get('use_kibana4_dashboard') - start = ts_add( - lookup_es_key(match, rule['timestamp_field']), - -rule.get('kibana4_start_timedelta', rule.get('timeframe', datetime.timedelta(minutes=10))) - ) - end = ts_add( - lookup_es_key(match, rule['timestamp_field']), - rule.get('kibana4_end_timedelta', rule.get('timeframe', datetime.timedelta(minutes=10))) - ) - return kibana.kibana4_dashboard_link(db_name, start, end) - - def generate_kibana_db(self, rule, match): - ''' Uses a template dashboard to upload a temp dashboard showing the match. - Returns the url to the dashboard. ''' - db = copy.deepcopy(kibana.dashboard_temp) - - # Set timestamp fields to match our rule especially if - # we have configured something other than @timestamp - kibana.set_timestamp_field(db, rule['timestamp_field']) - - # Set filters - for filter in rule['filter']: - if filter: - kibana.add_filter(db, filter) - kibana.set_included_fields(db, rule['include']) - - # Set index - index = self.get_index(rule) - kibana.set_index_name(db, index) - - return self.upload_dashboard(db, rule, match) - - def upload_dashboard(self, db, rule, match): - ''' Uploads a dashboard schema to the kibana-int Elasticsearch index associated with rule. - Returns the url to the dashboard. 
''' - # Set time range - start = ts_add(lookup_es_key(match, rule['timestamp_field']), -rule.get('timeframe', datetime.timedelta(minutes=10))) - end = ts_add(lookup_es_key(match, rule['timestamp_field']), datetime.timedelta(minutes=10)) - kibana.set_time(db, start, end) - - # Set dashboard name - db_name = 'ElastAlert - %s - %s' % (rule['name'], end) - kibana.set_name(db, db_name) - - # Add filter for query_key value - if 'query_key' in rule: - for qk in rule.get('compound_query_key', [rule['query_key']]): - if qk in match: - term = {'term': {qk: match[qk]}} - kibana.add_filter(db, term) - - # Add filter for aggregation_key value - if 'aggregation_key' in rule: - for qk in rule.get('compound_aggregation_key', [rule['aggregation_key']]): - if qk in match: - term = {'term': {qk: match[qk]}} - kibana.add_filter(db, term) - - # Convert to json - db_js = json.dumps(db) - db_body = {'user': 'guest', - 'group': 'guest', - 'title': db_name, - 'dashboard': db_js} - - # Upload - es = elasticsearch_client(rule) - # TODO: doc_type = _doc for elastic >= 6 - res = es.index(index='kibana-int', - doc_type='temp', - body=db_body) - - # Return dashboard URL - kibana_url = rule.get('kibana_url') - if not kibana_url: - kibana_url = 'http://%s:%s/_plugin/kibana/' % (rule['es_host'], - rule['es_port']) - return kibana_url + '#/dashboard/temp/%s' % (res['_id']) - - def get_dashboard(self, rule, db_name): - """ Download dashboard which matches use_kibana_dashboard from Elasticsearch. """ - es = elasticsearch_client(rule) - if not db_name: - raise EAException("use_kibana_dashboard undefined") - query = {'query': {'term': {'_id': db_name}}} - try: - # TODO use doc_type = _doc - res = es.deprecated_search(index='kibana-int', doc_type='dashboard', body=query, _source_include=['dashboard']) - except ElasticsearchException as e: - raise EAException("Error querying for dashboard: %s" % (e)).with_traceback(sys.exc_info()[2]) - - if res['hits']['hits']: - return json.loads(res['hits']['hits'][0]['_source']['dashboard']) - else: - raise EAException("Could not find dashboard named %s" % (db_name)) - - def use_kibana_link(self, rule, match): - """ Uploads an existing dashboard as a temp dashboard modified for match time. - Returns the url to the dashboard. 
""" - # Download or get cached dashboard - dashboard = rule.get('dashboard_schema') - if not dashboard: - db_name = rule.get('use_kibana_dashboard') - dashboard = self.get_dashboard(rule, db_name) - if dashboard: - rule['dashboard_schema'] = dashboard - else: - return None - dashboard = copy.deepcopy(dashboard) - return self.upload_dashboard(dashboard, rule, match) - def alert(self, matches, rule, alert_time=None, retried=False): """ Wraps alerting, Kibana linking and enhancements in an exception handler """ try: @@ -1554,24 +1333,6 @@ def send_alert(self, matches, rule, alert_time=None, retried=False): counts = self.get_top_counts(rule, start, end, keys, qk=qk) match.update(counts) - # Generate a kibana3 dashboard for the first match - if rule.get('generate_kibana_link') or rule.get('use_kibana_dashboard'): - try: - if rule.get('generate_kibana_link'): - kb_link = self.generate_kibana_db(rule, matches[0]) - else: - kb_link = self.use_kibana_link(rule, matches[0]) - except EAException as e: - self.handle_error("Could not generate Kibana dash for %s match: %s" % (rule['name'], e)) - else: - if kb_link: - matches[0]['kibana_link'] = kb_link - - if rule.get('use_kibana4_dashboard'): - kb_link = self.generate_kibana4_db(rule, matches[0]) - if kb_link: - matches[0]['kibana_link'] = kb_link - if rule.get('generate_kibana_discover_url'): kb_link = generate_kibana_discover_url(rule, matches[0]) if kb_link: @@ -1691,10 +1452,7 @@ def writeback(self, doc_type, body, rule=None, match_body=None): try: index = self.writeback_es.resolve_writeback_index(self.writeback_index, doc_type) - if self.writeback_es.is_atleastsixtwo(): - res = self.writeback_es.index(index=index, body=body) - else: - res = self.writeback_es.index(index=index, doc_type=doc_type, body=body) + res = self.writeback_es.index(index=index, body=body) return res except ElasticsearchException as e: elastalert_logger.exception("Error writing alert info to Elasticsearch: %s" % (e)) @@ -1711,17 +1469,10 @@ def find_recent_pending_alerts(self, time_limit): time_filter = {'range': {'alert_time': {'from': dt_to_ts(ts_now() - time_limit), 'to': dt_to_ts(ts_now())}}} sort = {'sort': {'alert_time': {'order': 'asc'}}} - if self.writeback_es.is_atleastfive(): - query = {'query': {'bool': {'must': inner_query, 'filter': time_filter}}} - else: - query = {'query': inner_query, 'filter': time_filter} + query = {'query': {'bool': {'must': inner_query, 'filter': time_filter}}} query.update(sort) try: - if self.writeback_es.is_atleastsixtwo(): - res = self.writeback_es.search(index=self.writeback_index, body=query, size=1000) - else: - res = self.writeback_es.deprecated_search(index=self.writeback_index, - doc_type='elastalert', body=query, size=1000) + res = self.writeback_es.search(index=self.writeback_index, body=query, size=1000) if res['hits']['hits']: return res['hits']['hits'] except ElasticsearchException as e: @@ -1773,10 +1524,7 @@ def send_pending_alerts(self): # Delete it from the index try: - if self.writeback_es.is_atleastsixtwo(): - self.writeback_es.delete(index=self.writeback_index, id=_id) - else: - self.writeback_es.delete(index=self.writeback_index, doc_type='elastalert', id=_id) + self.writeback_es.delete(index=self.writeback_index, id=_id) except ElasticsearchException: # TODO: Give this a more relevant exception, try:except: is evil. 
self.handle_error("Failed to delete alert %s at %s" % (_id, alert_time)) @@ -1806,18 +1554,11 @@ def get_aggregated_matches(self, _id): query = {'query': {'query_string': {'query': 'aggregate_id:"%s"' % (_id)}}, 'sort': {'@timestamp': 'asc'}} matches = [] try: - if self.writeback_es.is_atleastsixtwo(): - res = self.writeback_es.search(index=self.writeback_index, body=query, - size=self.max_aggregation) - else: - res = self.writeback_es.deprecated_search(index=self.writeback_index, doc_type='elastalert', - body=query, size=self.max_aggregation) + res = self.writeback_es.search(index=self.writeback_index, body=query, + size=self.max_aggregation) for match in res['hits']['hits']: matches.append(match['_source']) - if self.writeback_es.is_atleastsixtwo(): - self.writeback_es.delete(index=self.writeback_index, id=match['_id']) - else: - self.writeback_es.delete(index=self.writeback_index, doc_type='elastalert', id=match['_id']) + self.writeback_es.delete(index=self.writeback_index, id=match['_id']) except (KeyError, ElasticsearchException) as e: self.handle_error("Error fetching aggregated matches: %s" % (e), {'id': _id}) return matches @@ -1829,14 +1570,10 @@ def find_pending_aggregate_alert(self, rule, aggregation_key_value=None): 'must_not': [{'exists': {'field': 'aggregate_id'}}]}}} if aggregation_key_value: query['filter']['bool']['must'].append({'term': {'aggregation_key': aggregation_key_value}}) - if self.writeback_es.is_atleastfive(): - query = {'query': {'bool': query}} + query = {'query': {'bool': query}} query['sort'] = {'alert_time': {'order': 'desc'}} try: - if self.writeback_es.is_atleastsixtwo(): - res = self.writeback_es.search(index=self.writeback_index, body=query, size=1) - else: - res = self.writeback_es.deprecated_search(index=self.writeback_index, doc_type='elastalert', body=query, size=1) + res = self.writeback_es.search(index=self.writeback_index, body=query, size=1) if len(res['hits']['hits']) == 0: return None except (KeyError, ElasticsearchException) as e: @@ -1974,25 +1711,14 @@ def is_silenced(self, rule_name): return False query = {'term': {'rule_name': rule_name}} sort = {'sort': {'until': {'order': 'desc'}}} - if self.writeback_es.is_atleastfive(): - query = {'query': query} - else: - query = {'filter': query} + query = {'query': query} query.update(sort) try: doc_type = 'silence' index = self.writeback_es.resolve_writeback_index(self.writeback_index, doc_type) - if self.writeback_es.is_atleastsixtwo(): - if self.writeback_es.is_atleastsixsix(): - res = self.writeback_es.search(index=index, size=1, body=query, - _source_includes=['until', 'exponent']) - else: - res = self.writeback_es.search(index=index, size=1, body=query, - _source_include=['until', 'exponent']) - else: - res = self.writeback_es.deprecated_search(index=index, doc_type=doc_type, - size=1, body=query, _source_include=['until', 'exponent']) + res = self.writeback_es.search(index=index, size=1, body=query, + _source_includes=['until', 'exponent']) except ElasticsearchException as e: self.handle_error("Error while querying for alert silence status: %s" % (e), {'rule': rule_name}) diff --git a/elastalert/es_mappings/5/elastalert.json b/elastalert/es_mappings/5/elastalert.json deleted file mode 100644 index b522933b..00000000 --- a/elastalert/es_mappings/5/elastalert.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "elastalert": { - "properties": { - "rule_name": { - "index": "not_analyzed", - "type": "string" - }, - "@timestamp": { - "type": "date", - "format": "dateOptionalTime" - }, - "alert_time": { - 
"type": "date", - "format": "dateOptionalTime" - }, - "match_time": { - "type": "date", - "format": "dateOptionalTime" - }, - "match_body": { - "type": "object", - "enabled": "false" - }, - "aggregate_id": { - "index": "not_analyzed", - "type": "string" - } - } - } -} diff --git a/elastalert/es_mappings/5/elastalert_error.json b/elastalert/es_mappings/5/elastalert_error.json deleted file mode 100644 index 7f1b3c0a..00000000 --- a/elastalert/es_mappings/5/elastalert_error.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "elastalert_error": { - "properties": { - "data": { - "type": "object", - "enabled": "false" - }, - "@timestamp": { - "type": "date", - "format": "dateOptionalTime" - } - } - } -} diff --git a/elastalert/es_mappings/5/elastalert_status.json b/elastalert/es_mappings/5/elastalert_status.json deleted file mode 100644 index f8cd9643..00000000 --- a/elastalert/es_mappings/5/elastalert_status.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "elastalert_status": { - "properties": { - "rule_name": { - "index": "not_analyzed", - "type": "string" - }, - "@timestamp": { - "type": "date", - "format": "dateOptionalTime" - } - } - } -} diff --git a/elastalert/es_mappings/5/past_elastalert.json b/elastalert/es_mappings/5/past_elastalert.json deleted file mode 100644 index e1078374..00000000 --- a/elastalert/es_mappings/5/past_elastalert.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "past_elastalert": { - "properties": { - "rule_name": { - "index": "not_analyzed", - "type": "string" - }, - "match_body": { - "type": "object", - "enabled": "false" - }, - "@timestamp": { - "type": "date", - "format": "dateOptionalTime" - }, - "aggregate_id": { - "index": "not_analyzed", - "type": "string" - } - } - } -} diff --git a/elastalert/es_mappings/5/silence.json b/elastalert/es_mappings/5/silence.json deleted file mode 100644 index b04006da..00000000 --- a/elastalert/es_mappings/5/silence.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "silence": { - "properties": { - "rule_name": { - "index": "not_analyzed", - "type": "string" - }, - "until": { - "type": "date", - "format": "dateOptionalTime" - }, - "@timestamp": { - "type": "date", - "format": "dateOptionalTime" - } - } - } -} diff --git a/elastalert/es_mappings/6/elastalert.json b/elastalert/es_mappings/7/elastalert.json similarity index 100% rename from elastalert/es_mappings/6/elastalert.json rename to elastalert/es_mappings/7/elastalert.json diff --git a/elastalert/es_mappings/6/elastalert_error.json b/elastalert/es_mappings/7/elastalert_error.json similarity index 100% rename from elastalert/es_mappings/6/elastalert_error.json rename to elastalert/es_mappings/7/elastalert_error.json diff --git a/elastalert/es_mappings/6/elastalert_status.json b/elastalert/es_mappings/7/elastalert_status.json similarity index 100% rename from elastalert/es_mappings/6/elastalert_status.json rename to elastalert/es_mappings/7/elastalert_status.json diff --git a/elastalert/es_mappings/6/past_elastalert.json b/elastalert/es_mappings/7/past_elastalert.json similarity index 100% rename from elastalert/es_mappings/6/past_elastalert.json rename to elastalert/es_mappings/7/past_elastalert.json diff --git a/elastalert/es_mappings/6/silence.json b/elastalert/es_mappings/7/silence.json similarity index 100% rename from elastalert/es_mappings/6/silence.json rename to elastalert/es_mappings/7/silence.json diff --git a/elastalert/kibana.py b/elastalert/kibana.py deleted file mode 100644 index 64c4477e..00000000 --- a/elastalert/kibana.py +++ /dev/null @@ -1,256 +0,0 @@ -# -*- coding: utf-8 -*- -# flake8: 
noqa -import os.path -import urllib.error -import urllib.parse -import urllib.request - -from .util import EAException - - -dashboard_temp = {'editable': True, - 'failover': False, - 'index': {'default': 'NO_TIME_FILTER_OR_INDEX_PATTERN_NOT_MATCHED', - 'interval': 'none', - 'pattern': '', - 'warm_fields': True}, - 'loader': {'hide': False, - 'load_elasticsearch': True, - 'load_elasticsearch_size': 20, - 'load_gist': True, - 'load_local': True, - 'save_default': True, - 'save_elasticsearch': True, - 'save_gist': False, - 'save_local': True, - 'save_temp': True, - 'save_temp_ttl': '30d', - 'save_temp_ttl_enable': True}, - 'nav': [{'collapse': False, - 'enable': True, - 'filter_id': 0, - 'notice': False, - 'now': False, - 'refresh_intervals': ['5s', - '10s', - '30s', - '1m', - '5m', - '15m', - '30m', - '1h', - '2h', - '1d'], - 'status': 'Stable', - 'time_options': ['5m', - '15m', - '1h', - '6h', - '12h', - '24h', - '2d', - '7d', - '30d'], - 'timefield': '@timestamp', - 'type': 'timepicker'}], - 'panel_hints': True, - 'pulldowns': [{'collapse': False, - 'enable': True, - 'notice': True, - 'type': 'filtering'}], - 'refresh': False, - 'rows': [{'collapsable': True, - 'collapse': False, - 'editable': True, - 'height': '350px', - 'notice': False, - 'panels': [{'annotate': {'enable': False, - 'field': '_type', - 'query': '*', - 'size': 20, - 'sort': ['_score', 'desc']}, - 'auto_int': True, - 'bars': True, - 'derivative': False, - 'editable': True, - 'fill': 3, - 'grid': {'max': None, 'min': 0}, - 'group': ['default'], - 'interactive': True, - 'interval': '1m', - 'intervals': ['auto', - '1s', - '1m', - '5m', - '10m', - '30m', - '1h', - '3h', - '12h', - '1d', - '1w', - '1M', - '1y'], - 'legend': True, - 'legend_counts': True, - 'lines': False, - 'linewidth': 3, - 'mode': 'count', - 'options': True, - 'percentage': False, - 'pointradius': 5, - 'points': False, - 'queries': {'ids': [0], 'mode': 'all'}, - 'resolution': 100, - 'scale': 1, - 'show_query': True, - 'span': 12, - 'spyable': True, - 'stack': True, - 'time_field': '@timestamp', - 'timezone': 'browser', - 'title': 'Events over time', - 'tooltip': {'query_as_alias': True, - 'value_type': 'cumulative'}, - 'type': 'histogram', - 'value_field': None, - 'x-axis': True, - 'y-axis': True, - 'y_format': 'none', - 'zerofill': True, - 'zoomlinks': True}], - 'title': 'Graph'}, - {'collapsable': True, - 'collapse': False, - 'editable': True, - 'height': '350px', - 'notice': False, - 'panels': [{'all_fields': False, - 'editable': True, - 'error': False, - 'field_list': True, - 'fields': [], - 'group': ['default'], - 'header': True, - 'highlight': [], - 'localTime': True, - 'normTimes': True, - 'offset': 0, - 'overflow': 'min-height', - 'pages': 5, - 'paging': True, - 'queries': {'ids': [0], 'mode': 'all'}, - 'size': 100, - 'sort': ['@timestamp', 'desc'], - 'sortable': True, - 'span': 12, - 'spyable': True, - 'status': 'Stable', - 'style': {'font-size': '9pt'}, - 'timeField': '@timestamp', - 'title': 'All events', - 'trimFactor': 300, - 'type': 'table'}], - 'title': 'Events'}], - 'services': {'filter': {'ids': [0], - 'list': {'0': {'active': True, - 'alias': '', - 'field': '@timestamp', - 'from': 'now-24h', - 'id': 0, - 'mandate': 'must', - 'to': 'now', - 'type': 'time'}}}, - 'query': {'ids': [0], - 'list': {'0': {'alias': '', - 'color': '#7EB26D', - 'enable': True, - 'id': 0, - 'pin': False, - 'query': '', - 'type': 'lucene'}}}}, - 'style': 'dark', - 'title': 'ElastAlert Alert Dashboard'} - -kibana4_time_temp = 
"(refreshInterval:(display:Off,section:0,value:0),time:(from:'%s',mode:absolute,to:'%s'))" - - -def set_time(dashboard, start, end): - dashboard['services']['filter']['list']['0']['from'] = start - dashboard['services']['filter']['list']['0']['to'] = end - - -def set_index_name(dashboard, name): - dashboard['index']['default'] = name - - -def set_timestamp_field(dashboard, field): - # set the nav timefield if we don't want @timestamp - dashboard['nav'][0]['timefield'] = field - - # set the time_field for each of our panels - for row in dashboard.get('rows'): - for panel in row.get('panels'): - panel['time_field'] = field - - # set our filter's time field - dashboard['services']['filter']['list']['0']['field'] = field - - -def add_filter(dashboard, es_filter): - next_id = max(dashboard['services']['filter']['ids']) + 1 - - kibana_filter = {'active': True, - 'alias': '', - 'id': next_id, - 'mandate': 'must'} - - if 'not' in es_filter: - es_filter = es_filter['not'] - kibana_filter['mandate'] = 'mustNot' - - if 'query' in es_filter: - es_filter = es_filter['query'] - if 'query_string' in es_filter: - kibana_filter['type'] = 'querystring' - kibana_filter['query'] = es_filter['query_string']['query'] - elif 'term' in es_filter: - kibana_filter['type'] = 'field' - f_field, f_query = list(es_filter['term'].items())[0] - # Wrap query in quotes, otherwise certain characters cause Kibana to throw errors - if isinstance(f_query, str): - f_query = '"%s"' % (f_query.replace('"', '\\"')) - if isinstance(f_query, list): - # Escape quotes - f_query = [item.replace('"', '\\"') for item in f_query] - # Wrap in quotes - f_query = ['"%s"' % (item) for item in f_query] - # Convert into joined query - f_query = '(%s)' % (' AND '.join(f_query)) - kibana_filter['field'] = f_field - kibana_filter['query'] = f_query - elif 'range' in es_filter: - kibana_filter['type'] = 'range' - f_field, f_range = list(es_filter['range'].items())[0] - kibana_filter['field'] = f_field - kibana_filter.update(f_range) - else: - raise EAException("Could not parse filter %s for Kibana" % (es_filter)) - - dashboard['services']['filter']['ids'].append(next_id) - dashboard['services']['filter']['list'][str(next_id)] = kibana_filter - - -def set_name(dashboard, name): - dashboard['title'] = name - - -def set_included_fields(dashboard, fields): - dashboard['rows'][1]['panels'][0]['fields'] = list(set(fields)) - - -def kibana4_dashboard_link(dashboard, starttime, endtime): - dashboard = os.path.expandvars(dashboard) - time_settings = kibana4_time_temp % (starttime, endtime) - time_settings = urllib.parse.quote(time_settings) - return "%s?_g=%s" % (dashboard, time_settings) diff --git a/elastalert/kibana_discover.py b/elastalert/kibana_discover.py index 78db3118..a96c5b0d 100644 --- a/elastalert/kibana_discover.py +++ b/elastalert/kibana_discover.py @@ -14,8 +14,10 @@ kibana_default_timedelta = datetime.timedelta(minutes=10) -kibana5_kibana6_versions = frozenset(['5.6', '6.0', '6.1', '6.2', '6.3', '6.4', '6.5', '6.6', '6.7', '6.8']) -kibana7_versions = frozenset(['7.0', '7.1', '7.2', '7.3', '7.4', '7.5', '7.6', '7.7', '7.8', '7.9', '7.10', '7.11', '7.12', '7.13', '7.14', '7.15', '7.16', '7.17']) +kibana_versions = frozenset([ + '7.0', '7.1', '7.2', '7.3', '7.4', '7.5', '7.6', '7.7', '7.8', '7.9', '7.10', '7.11', '7.12', '7.13', '7.14', '7.15', '7.16', '7.17', + '8.0' + ]) def generate_kibana_discover_url(rule, match): ''' Creates a link for a kibana discover app. 
''' @@ -62,11 +64,7 @@ def generate_kibana_discover_url(rule, match): to_timedelta = rule.get('kibana_discover_to_timedelta', timeframe) to_time = ts_add(timestamp, to_timedelta) - if kibana_version in kibana5_kibana6_versions: - globalState = kibana6_disover_global_state(from_time, to_time) - appState = kibana_discover_app_state(index, columns, filters, query_keys, match) - - elif kibana_version in kibana7_versions: + if kibana_version in kibana_versions: globalState = kibana7_disover_global_state(from_time, to_time) appState = kibana_discover_app_state(index, columns, filters, query_keys, match) @@ -86,20 +84,6 @@ def generate_kibana_discover_url(rule, match): ) -def kibana6_disover_global_state(from_time, to_time): - return prison.dumps( { - 'refreshInterval': { - 'pause': True, - 'value': 0 - }, - 'time': { - 'from': from_time, - 'mode': 'absolute', - 'to': to_time - } - } ) - - def kibana7_disover_global_state(from_time, to_time): return prison.dumps( { 'filters': [], @@ -118,6 +102,15 @@ def kibana_discover_app_state(index, columns, filters, query_keys, match): app_filters = [] if filters: + + # Remove nested query since the outermost query key will break Kibana 8. + new_filters = [] + for filter in filters: + if 'query' in filter: + filter = filter['query'] + new_filters.append(filter) + filters = new_filters + bool_filter = { 'must': filters } app_filters.append( { '$state': { diff --git a/elastalert/loaders.py b/elastalert/loaders.py index 2ec45401..33ae54c0 100644 --- a/elastalert/loaders.py +++ b/elastalert/loaders.py @@ -323,10 +323,6 @@ def load_options(self, rule, conf, filename, args=None): rule['bucket_interval_timedelta'] = datetime.timedelta(**rule['bucket_interval']) if 'exponential_realert' in rule: rule['exponential_realert'] = datetime.timedelta(**rule['exponential_realert']) - if 'kibana4_start_timedelta' in rule: - rule['kibana4_start_timedelta'] = datetime.timedelta(**rule['kibana4_start_timedelta']) - if 'kibana4_end_timedelta' in rule: - rule['kibana4_end_timedelta'] = datetime.timedelta(**rule['kibana4_end_timedelta']) if 'kibana_discover_from_timedelta' in rule: rule['kibana_discover_from_timedelta'] = datetime.timedelta(**rule['kibana_discover_from_timedelta']) if 'kibana_discover_to_timedelta' in rule: @@ -429,24 +425,6 @@ def _dt_to_ts_with_format(dt): include.append(rule['timestamp_field']) rule['include'] = list(set(include)) - # Check that generate_kibana_url is compatible with the filters - if rule.get('generate_kibana_link'): - for es_filter in rule.get('filter'): - if es_filter: - if 'not' in es_filter: - es_filter = es_filter['not'] - if 'query' in es_filter: - es_filter = es_filter['query'] - if list(es_filter.keys())[0] not in ('term', 'query_string', 'range'): - raise EAException( - 'generate_kibana_link is incompatible with filters other than term, query_string and range.' 
- 'Consider creating a dashboard and using use_kibana_dashboard instead.') - - # Check that doc_type is provided if use_count/terms_query - if rule.get('use_count_query') or rule.get('use_terms_query'): - if 'doc_type' not in rule: - raise EAException('doc_type must be specified.') - # Check that query_key is set if use_terms_query if rule.get('use_terms_query'): if 'query_key' not in rule: diff --git a/elastalert/schema.yaml b/elastalert/schema.yaml index bea7b207..938f6461 100644 --- a/elastalert/schema.yaml +++ b/elastalert/schema.yaml @@ -111,7 +111,6 @@ oneOf: num_events: {type: integer} timeframe: *timeframe use_count_query: {type: boolean} - doc_type: {type: string} use_terms_query: {type: boolean} terms_size: {type: integer} attach_related: {type: boolean} @@ -124,7 +123,6 @@ oneOf: spike_type: {enum: ["up", "down", "both"]} timeframe: *timeframe use_count_query: {type: boolean} - doc_type: {type: string} use_terms_query: {type: boolean} terms_size: {type: integer} alert_on_new_data: {type: boolean} @@ -140,7 +138,6 @@ oneOf: metric_agg_type: {enum: ["min", "max", "avg", "sum", "cardinality", "value_count", "percentiles"]} timeframe: *timeframe use_count_query: {type: boolean} - doc_type: {type: string} use_terms_query: {type: boolean} terms_size: {type: integer} alert_on_new_data: {type: boolean} @@ -156,7 +153,6 @@ oneOf: timeframe: *timeframe threshold: {type: integer} use_count_query: {type: boolean} - doc_type: {type: string} - title: New Term required: [] @@ -240,12 +236,9 @@ properties: top_count_keys: {type: array, items: {type: string}} top_count_number: {type: integer} raw_count_keys: {type: boolean} - generate_kibana_link: {type: boolean} - kibana_dashboard: {type: string} kibana_url: {type: string, format: uri} kibana_username: {type: string} kibana_password: {type: string} - use_kibana_dashboard: {type: string} use_local_time: {type: boolean} custom_pretty_ts_format: {type: string} match_enhancements: {type: array, items: {type: string}} @@ -264,7 +257,7 @@ properties: generate_kibana_discover_url: {type: boolean} shorten_kibana_discover_url: {type: boolean} kibana_discover_app_url: {type: string} - kibana_discover_version: {type: string, enum: ['7.17', '7.16', '7.15', '7.14', '7.13', '7.12', '7.11', '7.10', '7.9', '7.8', '7.7', '7.6', '7.5', '7.4', '7.3', '7.2', '7.1', '7.0', '6.8', '6.7', '6.6', '6.5', '6.4', '6.3', '6.2', '6.1', '6.0', '5.6']} + kibana_discover_version: {type: string, enum: ['8.0', '7.17', '7.16', '7.15', '7.14', '7.13', '7.12', '7.11', '7.10', '7.9', '7.8', '7.7', '7.6', '7.5', '7.4', '7.3', '7.2', '7.1', '7.0']} kibana_discover_index_pattern_id: {type: string, minLength: 1} kibana_discover_columns: {type: array, items: {type: string, minLength: 1}, minItems: 1} kibana_discover_from_timedelta: *timedelta diff --git a/elastalert/test_rule.py b/elastalert/test_rule.py index 05dd0c9d..530cbf9a 100644 --- a/elastalert/test_rule.py +++ b/elastalert/test_rule.py @@ -186,27 +186,13 @@ def test_file(self, conf): # Set up Elasticsearch client and query es_client = elasticsearch_client(conf) - try: - ElastAlerter.modify_rule_for_ES5(conf) - except EAException as ea: - print('Invalid filter provided:', str(ea), file=sys.stderr) - if self.args.stop_error: - exit(3) - return None - except Exception as e: - print("Error connecting to ElasticSearch:", file=sys.stderr) - print(repr(e)[:2048], file=sys.stderr) - if self.args.stop_error: - exit(1) - return None ts = conf.get('timestamp_field', '@timestamp') query = ElastAlerter.get_query( conf['filter'], 
starttime=self.starttime, endtime=self.endtime, timestamp_field=ts, - to_ts_func=conf['dt_to_ts'], - five=conf['five'] + to_ts_func=conf['dt_to_ts'] ) index = ElastAlerter.get_index(conf, self.starttime, self.endtime) @@ -225,7 +211,6 @@ def test_file(self, conf): return [] terms = res['hits']['hits'][0]['_source'] - doc_type = res['hits']['hits'][0]['_type'] # Get a count of all docs count_query = ElastAlerter.get_query( @@ -234,11 +219,10 @@ def test_file(self, conf): endtime=self.endtime, timestamp_field=ts, to_ts_func=conf['dt_to_ts'], - sort=False, - five=conf['five'] + sort=False ) try: - res = es_client.count(index=index, doc_type=doc_type, body=count_query, ignore_unavailable=True) + res = es_client.count(index=index, body=count_query, ignore_unavailable=True) except Exception as e: print("Error querying Elasticsearch:", file=sys.stderr) print(repr(e)[:2048], file=sys.stderr) diff --git a/examples/rules/example_new_term.yaml b/examples/rules/example_new_term.yaml index 56dbb230..5e67f782 100755 --- a/examples/rules/example_new_term.yaml +++ b/examples/rules/example_new_term.yaml @@ -47,6 +47,8 @@ terms_window_size: # We are filtering for only "login_event" type documents with username "admin" filter: - term: + # The _type field is deprecated in Elasticsearch 7.0 and removed in Elasticsearch 8.0 + # For more information, see https://www.elastic.co/guide/en/elasticsearch/reference/7.17/removal-of-types.html _type: "login_event" - term: username: admin diff --git a/examples/rules/example_opsgenie_frequency.yaml b/examples/rules/example_opsgenie_frequency.yaml index 939aad26..b80e674d 100755 --- a/examples/rules/example_opsgenie_frequency.yaml +++ b/examples/rules/example_opsgenie_frequency.yaml @@ -71,8 +71,6 @@ type: frequency # Index to search, wildcard supported index: logstash-* -#doc_type: "golog" - # (Required, frequency specific) # Alert when this many documents matching the query occur within a timeframe num_events: 50 diff --git a/examples/rules/example_percentage_match.yaml b/examples/rules/example_percentage_match.yaml index cb780962..0f7afede 100644 --- a/examples/rules/example_percentage_match.yaml +++ b/examples/rules/example_percentage_match.yaml @@ -9,13 +9,14 @@ description: "95% of all http requests should be successful" filter: - term: + # The _type field is deprecated in Elasticsearch 7.0 and removed in Elasticsearch 8.0 + # For more information, see https://www.elastic.co/guide/en/elasticsearch/reference/7.17/removal-of-types.html _type: http_request buffer_time: minutes: 5 query_key: Hostname.keyword -doc_type: http_request match_bucket_filter: - terms: diff --git a/examples/rules/example_single_metric_agg.yaml b/examples/rules/example_single_metric_agg.yaml index 921afe30..cdff72d5 100644 --- a/examples/rules/example_single_metric_agg.yaml +++ b/examples/rules/example_single_metric_agg.yaml @@ -12,8 +12,7 @@ buffer_time: metric_agg_key: system.cpu.user.pct metric_agg_type: avg query_key: beat.hostname -doc_type: metricsets - + bucket_interval: minutes: 5 diff --git a/examples/rules/example_spike_single_metric_agg.yaml b/examples/rules/example_spike_single_metric_agg.yaml index b26ade15..007237ca 100644 --- a/examples/rules/example_spike_single_metric_agg.yaml +++ b/examples/rules/example_spike_single_metric_agg.yaml @@ -15,7 +15,6 @@ buffer_time: metric_agg_key: system.cpu.user.pct metric_agg_type: avg query_key: beat.hostname -doc_type: metricsets #allow_buffer_time_overlap: true #use_run_every_query_size: true diff --git 
a/examples/rules/ssh-repeat-offender.yaml b/examples/rules/ssh-repeat-offender.yaml index 9075703c..2618fe9f 100644 --- a/examples/rules/ssh-repeat-offender.yaml +++ b/examples/rules/ssh-repeat-offender.yaml @@ -32,10 +32,9 @@ include: - match_body.user.name - match_body.source.ip -alert_subject: "SSH abuse (repeat offender) on <{}> | <{}|Show Dashboard>" +alert_subject: "SSH abuse (repeat offender) on <{}>" alert_subject_args: - match_body.host.hostname - - kibana_link alert_text: |- A repeat offender has been active on {}. @@ -56,6 +55,3 @@ slack_username_override: "ElastAlert" # Alert body only contains a title and text alert_text_type: alert_text_only - -# Link to BitSensor Kibana Dashboard -use_kibana4_dashboard: "https://dev.securely.ai/app/kibana#/dashboard/37739d80-a95c-11e9-b5ba-33a34ca252fb" diff --git a/examples/rules/ssh.yaml b/examples/rules/ssh.yaml index 7843ddee..5b95b562 100644 --- a/examples/rules/ssh.yaml +++ b/examples/rules/ssh.yaml @@ -35,10 +35,9 @@ include: include_match_in_root: true -alert_subject: "SSH abuse on <{}> | <{}|Show Dashboard>" +alert_subject: "SSH abuse on <{}>" alert_subject_args: - host.hostname - - kibana_link alert_text: |- An attack on {} is detected. @@ -59,6 +58,3 @@ slack_username_override: "ElastAlert" # Alert body only contains a title and text alert_text_type: alert_text_only - -# Link to BitSensor Kibana Dashboard -use_kibana4_dashboard: "https://dev.securely.ai/app/kibana#/dashboard/37739d80-a95c-11e9-b5ba-33a34ca252fb" diff --git a/requirements.txt b/requirements.txt index 8699297f..784c638d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ sortedcontainers>=2.4.0 boto3>=1.20.53 cffi>=1.15.0 croniter>=1.2.0 -elasticsearch==7.0.0 +elasticsearch==7.10.1 envparse>=0.2.0 exotel>=0.1.5 Jinja2==3.0.3 @@ -22,4 +22,4 @@ statsd-tags==3.2.1.post1 twilio==6.57.0 tencentcloud-sdk-python>=3.0.577 jsonpointer>=2.2 -tzlocal==2.1 \ No newline at end of file +tzlocal==2.1 diff --git a/setup.py b/setup.py index c7076049..019ed4e1 100644 --- a/setup.py +++ b/setup.py @@ -37,7 +37,7 @@ 'sortedcontainers>=2.4.0', 'boto3>=1.20.53', 'croniter>=1.2.0', - 'elasticsearch==7.0.0', + 'elasticsearch==7.10.1', 'envparse>=0.2.0', 'exotel>=0.1.5', 'jira>=3.1.1', diff --git a/tests/alerts_test.py b/tests/alerts_test.py index 7c5bddd8..3cb27f4d 100644 --- a/tests/alerts_test.py +++ b/tests/alerts_test.py @@ -1,12 +1,8 @@ # -*- coding: utf-8 -*- -import datetime -import json -from unittest import mock from jinja2 import Template from elastalert.alerts import Alerter from elastalert.alerts import BasicMatchString -from elastalert.util import ts_add class mock_rule: @@ -58,44 +54,6 @@ def test_basic_match_string(ea): assert 'field: value' not in alert_text -def test_kibana(ea): - rule = {'filter': [{'query': {'query_string': {'query': 'xy:z'}}}], - 'name': 'Test rule!', - 'es_host': 'test.testing', - 'es_port': 12345, - 'timeframe': datetime.timedelta(hours=1), - 'index': 'logstash-test', - 'include': ['@timestamp'], - 'timestamp_field': '@timestamp'} - match = {'@timestamp': '2014-10-10T00:00:00'} - with mock.patch("elastalert.elastalert.elasticsearch_client") as mock_es: - mock_create = mock.Mock(return_value={'_id': 'ABCDEFGH'}) - mock_es_inst = mock.Mock() - mock_es_inst.index = mock_create - mock_es_inst.host = 'test.testing' - mock_es_inst.port = 12345 - mock_es.return_value = mock_es_inst - link = ea.generate_kibana_db(rule, match) - - assert 'http://test.testing:12345/_plugin/kibana/#/dashboard/temp/ABCDEFGH' == link - - # Name and index - 
dashboard = json.loads(mock_create.call_args_list[0][1]['body']['dashboard']) - assert dashboard['index']['default'] == 'logstash-test' - assert 'Test rule!' in dashboard['title'] - - # Filters and time range - filters = dashboard['services']['filter']['list'] - assert 'xy:z' in filters['1']['query'] - assert filters['1']['type'] == 'querystring' - time_range = filters['0'] - assert time_range['from'] == ts_add(match['@timestamp'], -rule['timeframe']) - assert time_range['to'] == ts_add(match['@timestamp'], datetime.timedelta(minutes=10)) - - # Included fields active in table - assert dashboard['rows'][1]['panels'][0]['fields'] == ['@timestamp'] - - def test_alert_text_kw(ea): rule = ea.rules[0].copy() rule['alert_text'] = '{field} at {time}' diff --git a/tests/base_test.py b/tests/base_test.py index c28e62c7..5be65ee8 100644 --- a/tests/base_test.py +++ b/tests/base_test.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- import copy import datetime -import json import threading import elasticsearch @@ -13,13 +12,11 @@ from elastalert.enhancements import BaseEnhancement from elastalert.enhancements import DropMatchException from elastalert.enhancements import TimeEnhancement -from elastalert.kibana import dashboard_temp from elastalert.kibana_external_url_formatter import AbsoluteKibanaExternalUrlFormatter from elastalert.kibana_external_url_formatter import ShortKibanaExternalUrlFormatter from elastalert.util import dt_to_ts from elastalert.util import dt_to_unix from elastalert.util import dt_to_unixms -from elastalert.util import EAException from elastalert.util import ts_now from elastalert.util import ts_to_dt from elastalert.util import unix_to_dt @@ -48,7 +45,7 @@ def generate_hits(timestamps, **kwargs): for field in ['_id', '_type', '_index']: data['_source'][field] = data[field] hits.append(data) - return {'hits': {'total': len(hits), 'hits': hits}} + return {'hits': {'total': {'value': len(hits)}, 'hits': hits}} def assert_alerts(ea_inst, calls): @@ -94,112 +91,59 @@ def test_init_rule(ea): def test_query(ea): - ea.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}} + ea.thread_data.current_es.search.return_value = {'hits': {'total': {'value': 0}, 'hits': []}} ea.run_query(ea.rules[0], START, END) ea.thread_data.current_es.search.assert_called_with(body={ - 'query': {'filtered': { - 'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}}, - 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'], - ignore_unavailable=True, - size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive']) - - -def test_query_sixsix(ea_sixsix): - ea_sixsix.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}} - ea_sixsix.run_query(ea_sixsix.rules[0], START, END) - ea_sixsix.thread_data.current_es.search.assert_called_with(body={ 'query': {'bool': { 'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}}, - 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'], + 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_includes=['@timestamp'], ignore_unavailable=True, - size=ea_sixsix.rules[0]['max_query_size'], scroll=ea_sixsix.conf['scroll_keepalive']) + size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive']) def test_query_with_fields(ea): ea.rules[0]['_source_enabled'] = False - ea.thread_data.current_es.search.return_value = {'hits': {'total': 0, 
'hits': []}} + ea.thread_data.current_es.search.return_value = {'hits': {'total': {'value': 0}, 'hits': []}} ea.run_query(ea.rules[0], START, END) ea.thread_data.current_es.search.assert_called_with(body={ - 'query': {'filtered': { - 'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}}, - 'sort': [{'@timestamp': {'order': 'asc'}}], 'fields': ['@timestamp']}, index='idx', ignore_unavailable=True, - size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive']) - - -def test_query_sixsix_with_fields(ea_sixsix): - ea_sixsix.rules[0]['_source_enabled'] = False - ea_sixsix.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}} - ea_sixsix.run_query(ea_sixsix.rules[0], START, END) - ea_sixsix.thread_data.current_es.search.assert_called_with(body={ 'query': {'bool': { 'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}}}, - 'sort': [{'@timestamp': {'order': 'asc'}}], 'stored_fields': ['@timestamp']}, index='idx', - ignore_unavailable=True, - size=ea_sixsix.rules[0]['max_query_size'], scroll=ea_sixsix.conf['scroll_keepalive']) + 'sort': [{'@timestamp': {'order': 'asc'}}], 'stored_fields': ['@timestamp']}, index='idx', ignore_unavailable=True, + size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive']) def test_query_with_unix(ea): ea.rules[0]['timestamp_type'] = 'unix' ea.rules[0]['dt_to_ts'] = dt_to_unix - ea.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}} + ea.thread_data.current_es.search.return_value = {'hits': {'total': {'value': 0}, 'hits': []}} ea.run_query(ea.rules[0], START, END) start_unix = dt_to_unix(START) end_unix = dt_to_unix(END) ea.thread_data.current_es.search.assert_called_with( - body={'query': {'filtered': { - 'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': end_unix, 'gt': start_unix}}}]}}}}, - 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'], - ignore_unavailable=True, - size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive']) - - -def test_query_sixsix_with_unix(ea_sixsix): - ea_sixsix.rules[0]['timestamp_type'] = 'unix' - ea_sixsix.rules[0]['dt_to_ts'] = dt_to_unix - ea_sixsix.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}} - ea_sixsix.run_query(ea_sixsix.rules[0], START, END) - start_unix = dt_to_unix(START) - end_unix = dt_to_unix(END) - ea_sixsix.thread_data.current_es.search.assert_called_with( body={'query': {'bool': { 'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': end_unix, 'gt': start_unix}}}]}}}}, - 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'], + 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_includes=['@timestamp'], ignore_unavailable=True, - size=ea_sixsix.rules[0]['max_query_size'], scroll=ea_sixsix.conf['scroll_keepalive']) + size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive']) def test_query_with_unixms(ea): ea.rules[0]['timestamp_type'] = 'unixms' ea.rules[0]['dt_to_ts'] = dt_to_unixms - ea.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}} + ea.thread_data.current_es.search.return_value = {'hits': {'total': {'value': 0}, 'hits': []}} ea.run_query(ea.rules[0], START, END) start_unix = dt_to_unixms(START) end_unix = dt_to_unixms(END) ea.thread_data.current_es.search.assert_called_with( - body={'query': {'filtered': { - 'filter': {'bool': {'must': 
[{'range': {'@timestamp': {'lte': end_unix, 'gt': start_unix}}}]}}}}, - 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'], - ignore_unavailable=True, - size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive']) - - -def test_query_sixsix_with_unixms(ea_sixsix): - ea_sixsix.rules[0]['timestamp_type'] = 'unixms' - ea_sixsix.rules[0]['dt_to_ts'] = dt_to_unixms - ea_sixsix.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}} - ea_sixsix.run_query(ea_sixsix.rules[0], START, END) - start_unix = dt_to_unixms(START) - end_unix = dt_to_unixms(END) - ea_sixsix.thread_data.current_es.search.assert_called_with( body={'query': {'bool': { 'filter': {'bool': {'must': [{'range': {'@timestamp': {'lte': end_unix, 'gt': start_unix}}}]}}}}, - 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_include=['@timestamp'], + 'sort': [{'@timestamp': {'order': 'asc'}}]}, index='idx', _source_includes=['@timestamp'], ignore_unavailable=True, - size=ea_sixsix.rules[0]['max_query_size'], scroll=ea_sixsix.conf['scroll_keepalive']) + size=ea.rules[0]['max_query_size'], scroll=ea.conf['scroll_keepalive']) def test_no_hits(ea): - ea.thread_data.current_es.search.return_value = {'hits': {'total': 0, 'hits': []}} + ea.thread_data.current_es.search.return_value = {'hits': {'total': {'value': 0}, 'hits': []}} ea.run_query(ea.rules[0], START, END) assert ea.rules[0]['type'].add_data.call_count == 0 @@ -207,8 +151,7 @@ def test_no_hits(ea): def test_no_terms_hits(ea): ea.rules[0]['use_terms_query'] = True ea.rules[0]['query_key'] = 'QWERTY' - ea.rules[0]['doc_type'] = 'uiop' - ea.thread_data.current_es.deprecated_search.return_value = {'hits': {'total': 0, 'hits': []}} + ea.thread_data.current_es.search.return_value = {'hits': {'total': {'value': 0}, 'hits': []}} ea.run_query(ea.rules[0], START, END) assert ea.rules[0]['type'].add_terms_data.call_count == 0 @@ -303,7 +246,6 @@ def test_query_exception(ea): def test_query_exception_count_query(ea): ea.rules[0]['use_count_query'] = True - ea.rules[0]['doc_type'] = 'blahblahblahblah' mock_es = mock.Mock() mock_es.count.side_effect = ElasticsearchException run_rule_query_exception(ea, mock_es) @@ -325,16 +267,16 @@ def test_match_with_module_from_pending(ea): pending_alert = {'match_body': {'foo': 'bar'}, 'rule_name': ea.rules[0]['name'], 'alert_time': START_TIMESTAMP, '@timestamp': START_TIMESTAMP} # First call, return the pending alert, second, no associated aggregated alerts - ea.writeback_es.deprecated_search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': pending_alert}]}}, - {'hits': {'hits': []}}] + ea.writeback_es.search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': pending_alert}]}}, + {'hits': {'hits': []}}] ea.send_pending_alerts() assert mod.process.call_count == 0 # If aggregation is set, enhancement IS called pending_alert = {'match_body': {'foo': 'bar'}, 'rule_name': ea.rules[0]['name'], 'alert_time': START_TIMESTAMP, '@timestamp': START_TIMESTAMP} - ea.writeback_es.deprecated_search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': pending_alert}]}}, - {'hits': {'hits': []}}] + ea.writeback_es.search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': pending_alert}]}}, + {'hits': {'hits': []}}] ea.rules[0]['aggregation'] = datetime.timedelta(minutes=10) ea.send_pending_alerts() assert mod.process.call_count == 1 @@ -412,10 +354,10 @@ def test_agg_matchtime(ea): # 
First call - Find all pending alerts (only entries without agg_id) # Second call - Find matches with agg_id == 'ABCD' # Third call - Find matches with agg_id == 'CDEF' - ea.writeback_es.deprecated_search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': call1}, - {'_id': 'CDEF', '_index': 'wb', '_source': call3}]}}, - {'hits': {'hits': [{'_id': 'BCDE', '_index': 'wb', '_source': call2}]}}, - {'hits': {'total': 0, 'hits': []}}] + ea.writeback_es.search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': call1}, + {'_id': 'CDEF', '_index': 'wb', '_source': call3}]}}, + {'hits': {'hits': [{'_id': 'BCDE', '_index': 'wb', '_source': call2}]}}, + {'hits': {'total': 0, 'hits': []}}] with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es: ea.send_pending_alerts() @@ -424,15 +366,15 @@ def test_agg_matchtime(ea): assert mock_es.call_count == 2 assert_alerts(ea, [hits_timestamps[:2], hits_timestamps[2:]]) - call1 = ea.writeback_es.deprecated_search.call_args_list[7][1]['body'] - call2 = ea.writeback_es.deprecated_search.call_args_list[8][1]['body'] - call3 = ea.writeback_es.deprecated_search.call_args_list[9][1]['body'] - call4 = ea.writeback_es.deprecated_search.call_args_list[10][1]['body'] + call1 = ea.writeback_es.search.call_args_list[7][1]['body'] + call2 = ea.writeback_es.search.call_args_list[8][1]['body'] + call3 = ea.writeback_es.search.call_args_list[9][1]['body'] + call4 = ea.writeback_es.search.call_args_list[10][1]['body'] - assert 'alert_time' in call2['filter']['range'] + assert 'alert_time' in call2['query']['bool']['filter']['range'] assert call3['query']['query_string']['query'] == 'aggregate_id:"ABCD"' assert call4['query']['query_string']['query'] == 'aggregate_id:"CDEF"' - assert ea.writeback_es.deprecated_search.call_args_list[9][1]['size'] == 1337 + assert ea.writeback_es.search.call_args_list[9][1]['size'] == 1337 def test_agg_not_matchtime(ea): @@ -580,10 +522,10 @@ def test_agg_with_aggregation_key(ea): # First call - Find all pending alerts (only entries without agg_id) # Second call - Find matches with agg_id == 'ABCD' # Third call - Find matches with agg_id == 'CDEF' - ea.writeback_es.deprecated_search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': call1}, - {'_id': 'CDEF', '_index': 'wb', '_source': call2}]}}, - {'hits': {'hits': [{'_id': 'BCDE', '_index': 'wb', '_source': call3}]}}, - {'hits': {'total': 0, 'hits': []}}] + ea.writeback_es.search.side_effect = [{'hits': {'hits': [{'_id': 'ABCD', '_index': 'wb', '_source': call1}, + {'_id': 'CDEF', '_index': 'wb', '_source': call2}]}}, + {'hits': {'hits': [{'_id': 'BCDE', '_index': 'wb', '_source': call3}]}}, + {'hits': {'total': 0, 'hits': []}}] with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es: mock_es.return_value = ea.thread_data.current_es @@ -593,15 +535,15 @@ def test_agg_with_aggregation_key(ea): assert mock_es.call_count == 2 assert_alerts(ea, [[hits_timestamps[0], hits_timestamps[2]], [hits_timestamps[1]]]) - call1 = ea.writeback_es.deprecated_search.call_args_list[7][1]['body'] - call2 = ea.writeback_es.deprecated_search.call_args_list[8][1]['body'] - call3 = ea.writeback_es.deprecated_search.call_args_list[9][1]['body'] - call4 = ea.writeback_es.deprecated_search.call_args_list[10][1]['body'] + call1 = ea.writeback_es.search.call_args_list[7][1]['body'] + call2 = ea.writeback_es.search.call_args_list[8][1]['body'] + call3 = ea.writeback_es.search.call_args_list[9][1]['body'] + call4 = 
ea.writeback_es.search.call_args_list[10][1]['body'] - assert 'alert_time' in call2['filter']['range'] + assert 'alert_time' in call2['query']['bool']['filter']['range'] assert call3['query']['query_string']['query'] == 'aggregate_id:"ABCD"' assert call4['query']['query_string']['query'] == 'aggregate_id:"CDEF"' - assert ea.writeback_es.deprecated_search.call_args_list[9][1]['size'] == 1337 + assert ea.writeback_es.search.call_args_list[9][1]['size'] == 1337 def test_silence(ea): @@ -760,7 +702,6 @@ def test_realert_with_nested_query_key(ea): def test_count(ea): ea.rules[0]['use_count_query'] = True - ea.rules[0]['doc_type'] = 'doctype' with mock.patch('elastalert.elastalert.elasticsearch_client'), \ mock.patch.object(ea, 'get_hits_count') as mock_hits: ea.run_rule(ea.rules[0], END, START) @@ -910,7 +851,6 @@ def test_set_starttime(ea): # Count query, starttime, no previous endtime ea.rules[0]['use_count_query'] = True - ea.rules[0]['doc_type'] = 'blah' with mock.patch.object(ea, 'get_starttime') as mock_gs: mock_gs.return_value = None ea.set_starttime(ea.rules[0], end) @@ -947,53 +887,6 @@ def test_set_starttime(ea): assert ea.rules[0]['starttime'] == end - datetime.timedelta(days=3) -def test_kibana_dashboard(ea): - match = {'@timestamp': '2014-10-11T00:00:00'} - mock_es = mock.Mock() - ea.rules[0]['use_kibana_dashboard'] = 'my dashboard' - with mock.patch('elastalert.elastalert.elasticsearch_client') as mock_es_init: - mock_es_init.return_value = mock_es - - # No dashboard found - mock_es.deprecated_search.return_value = {'hits': {'total': 0, 'hits': []}} - with pytest.raises(EAException): - ea.use_kibana_link(ea.rules[0], match) - mock_call = mock_es.deprecated_search.call_args_list[0][1] - assert mock_call['body'] == {'query': {'term': {'_id': 'my dashboard'}}} - - # Dashboard found - mock_es.index.return_value = {'_id': 'ABCDEFG'} - mock_es.deprecated_search.return_value = {'hits': {'hits': [{'_source': {'dashboard': json.dumps(dashboard_temp)}}]}} - url = ea.use_kibana_link(ea.rules[0], match) - assert 'ABCDEFG' in url - db = json.loads(mock_es.index.call_args_list[0][1]['body']['dashboard']) - assert 'anytest' in db['title'] - - # Query key filtering added - ea.rules[0]['query_key'] = 'foobar' - match['foobar'] = 'baz' - url = ea.use_kibana_link(ea.rules[0], match) - db = json.loads(mock_es.index.call_args_list[-1][1]['body']['dashboard']) - assert db['services']['filter']['list']['1']['field'] == 'foobar' - assert db['services']['filter']['list']['1']['query'] == '"baz"' - - # Compound query key - ea.rules[0]['query_key'] = 'foo,bar' - ea.rules[0]['compound_query_key'] = ['foo', 'bar'] - match['foo'] = 'cat' - match['bar'] = 'dog' - match['foo,bar'] = 'cat, dog' - url = ea.use_kibana_link(ea.rules[0], match) - db = json.loads(mock_es.index.call_args_list[-1][1]['body']['dashboard']) - found_filters = 0 - for filter_id, filter_dict in list(db['services']['filter']['list'].items()): - if (filter_dict['field'] == 'foo' and filter_dict['query'] == '"cat"') or \ - (filter_dict['field'] == 'bar' and filter_dict['query'] == '"dog"'): - found_filters += 1 - continue - assert found_filters == 2 - - def test_rule_changes(ea): ea.rule_hashes = {'rules/rule1.yaml': 'ABC', 'rules/rule2.yaml': 'DEF'} @@ -1097,17 +990,15 @@ def test_count_keys(ea): ea.rules[0]['timeframe'] = datetime.timedelta(minutes=60) ea.rules[0]['top_count_keys'] = ['this', 'that'] ea.rules[0]['type'].matches = {'@timestamp': END} - ea.rules[0]['doc_type'] = 'blah' - buckets = [{'aggregations': { - 'filtered': {'counts': 
{'buckets': [{'key': 'a', 'doc_count': 10}, {'key': 'b', 'doc_count': 5}]}}}}, - {'aggregations': {'filtered': { - 'counts': {'buckets': [{'key': 'd', 'doc_count': 10}, {'key': 'c', 'doc_count': 12}]}}}}] - ea.thread_data.current_es.deprecated_search.side_effect = buckets + buckets = [{'aggregations': + {'counts': {'buckets': [{'key': 'a', 'doc_count': 10}, {'key': 'b', 'doc_count': 5}]}}}, + {'aggregations': + {'counts': {'buckets': [{'key': 'd', 'doc_count': 10}, {'key': 'c', 'doc_count': 12}]}}}] + ea.thread_data.current_es.search.side_effect = buckets counts = ea.get_top_counts(ea.rules[0], START, END, ['this', 'that']) - calls = ea.thread_data.current_es.deprecated_search.call_args_list - assert calls[0][1]['search_type'] == 'count' - assert calls[0][1]['body']['aggs']['filtered']['aggs']['counts']['terms'] == {'field': 'this', 'size': 5, - 'min_doc_count': 1} + calls = ea.thread_data.current_es.search.call_args_list + assert calls[0][1]['body']['aggs']['counts']['terms'] == {'field': 'this', 'size': 5, + 'min_doc_count': 1} assert counts['top_events_this'] == {'a': 10, 'b': 5} assert counts['top_events_that'] == {'d': 10, 'c': 12} @@ -1346,18 +1237,7 @@ def test_query_with_whitelist_filter_es(ea): new_rule = copy.copy(ea.rules[0]) ea.init_rule(new_rule, True) assert 'NOT username:"xudan1" AND NOT username:"xudan12" AND NOT username:"aa1"' \ - in new_rule['filter'][-1]['query']['query_string']['query'] - - -def test_query_with_whitelist_filter_es_five(ea_sixsix): - ea_sixsix.rules[0]['_source_enabled'] = False - ea_sixsix.rules[0]['filter'] = [{'query_string': {'query': 'baz'}}] - ea_sixsix.rules[0]['compare_key'] = "username" - ea_sixsix.rules[0]['whitelist'] = ['xudan1', 'xudan12', 'aa1', 'bb1'] - new_rule = copy.copy(ea_sixsix.rules[0]) - ea_sixsix.init_rule(new_rule, True) - assert 'NOT username:"xudan1" AND NOT username:"xudan12" AND NOT username:"aa1"' in \ - new_rule['filter'][-1]['query_string']['query'] + in new_rule['filter'][-1]['query_string']['query'] def test_query_with_blacklist_filter_es(ea): @@ -1368,19 +1248,7 @@ def test_query_with_blacklist_filter_es(ea): new_rule = copy.copy(ea.rules[0]) ea.init_rule(new_rule, True) assert 'username:"xudan1" OR username:"xudan12" OR username:"aa1"' in \ - new_rule['filter'][-1]['query']['query_string']['query'] - - -def test_query_with_blacklist_filter_es_five(ea_sixsix): - ea_sixsix.rules[0]['_source_enabled'] = False - ea_sixsix.rules[0]['filter'] = [{'query_string': {'query': 'baz'}}] - ea_sixsix.rules[0]['compare_key'] = "username" - ea_sixsix.rules[0]['blacklist'] = ['xudan1', 'xudan12', 'aa1', 'bb1'] - ea_sixsix.rules[0]['blacklist'] = ['xudan1', 'xudan12', 'aa1', 'bb1'] - new_rule = copy.copy(ea_sixsix.rules[0]) - ea_sixsix.init_rule(new_rule, True) - assert 'username:"xudan1" OR username:"xudan12" OR username:"aa1"' in new_rule['filter'][-1]['query_string'][ - 'query'] + new_rule['filter'][-1]['query_string']['query'] def test_handle_rule_execution_error(ea, caplog): diff --git a/tests/conftest.py b/tests/conftest.py index f721053f..3a646584 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -63,7 +63,6 @@ def __init__(self, host='es', port=14900): self.port = port self.return_hits = [] self.search = mock.Mock() - self.deprecated_search = mock.Mock() self.create = mock.Mock() self.index = mock.Mock() self.delete = mock.Mock() @@ -71,48 +70,10 @@ def __init__(self, host='es', port=14900): self.ping = mock.Mock(return_value=True) self.indices = mock_es_indices_client() self.es_version = mock.Mock(return_value='2.0') - 
self.is_atleastfive = mock.Mock(return_value=False) - self.is_atleastsix = mock.Mock(return_value=False) - self.is_atleastsixtwo = mock.Mock(return_value=False) - self.is_atleastsixsix = mock.Mock(return_value=False) - self.is_atleastseven = mock.Mock(return_value=False) + self.is_atleastseven = mock.Mock(return_value=True) self.resolve_writeback_index = mock.Mock(return_value=writeback_index) -class mock_es_sixsix_client(object): - def __init__(self, host='es', port=14900): - self.host = host - self.port = port - self.return_hits = [] - self.search = mock.Mock() - self.deprecated_search = mock.Mock() - self.create = mock.Mock() - self.index = mock.Mock() - self.delete = mock.Mock() - self.info = mock.Mock(return_value={'status': 200, 'name': 'foo', 'version': {'number': '6.6.0'}}) - self.ping = mock.Mock(return_value=True) - self.indices = mock_es_indices_client() - self.es_version = mock.Mock(return_value='6.6.0') - self.is_atleastfive = mock.Mock(return_value=True) - self.is_atleastsix = mock.Mock(return_value=True) - self.is_atleastsixtwo = mock.Mock(return_value=False) - self.is_atleastsixsix = mock.Mock(return_value=True) - self.is_atleastseven = mock.Mock(return_value=False) - - def writeback_index_side_effect(index, doc_type): - if doc_type == 'silence': - return index + '_silence' - elif doc_type == 'past_elastalert': - return index + '_past' - elif doc_type == 'elastalert_status': - return index + '_status' - elif doc_type == 'elastalert_error': - return index + '_error' - return index - - self.resolve_writeback_index = mock.Mock(side_effect=writeback_index_side_effect) - - class mock_rule_loader(object): def __init__(self, conf): self.base_config = conf @@ -185,7 +146,6 @@ def ea(): ea.rules[0]['alert'] = [mock_alert()] ea.writeback_es = mock_es_client() ea.writeback_es.search.return_value = {'hits': {'hits': []}, 'total': 0} - ea.writeback_es.deprecated_search.return_value = {'hits': {'hits': []}} ea.writeback_es.index.return_value = {'_id': 'ABCD', 'created': True} ea.current_es = mock_es_client('', '') ea.thread_data.current_es = ea.current_es @@ -194,57 +154,6 @@ def ea(): return ea -@pytest.fixture -def ea_sixsix(): - rules = [{'es_host': '', - 'es_port': 14900, - 'name': 'anytest', - 'index': 'idx', - 'filter': [], - 'include': ['@timestamp'], - 'run_every': datetime.timedelta(seconds=1), - 'aggregation': datetime.timedelta(0), - 'realert': datetime.timedelta(0), - 'processed_hits': {}, - 'timestamp_field': '@timestamp', - 'match_enhancements': [], - 'rule_file': 'blah.yaml', - 'max_query_size': 10000, - 'ts_to_dt': ts_to_dt, - 'dt_to_ts': dt_to_ts, - '_source_enabled': True}] - conf = {'rules_folder': 'rules', - 'run_every': datetime.timedelta(minutes=10), - 'buffer_time': datetime.timedelta(minutes=5), - 'alert_time_limit': datetime.timedelta(hours=24), - 'es_host': 'es', - 'es_port': 14900, - 'writeback_index': writeback_index, - 'rules': rules, - 'max_query_size': 10000, - 'old_query_limit': datetime.timedelta(weeks=1), - 'disable_rules_on_error': False, - 'scroll_keepalive': '30s', - 'custom_pretty_ts_format': '%Y-%m-%d %H:%M'} - conf['rules_loader'] = mock_rule_loader(conf) - elastalert.elastalert.elasticsearch_client = mock_es_sixsix_client - elastalert.util.elasticsearch_client = mock_es_sixsix_client - with mock.patch('elastalert.elastalert.load_conf') as load_conf: - with mock.patch('elastalert.elastalert.BackgroundScheduler'): - load_conf.return_value = conf - conf['rules_loader'].load.return_value = rules - conf['rules_loader'].get_hashes.return_value = {} - 
ea_sixsix = elastalert.elastalert.ElastAlerter(['--pin_rules']) - ea_sixsix.rules[0]['type'] = mock_ruletype() - ea_sixsix.rules[0]['alert'] = [mock_alert()] - ea_sixsix.writeback_es = mock_es_sixsix_client() - ea_sixsix.writeback_es.search.return_value = {'hits': {'hits': []}} - ea_sixsix.writeback_es.deprecated_search.return_value = {'hits': {'hits': []}} - ea_sixsix.writeback_es.index.return_value = {'_id': 'ABCD'} - ea_sixsix.current_es = mock_es_sixsix_client('', -1) - return ea_sixsix - - @pytest.fixture(scope='function') def environ(): """py.test fixture to get a fresh mutable environment.""" diff --git a/tests/create_index_test.py b/tests/create_index_test.py index 3ac7251e..ab053a2e 100644 --- a/tests/create_index_test.py +++ b/tests/create_index_test.py @@ -21,106 +21,18 @@ def test_read_default_index_mapping(es_mapping): print((json.dumps(mapping, indent=2))) -@pytest.mark.parametrize('es_mapping', es_mappings) -def test_read_es_5_index_mapping(es_mapping): - mapping = elastalert.create_index.read_es_index_mapping(es_mapping, 5) - assert es_mapping in mapping - print((json.dumps(mapping, indent=2))) - - -@pytest.mark.parametrize('es_mapping', es_mappings) -def test_read_es_6_index_mapping(es_mapping): - mapping = elastalert.create_index.read_es_index_mapping(es_mapping, 6) - assert es_mapping not in mapping - print((json.dumps(mapping, indent=2))) - - def test_read_default_index_mappings(): mappings = elastalert.create_index.read_es_index_mappings() assert len(mappings) == len(es_mappings) print((json.dumps(mappings, indent=2))) -def test_read_es_5_index_mappings(): - mappings = elastalert.create_index.read_es_index_mappings(5) - assert len(mappings) == len(es_mappings) - print((json.dumps(mappings, indent=2))) - - -def test_read_es_6_index_mappings(): - mappings = elastalert.create_index.read_es_index_mappings(6) - assert len(mappings) == len(es_mappings) - print((json.dumps(mappings, indent=2))) - - def test_read_es_8_index_mappings(): mappings = elastalert.create_index.read_es_index_mappings(8) assert len(mappings) == len(es_mappings) print((json.dumps(mappings, indent=2))) -@pytest.mark.parametrize('es_version, expected', [ - ('5.6.0', False), - ('6.0.0', True), - ('6.1.0', True), - ('6.2.0', True), - ('6.3.0', True), - ('6.4.0', True), - ('6.5.0', True), - ('6.6.0', True), - ('6.7.0', True), - ('6.8.0', True), - ('7.0.0', True), - ('7.1.0', True), - ('7.2.0', True), - ('7.3.0', True), - ('7.4.0', True), - ('7.5.0', True), - ('7.6.0', True), - ('7.7.0', True), - ('7.8.0', True), - ('7.9.0', True), - ('7.10.0', True), - ('7.11.0', True), - ('7.12.0', True), - ('7.13.0', True) -]) -def test_is_atleastsix(es_version, expected): - result = elastalert.create_index.is_atleastsix(es_version) - assert result == expected - - -@pytest.mark.parametrize('es_version, expected', [ - ('5.6.0', False), - ('6.0.0', False), - ('6.1.0', False), - ('6.2.0', True), - ('6.3.0', True), - ('6.4.0', True), - ('6.5.0', True), - ('6.6.0', True), - ('6.7.0', True), - ('6.8.0', True), - ('7.0.0', True), - ('7.1.0', True), - ('7.2.0', True), - ('7.3.0', True), - ('7.4.0', True), - ('7.5.0', True), - ('7.6.0', True), - ('7.7.0', True), - ('7.8.0', True), - ('7.9.0', True), - ('7.10.0', True), - ('7.11.0', True), - ('7.12.0', True), - ('7.13.0', True) -]) -def test_is_atleastsixtwo(es_version, expected): - result = elastalert.create_index.is_atleastsixtwo(es_version) - assert result == expected - - @pytest.mark.parametrize('es_version, expected', [ ('5.6.0', False), ('6.0.0', False), diff --git 
a/tests/elasticsearch_test.py b/tests/elasticsearch_test.py index 308356c2..7c7617f7 100644 --- a/tests/elasticsearch_test.py +++ b/tests/elasticsearch_test.py @@ -37,18 +37,11 @@ def test_create_indices(self, es_client): print(('-' * 50)) print((json.dumps(indices_mappings, indent=2))) print(('-' * 50)) - if es_client.is_atleastsix(): - assert test_index in indices_mappings - assert test_index + '_error' in indices_mappings - assert test_index + '_status' in indices_mappings - assert test_index + '_silence' in indices_mappings - assert test_index + '_past' in indices_mappings - else: - assert 'elastalert' in indices_mappings[test_index]['mappings'] - assert 'elastalert_error' in indices_mappings[test_index]['mappings'] - assert 'elastalert_status' in indices_mappings[test_index]['mappings'] - assert 'silence' in indices_mappings[test_index]['mappings'] - assert 'past_elastalert' in indices_mappings[test_index]['mappings'] + assert test_index in indices_mappings + assert test_index + '_error' in indices_mappings + assert test_index + '_status' in indices_mappings + assert test_index + '_silence' in indices_mappings + assert test_index + '_past' in indices_mappings @pytest.mark.usefixtures("ea") def test_aggregated_alert(self, ea, es_client): # noqa: F811 @@ -61,10 +54,7 @@ def test_aggregated_alert(self, ea, es_client): # noqa: F811 } ea.writeback_es = es_client res = ea.add_aggregated_alert(match, ea.rules[0]) - if ea.writeback_es.is_atleastsix(): - assert res['result'] == 'created' - else: - assert res['created'] is True + assert res['result'] == 'created' # Make sure added data is available for querying time.sleep(2) # Now lets find the pending aggregated alert @@ -76,10 +66,7 @@ def test_silenced(self, ea, es_client): # noqa: F811 days=1) ea.writeback_es = es_client res = ea.set_realert(ea.rules[0]['name'], until_timestamp, 0) - if ea.writeback_es.is_atleastsix(): - assert res['result'] == 'created' - else: - assert res['created'] is True + assert res['result'] == 'created' # Make sure added data is available for querying time.sleep(2) # Force lookup in elasticsearch diff --git a/tests/kibana_discover_test.py b/tests/kibana_discover_test.py index b5ab0f5f..4514104b 100644 --- a/tests/kibana_discover_test.py +++ b/tests/kibana_discover_test.py @@ -5,39 +5,6 @@ from elastalert.kibana_discover import generate_kibana_discover_url -@pytest.mark.parametrize("kibana_version", ['5.6', '6.0', '6.1', '6.2', '6.3', '6.4', '6.5', '6.6', '6.7', '6.8']) -def test_generate_kibana_discover_url_with_kibana_5x_and_6x(kibana_version): - url = generate_kibana_discover_url( - rule={ - 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': kibana_version, - 'kibana_discover_index_pattern_id': 'd6cabfb6-aaef-44ea-89c5-600e9a76991a', - 'timestamp_field': 'timestamp' - }, - match={ - 'timestamp': '2019-09-01T00:30:00Z' - } - ) - expectedUrl = ( - 'http://kibana:5601/#/discover' - + '?_g=%28' # global start - + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C' - + 'time%3A%28' # time start - + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C' - + 'mode%3Aabsolute%2C' - + 'to%3A%272019-09-01T00%3A40%3A00Z%27' - + '%29' # time end - + '%29' # global end - + '&_a=%28' # app start - + 'columns%3A%21%28_source%29%2C' - + 'filters%3A%21%28%29%2C' - + 'index%3Ad6cabfb6-aaef-44ea-89c5-600e9a76991a%2C' - + 'interval%3Aauto' - + '%29' # app end - ) - assert url == expectedUrl - - @pytest.mark.parametrize("kibana_version", [ '7.0', '7.1', @@ -56,7 +23,8 @@ def 
test_generate_kibana_discover_url_with_kibana_5x_and_6x(kibana_version): '7.14', '7.15', '7.16', - '7.17' + '7.17', + '8.0', ]) def test_generate_kibana_discover_url_with_kibana_7x(kibana_version): url = generate_kibana_discover_url( @@ -90,11 +58,11 @@ def test_generate_kibana_discover_url_with_kibana_7x(kibana_version): assert url == expectedUrl -def test_generate_kibana_discover_url_with_relative_kinbana_discover_app_url(): +def test_generate_kibana_discover_url_with_relative_kibana_discover_app_url(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'app/discover#/', - 'kibana_discover_version': '7.17', + 'kibana_discover_version': '8.0', 'kibana_discover_index_pattern_id': '620ad0e6-43df-4557-bda2-384960fa9086', 'timestamp_field': 'timestamp' }, @@ -140,7 +108,7 @@ def test_generate_kibana_discover_url_with_missing_kibana_discover_version(): def test_generate_kibana_discover_url_with_missing_kibana_discover_app_url(): url = generate_kibana_discover_url( rule={ - 'kibana_discover_version': '6.8', + 'kibana_discover_version': '8.0', 'kibana_discover_index_pattern_id': 'logs', 'timestamp_field': 'timestamp', 'name': 'test' }, @@ -156,7 +124,7 @@ def test_generate_kibana_discover_url_with_missing_kibana_discover_index_pattern url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '6.8', + 'kibana_discover_version': '8.0', 'timestamp_field': 'timestamp', 'name': 'test' }, @@ -190,7 +158,7 @@ def test_generate_kibana_discover_url_with_kibana_discover_app_url_env_substitut url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://$KIBANA_HOST:$KIBANA_PORT/#/discover', - 'kibana_discover_version': '6.8', + 'kibana_discover_version': '8.0', 'kibana_discover_index_pattern_id': 'd6cabfb6-aaef-44ea-89c5-600e9a76991a', 'timestamp_field': 'timestamp' }, @@ -201,10 +169,10 @@ def test_generate_kibana_discover_url_with_kibana_discover_app_url_env_substitut expectedUrl = ( 'http://kibana:5601/#/discover' + '?_g=%28' # global start + + 'filters%3A%21%28%29%2C' + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C' + 'time%3A%28' # time start + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C' - + 'mode%3Aabsolute%2C' + 'to%3A%272019-09-01T00%3A40%3A00Z%27' + '%29' # time end + '%29' # global end @@ -222,7 +190,7 @@ def test_generate_kibana_discover_url_with_from_timedelta(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '7.17', + 'kibana_discover_version': '8.0', 'kibana_discover_index_pattern_id': 'd6cabfb6-aaef-44ea-89c5-600e9a76991a', 'kibana_discover_from_timedelta': timedelta(hours=1), 'timestamp_field': 'timestamp' @@ -255,7 +223,7 @@ def test_generate_kibana_discover_url_with_from_timedelta_and_timeframe(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '7.17', + 'kibana_discover_version': '8.0', 'kibana_discover_index_pattern_id': 'd6cabfb6-aaef-44ea-89c5-600e9a76991a', 'kibana_discover_from_timedelta': timedelta(hours=1), 'timeframe': timedelta(minutes=20), @@ -289,7 +257,7 @@ def test_generate_kibana_discover_url_with_to_timedelta(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '7.17', + 'kibana_discover_version': '8.0', 'kibana_discover_index_pattern_id': 'd6cabfb6-aaef-44ea-89c5-600e9a76991a', 'kibana_discover_to_timedelta': 
timedelta(hours=1), 'timestamp_field': 'timestamp' @@ -322,7 +290,7 @@ def test_generate_kibana_discover_url_with_to_timedelta_and_timeframe(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '7.17', + 'kibana_discover_version': '8.0', 'kibana_discover_index_pattern_id': 'd6cabfb6-aaef-44ea-89c5-600e9a76991a', 'kibana_discover_to_timedelta': timedelta(hours=1), 'timeframe': timedelta(minutes=20), @@ -356,7 +324,7 @@ def test_generate_kibana_discover_url_with_timeframe(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '7.17', + 'kibana_discover_version': '8.0', 'kibana_discover_index_pattern_id': 'd6cabfb6-aaef-44ea-89c5-600e9a76991a', 'timeframe': timedelta(minutes=20), 'timestamp_field': 'timestamp' @@ -389,7 +357,7 @@ def test_generate_kibana_discover_url_with_custom_columns(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '6.8', + 'kibana_discover_version': '8.0', 'kibana_discover_index_pattern_id': 'logs-*', 'kibana_discover_columns': ['level', 'message'], 'timestamp_field': 'timestamp' @@ -401,10 +369,10 @@ def test_generate_kibana_discover_url_with_custom_columns(): expectedUrl = ( 'http://kibana:5601/#/discover' + '?_g=%28' # global start + + 'filters%3A%21%28%29%2C' + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C' + 'time%3A%28' # time start + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C' - + 'mode%3Aabsolute%2C' + 'to%3A%272019-09-01T00%3A40%3A00Z%27' + '%29' # time end + '%29' # global end @@ -422,7 +390,7 @@ def test_generate_kibana_discover_url_with_single_filter(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '6.8', + 'kibana_discover_version': '8.0', 'kibana_discover_index_pattern_id': 'logs-*', 'timestamp_field': 'timestamp', 'filter': [ @@ -436,10 +404,10 @@ def test_generate_kibana_discover_url_with_single_filter(): expectedUrl = ( 'http://kibana:5601/#/discover' + '?_g=%28' # global start + + 'filters%3A%21%28%29%2C' + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C' + 'time%3A%28' # time start + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C' - + 'mode%3Aabsolute%2C' + 'to%3A%272019-09-01T00%3A40%3A00Z%27' + '%29' # time end + '%29' # global end @@ -473,7 +441,7 @@ def test_generate_kibana_discover_url_with_multiple_filters(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '6.8', + 'kibana_discover_version': '8.0', 'kibana_discover_index_pattern_id': '90943e30-9a47-11e8-b64d-95841ca0b247', 'timestamp_field': 'timestamp', 'filter': [ @@ -488,10 +456,10 @@ def test_generate_kibana_discover_url_with_multiple_filters(): expectedUrl = ( 'http://kibana:5601/#/discover' + '?_g=%28' # global start + + 'filters%3A%21%28%29%2C' + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C' + 'time%3A%28' # time start + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C' - + 'mode%3Aabsolute%2C' + 'to%3A%272019-09-01T00%3A40%3A00Z%27' + '%29' # time end + '%29' # global end @@ -527,7 +495,7 @@ def test_generate_kibana_discover_url_with_int_query_key(): url = generate_kibana_discover_url( rule={ 'kibana_discover_app_url': 'http://kibana:5601/#/discover', - 'kibana_discover_version': '6.8', + 'kibana_discover_version': '8.0', 'kibana_discover_index_pattern_id': 'logs-*', 
@@ -389,7 +357,7 @@ def test_generate_kibana_discover_url_with_custom_columns():
     url = generate_kibana_discover_url(
         rule={
             'kibana_discover_app_url': 'http://kibana:5601/#/discover',
-            'kibana_discover_version': '6.8',
+            'kibana_discover_version': '8.0',
             'kibana_discover_index_pattern_id': 'logs-*',
             'kibana_discover_columns': ['level', 'message'],
             'timestamp_field': 'timestamp'
@@ -401,10 +369,10 @@ def test_generate_kibana_discover_url_with_custom_columns():
     expectedUrl = (
         'http://kibana:5601/#/discover'
         + '?_g=%28'  # global start
+        + 'filters%3A%21%28%29%2C'
         + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C'
         + 'time%3A%28'  # time start
         + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C'
-        + 'mode%3Aabsolute%2C'
         + 'to%3A%272019-09-01T00%3A40%3A00Z%27'
         + '%29'  # time end
         + '%29'  # global end
@@ -422,7 +390,7 @@ def test_generate_kibana_discover_url_with_single_filter():
     url = generate_kibana_discover_url(
         rule={
             'kibana_discover_app_url': 'http://kibana:5601/#/discover',
-            'kibana_discover_version': '6.8',
+            'kibana_discover_version': '8.0',
             'kibana_discover_index_pattern_id': 'logs-*',
             'timestamp_field': 'timestamp',
             'filter': [
@@ -436,10 +404,10 @@ def test_generate_kibana_discover_url_with_single_filter():
     expectedUrl = (
         'http://kibana:5601/#/discover'
         + '?_g=%28'  # global start
+        + 'filters%3A%21%28%29%2C'
         + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C'
         + 'time%3A%28'  # time start
         + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C'
-        + 'mode%3Aabsolute%2C'
         + 'to%3A%272019-09-01T00%3A40%3A00Z%27'
         + '%29'  # time end
         + '%29'  # global end
@@ -473,7 +441,7 @@ def test_generate_kibana_discover_url_with_multiple_filters():
     url = generate_kibana_discover_url(
         rule={
             'kibana_discover_app_url': 'http://kibana:5601/#/discover',
-            'kibana_discover_version': '6.8',
+            'kibana_discover_version': '8.0',
             'kibana_discover_index_pattern_id': '90943e30-9a47-11e8-b64d-95841ca0b247',
             'timestamp_field': 'timestamp',
             'filter': [
@@ -488,10 +456,10 @@ def test_generate_kibana_discover_url_with_multiple_filters():
     expectedUrl = (
         'http://kibana:5601/#/discover'
         + '?_g=%28'  # global start
+        + 'filters%3A%21%28%29%2C'
         + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C'
         + 'time%3A%28'  # time start
         + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C'
-        + 'mode%3Aabsolute%2C'
         + 'to%3A%272019-09-01T00%3A40%3A00Z%27'
         + '%29'  # time end
         + '%29'  # global end
@@ -527,7 +495,7 @@ def test_generate_kibana_discover_url_with_int_query_key():
     url = generate_kibana_discover_url(
         rule={
             'kibana_discover_app_url': 'http://kibana:5601/#/discover',
-            'kibana_discover_version': '6.8',
+            'kibana_discover_version': '8.0',
             'kibana_discover_index_pattern_id': 'logs-*',
             'timestamp_field': 'timestamp',
             'query_key': 'geo.dest'
@@ -540,10 +508,10 @@ def test_generate_kibana_discover_url_with_int_query_key():
     expectedUrl = (
         'http://kibana:5601/#/discover'
         + '?_g=%28'  # global start
+        + 'filters%3A%21%28%29%2C'
         + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C'
         + 'time%3A%28'  # time start
         + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C'
-        + 'mode%3Aabsolute%2C'
         + 'to%3A%272019-09-01T00%3A40%3A00Z%27'
         + '%29'  # time end
         + '%29'  # global end
@@ -587,7 +555,7 @@ def test_generate_kibana_discover_url_with_str_query_key():
     url = generate_kibana_discover_url(
         rule={
             'kibana_discover_app_url': 'http://kibana:5601/#/discover',
-            'kibana_discover_version': '6.8',
+            'kibana_discover_version': '8.0',
             'kibana_discover_index_pattern_id': 'logs-*',
             'timestamp_field': 'timestamp',
             'query_key': 'geo.dest'
@@ -602,10 +570,10 @@ def test_generate_kibana_discover_url_with_str_query_key():
     expectedUrl = (
         'http://kibana:5601/#/discover'
         + '?_g=%28'  # global start
+        + 'filters%3A%21%28%29%2C'
         + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C'
         + 'time%3A%28'  # time start
         + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C'
-        + 'mode%3Aabsolute%2C'
         + 'to%3A%272019-09-01T00%3A40%3A00Z%27'
         + '%29'  # time end
         + '%29'  # global end
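The query-key tests above, and the null/missing/compound variants that follow, expect each query-key value to surface as a rison-encoded `phrase` filter in the `_a` app state. A sketch of that payload, assuming the `prison` package for rison encoding and a hypothetical `status: ok` match; the field names mirror the expected URLs:

```python
import urllib.parse

import prison  # rison encoder (pip install prison); an assumption here

# Hypothetical app-state filter for a match {'status': 'ok'}, shaped like
# the filters the expected URLs above decode to.
app_state = {
    'columns': ['_source'],
    'filters': [{
        '$state': {'store': 'appState'},
        'meta': {
            'alias': None, 'disabled': False, 'index': 'logs-*',
            'key': 'status', 'negate': False,
            'params': {'query': 'ok', 'type': 'phrase'},
            'type': 'phrase', 'value': 'ok',
        },
        'query': {'match': {'status': {'query': 'ok', 'type': 'phrase'}}},
    }],
    'index': 'logs-*',
    'interval': 'auto',
}
# prison.dumps produces the (...) rison syntax; percent-quoting yields the
# %28/%3A form seen in expectedUrl.
print('&_a=' + urllib.parse.quote(prison.dumps(app_state), safe=''))
```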
-        + 'mode%3Aabsolute%2C'
         + 'to%3A%272019-09-01T00%3A40%3A00Z%27'
         + '%29'  # time end
         + '%29'  # global end
@@ -836,7 +804,7 @@ def test_generate_kibana_discover_url_with_filter_and_query_key():
     url = generate_kibana_discover_url(
         rule={
             'kibana_discover_app_url': 'http://kibana:5601/#/discover',
-            'kibana_discover_version': '6.8',
+            'kibana_discover_version': '8.0',
             'kibana_discover_index_pattern_id': 'logs-*',
             'timestamp_field': 'timestamp',
             'filter': [
@@ -852,10 +820,10 @@ def test_generate_kibana_discover_url_with_filter_and_query_key():
     expectedUrl = (
         'http://kibana:5601/#/discover'
         + '?_g=%28'  # global start
+        + 'filters%3A%21%28%29%2C'
         + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C'
         + 'time%3A%28'  # time start
         + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C'
-        + 'mode%3Aabsolute%2C'
         + 'to%3A%272019-09-01T00%3A40%3A00Z%27'
         + '%29'  # time end
         + '%29'  # global end
@@ -907,3 +875,80 @@ def test_generate_kibana_discover_url_with_filter_and_query_key():
         + '%29'  # app end
     )
     assert url == expectedUrl
+
+
+def test_generate_kibana_discover_url_with_querystring_filter_and_query_key():
+    url = generate_kibana_discover_url(
+        rule={
+            'kibana_discover_app_url': 'http://kibana:5601/#/discover',
+            'kibana_discover_version': '8.0',
+            'kibana_discover_index_pattern_id': 'logs-*',
+            'timestamp_field': 'timestamp',
+            'filter': [
+                {'query': {'query_string': {'query': 'hello world'}}}
+            ],
+            'query_key': 'status'
+        },
+        match={
+            'timestamp': '2019-09-01T00:30:00Z',
+            'status': 'ok'
+        }
+    )
+    expectedUrl = (
+        'http://kibana:5601/#/discover'
+        + '?_g=%28'  # global start
+        + 'filters%3A%21%28%29%2C'
+        + 'refreshInterval%3A%28pause%3A%21t%2Cvalue%3A0%29%2C'
+        + 'time%3A%28'  # time start
+        + 'from%3A%272019-09-01T00%3A20%3A00Z%27%2C'
+        + 'to%3A%272019-09-01T00%3A40%3A00Z%27'
+        + '%29'  # time end
+        + '%29'  # global end
+        + '&_a=%28'  # app start
+        + 'columns%3A%21%28_source%29%2C'
+        + 'filters%3A%21%28'  # filters start
+
+        + '%28'  # filter start
+        + '%27%24state%27%3A%28store%3AappState%29%2C'
+        + 'bool%3A%28must%3A%21%28%28query_string%3A%28query%3A%27hello%20world%27%29%29%29%29%2C'
+        + 'meta%3A%28'  # meta start
+        + 'alias%3Afilter%2C'
+        + 'disabled%3A%21f%2C'
+        + 'index%3A%27logs-%2A%27%2C'
+        + 'key%3Abool%2C'
+        + 'negate%3A%21f%2C'
+        + 'type%3Acustom%2C'
+        + 'value%3A%27%7B%22must%22%3A%5B%7B%22query_string%22%3A%7B%22query%22%3A%22hello%20world%22%7D%7D%5D%7D%27'
+        + '%29'  # meta end
+        + '%29%2C'  # filter end
+
+        + '%28'  # filter start
+        + '%27%24state%27%3A%28store%3AappState%29%2C'
+        + 'meta%3A%28'  # meta start
+        + 'alias%3A%21n%2C'
+        + 'disabled%3A%21f%2C'
+        + 'index%3A%27logs-%2A%27%2C'
+        + 'key%3Astatus%2C'
+        + 'negate%3A%21f%2C'
+        + 'params%3A%28query%3Aok%2C'  # params start
+        + 'type%3Aphrase'
+        + '%29%2C'  # params end
+        + 'type%3Aphrase%2C'
+        + 'value%3Aok'
+        + '%29%2C'  # meta end
+        + 'query%3A%28'  # query start
+        + 'match%3A%28'  # match start
+        + 'status%3A%28'  # status start
+        + 'query%3Aok%2C'
+        + 'type%3Aphrase'
+        + '%29'  # status end
+        + '%29'  # match end
+        + '%29'  # query end
+        + '%29'  # filter end
+
+        + '%29%2C'  # filters end
+        + 'index%3A%27logs-%2A%27%2C'
+        + 'interval%3Aauto'
+        + '%29'  # app end
+    )
+    assert url == expectedUrl
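Across the hunks above, the `_g` globals gain an empty `filters:!()` entry and lose the Kibana 5/6-era `mode:absolute` time attribute. A sketch that reproduces the new `_g` value, again assuming the `prison` rison encoder:

```python
import urllib.parse

import prison  # rison encoder; an assumption for illustration

# The post-change globals: empty filters list, paused refresh, and absolute
# from/to timestamps with no 'mode' attribute.
globals_state = {
    'filters': [],
    'refreshInterval': {'pause': True, 'value': 0},
    'time': {'from': '2019-09-01T00:20:00Z', 'to': '2019-09-01T00:40:00Z'},
}
# Should match the expectedUrl fragments above, e.g. the leading
# %28filters%3A%21%28%29%2C... sequence.
print('?_g=' + urllib.parse.quote(prison.dumps(globals_state), safe=''))
```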
EAException
-
-
-# Dashboard schema with only filters section
-test_dashboard = '''{
-  "title": "AD Lock Outs",
-  "services": {
-    "filter": {
-      "list": {
-        "0": {
-          "type": "time",
-          "field": "@timestamp",
-          "from": "now-7d",
-          "to": "now",
-          "mandate": "must",
-          "active": true,
-          "alias": "",
-          "id": 0
-        },
-        "1": {
-          "type": "field",
-          "field": "_log_type",
-          "query": "\\"active_directory\\"",
-          "mandate": "must",
-          "active": true,
-          "alias": "",
-          "id": 1
-        },
-        "2": {
-          "type": "querystring",
-          "query": "ad.security_auditing_code:4740",
-          "mandate": "must",
-          "active": true,
-          "alias": "",
-          "id": 2
-        },
-        "3": {
-          "type": "range",
-          "field": "@timestamp",
-          "mandate": "must",
-          "active": true,
-          "alias": "",
-          "from": "2014-09-27T12:34:45Z",
-          "to": "2014-09-26T12:34:45Z",
-          "id": 3
-        },
-        "4": {
-          "field": "@timestamp",
-          "alias": "",
-          "mandate": "mustNot",
-          "active": true,
-          "query": "that",
-          "type": "field",
-          "id": 4
-        },
-        "5": {
-          "field": "@timestamp",
-          "alias": "",
-          "mandate": "either",
-          "active": true,
-          "query": "that",
-          "type": "field",
-          "id": 5
-        }
-      },
-      "ids": [
-        0,
-        1,
-        2,
-        3,
-        4,
-        5
-      ]
-    }
-  }
-}'''
-test_dashboard = json.loads(test_dashboard)
-
-test_dashboard2 = '''{
-  "title": "AD Lock Outs",
-  "services": {
-    "filter": {
-      "list": {
-        "0": {
-          "type": "time",
-          "field": "@timestamp",
-          "from": "now-7d",
-          "to": "now",
-          "mandate": "must",
-          "active": true,
-          "alias": "",
-          "id": 0
-        },
-        "1": {
-          "type": "field",
-          "field": "_log_type",
-          "query": "\\"active_directory\\"",
-          "mandate": "must",
-          "active": true,
-          "alias": "",
-          "id": 1
-        }
-      },
-      "ids": [
-        0,
-        1
-      ]
-    }
-  }
-}'''
-test_dashboard2 = json.loads(test_dashboard2)
-
-
-def test_add_filter():
-    basic_filter = {"term": {"this": "that"}}
-    db = copy.deepcopy(dashboard_temp)
-    add_filter(db, basic_filter)
-    assert db['services']['filter']['list']['1'] == {
-        'field': 'this',
-        'alias': '',
-        'mandate': 'must',
-        'active': True,
-        'query': '"that"',
-        'type': 'field',
-        'id': 1
-    }
-
-    list_filter = {"term": {"this": ["that", "those"]}}
-    db = copy.deepcopy(dashboard_temp)
-    add_filter(db, list_filter)
-    assert db['services']['filter']['list']['1'] == {
-        'field': 'this',
-        'alias': '',
-        'mandate': 'must',
-        'active': True,
-        'query': '("that" AND "those")',
-        'type': 'field',
-        'id': 1
-    }
-
-    not_filter = {'not': {'term': {'this': 'that'}}}
-    db = copy.deepcopy(dashboard_temp)
-    add_filter(db, not_filter)
-    assert db['services']['filter']['list']['1'] == {
-        'field': 'this',
-        'alias': '',
-        'mandate': 'mustNot',
-        'active': True,
-        'query': '"that"',
-        'type': 'field',
-        'id': 1
-    }
-
-    START_TIMESTAMP = '2014-09-26T12:34:45Z'
-    END_TIMESTAMP = '2014-09-27T12:34:45Z'
-    range_filter = {'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}
-    db = copy.deepcopy(dashboard_temp)
-    add_filter(db, range_filter)
-    assert db['services']['filter']['list']['1'] == {
-        'field': '@timestamp',
-        'alias': '',
-        'mandate': 'must',
-        'active': True,
-        'lte': '2014-09-27T12:34:45Z',
-        'gt': '2014-09-26T12:34:45Z',
-        'type': 'range',
-        'id': 1
-    }
-
-    query_filter = {'query': {'wildcard': 'this*that'}}
-    db = copy.deepcopy(dashboard_temp)
-    add_filter(db, query_filter)
-    assert db['services']['filter']['list']['1'] == {
-        'alias': '',
-        'mandate': 'must',
-        'active': True,
-        'id': 1
-    }
-
-    query_string_filter = {'query': {'query_string': {'query': 'ad.security_auditing_code:4740'}}}
-    db = copy.deepcopy(dashboard_temp)
-    add_filter(db, query_string_filter)
-    assert db['services']['filter']['list']['1'] == {
-        'alias': '',
-        'mandate': 'must',
-        'active': True,
-        'query': 'ad.security_auditing_code:4740',
-        'type': 'querystring',
-        'id': 1
-    }
-
-    try:
-        error_filter = {'bool': {'must': [{'range': {'@timestamp': {'lte': END_TIMESTAMP, 'gt': START_TIMESTAMP}}}]}}
-        db = copy.deepcopy(dashboard_temp)
-        add_filter(db, error_filter)
-    except EAException as ea:
-        excepted = "Could not parse filter {'bool': {'must': [{'range': {'@timestamp': "
-        excepted += "{'lte': '2014-09-27T12:34:45Z', 'gt': '2014-09-26T12:34:45Z'}}}]}} for Kibana"
-        assert excepted == str(ea)
-
-
-def test_url_encoded():
-    url = kibana4_dashboard_link('example.com/#/Dashboard', '2015-01-01T00:00:00Z', '2017-01-01T00:00:00Z')
-    assert not any([special_char in url for special_char in ["',\":;?&=()"]])
-
-
-def test_url_env_substitution(environ):
-    environ.update({
-        'KIBANA_HOST': 'kibana',
-        'KIBANA_PORT': '5601',
-    })
-    url = kibana4_dashboard_link(
-        'http://$KIBANA_HOST:$KIBANA_PORT/#/Dashboard',
-        '2015-01-01T00:00:00Z',
-        '2017-01-01T00:00:00Z',
-    )
-    assert url.startswith('http://kibana:5601/#/Dashboard')
diff --git a/tests/loaders_test.py b/tests/loaders_test.py
index 58e3672d..71ec00f4 100644
--- a/tests/loaders_test.py
+++ b/tests/loaders_test.py
@@ -39,7 +39,6 @@
              'filter': [{'term': {'key': 'value'}}],
              'alert': 'email',
              'use_count_query': True,
-             'doc_type': 'blsh',
              'email': 'test@test.test',
              'aggregation': {'hours': 2},
              'include': ['comparekey', '@timestamp']}
@@ -439,36 +438,6 @@ def test_name_inference():
     assert test_rule_copy['name'] == 'msmerc woz ere'
 
 
-def test_raises_on_bad_generate_kibana_filters():
-    test_rule['generate_kibana_link'] = True
-    bad_filters = [[{'not': {'terms': {'blah': 'blah'}}}],
-                   [{'terms': {'blah': 'blah'}}],
-                   [{'query': {'not_querystring': 'this:that'}}],
-                   [{'query': {'wildcard': 'this*that'}}],
-                   [{'blah': 'blah'}]]
-    good_filters = [[{'term': {'field': 'value'}}],
-                    [{'not': {'term': {'this': 'that'}}}],
-                    [{'not': {'query': {'query_string': {'query': 'this:that'}}}}],
-                    [{'query': {'query_string': {'query': 'this:that'}}}],
-                    [{'range': {'blah': {'from': 'a', 'to': 'b'}}}],
-                    [{'not': {'range': {'blah': {'from': 'a', 'to': 'b'}}}}]]
-
-    # Test that all the good filters work, but fail with a bad filter added
-    for good in good_filters:
-        test_config_copy = copy.deepcopy(test_config)
-        rules_loader = FileRulesLoader(test_config_copy)
-
-        test_rule_copy = copy.deepcopy(test_rule)
-        test_rule_copy['filter'] = good
-        with mock.patch.object(rules_loader, 'get_yaml') as mock_open:
-            mock_open.return_value = test_rule_copy
-            rules_loader.load_configuration('blah', test_config)
-            for bad in bad_filters:
-                test_rule_copy['filter'] = good + bad
-                with pytest.raises(EAException):
-                    rules_loader.load_configuration('blah', test_config)
-
-
 def test_kibana_discover_from_timedelta():
     test_config_copy = copy.deepcopy(test_config)
     rules_loader = FileRulesLoader(test_config_copy)
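The `loaders_test.py` changes drop `doc_type` from the count-query rule and delete the `generate_kibana_link` filter-validation test, matching the removal of the legacy Kibana dashboard integration. A sketch of a minimal rule that loads under the new scheme (the dict is illustrative, not a test fixture from this PR; the `kibana_discover_*` values follow the tests above):

```python
# Hypothetical post-2.x rule: no doc_type alongside use_count_query, and the
# Kibana link comes from kibana_discover_* options rather than the removed
# generate_kibana_link flag.
rule = {
    'name': 'example frequency rule',
    'type': 'frequency',
    'index': 'logs-*',
    'filter': [{'term': {'key': 'value'}}],
    'use_count_query': True,  # doc_type is no longer required here
    'alert': 'email',
    'email': 'test@test.test',
    'kibana_discover_app_url': 'http://kibana:5601/#/discover',
    'kibana_discover_version': '8.0',
    'kibana_discover_index_pattern_id': 'logs-*',
}
```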