Skip to content

Commit

Permalink
style(lint): Auto commit lint changes
Browse files Browse the repository at this point in the history
  • Loading branch information
getsantry[bot] authored Jun 27, 2024
1 parent d10e011 commit e6ed20b
Showing 1 changed file with 19 additions and 5 deletions.
24 changes: 19 additions & 5 deletions snuba/web/db_query.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@

import logging
import random
from snuba.datasets.storages.storage_key import StorageKey
import uuid
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
Expand All @@ -26,6 +25,7 @@
from snuba.clickhouse.query import Query
from snuba.clickhouse.query_dsl.accessors import get_time_range_estimate
from snuba.clickhouse.query_profiler import generate_profile
from snuba.datasets.storages.storage_key import StorageKey
from snuba.query import ProcessableQuery
from snuba.query.allocation_policies import (
MAX_THRESHOLD,
Expand Down Expand Up @@ -682,15 +682,24 @@ def visit_join_clause(self, node: JoinClause[Table]) -> None:
node.right_node.accept(self)


def _record_bytes_scanned(
    result_or_error: QueryResultOrError,
    attribution_info: AttributionInfo,
    dataset_name: str,
    storage_key: StorageKey,
) -> None:
    """Record a ``bytes_scanned`` metric for a completed ClickHouse query.

    Reads ``progress_bytes`` from the query result's ``profile`` section
    (defaulting to 0 when the profile or the field is absent) and increments
    the allocation-policy metrics counter, tagged with the query's referrer,
    dataset name, and storage key. Queries that ended in an error carry no
    result, so nothing is recorded for them.
    """
    custom_metrics = MetricsWrapper(environment.metrics, "allocation_policy")

    # Only successful queries carry a result payload with profile data;
    # error outcomes leave query_result unset and are skipped entirely.
    if result_or_error.query_result:
        progress_bytes_scanned = cast(int, result_or_error.query_result.result.get("profile", {}).get("progress_bytes", 0))  # type: ignore
        custom_metrics.increment(
            "bytes_scanned",
            progress_bytes_scanned,
            tags={
                "referrer": attribution_info.referrer,
                "dataset": dataset_name,
                "storage_key": storage_key.value,
            },
        )


Expand Down Expand Up @@ -806,12 +815,17 @@ def db_query(
raise e
finally:
result_or_error = QueryResultOrError(query_result=result, error=error)
_record_bytes_scanned(result_or_error, attribution_info, dataset_name, allocation_policies[0].storage_key)
_record_bytes_scanned(
result_or_error,
attribution_info,
dataset_name,
allocation_policies[0].storage_key,
)
for allocation_policy in allocation_policies:
allocation_policy.update_quota_balance(
tenant_ids=attribution_info.tenant_ids,
query_id=query_id,
result_or_error=result_or_error
result_or_error=result_or_error,
)
if stats.get("cache_hit"):
metrics.increment("cache_hit", tags={"dataset": dataset_name})
Expand Down

0 comments on commit e6ed20b

Please sign in to comment.