snuba events_analytics_platform requires clickhouse 23.3 #1474
Comments
Please post your values.yaml |
We are facing a similar problem with release 24.8 |
I also raised the same issue, #1508, but mine is for a totally fresh installation. When you read this error, getsentry/snuba#6355, it matches the exact point at which my failure occurred.
Then I get a failure straight after 0001_spans |
Try tag 25.16.0 |
This issue is stale because it has been open for 30 days with no activity. |
Fix this with command:
and after that, use this code for /usr/src/snuba/snuba/snuba_migrations/events_analytics_platform/0001_spans.py:

from typing import List, Sequence
from snuba.clusters.storage_sets import StorageSetKey
from snuba.migrations import migration, operations, table_engines
from snuba.migrations.columns import MigrationModifiers as Modifiers
from snuba.migrations.operations import AddIndicesData, OperationTarget, SqlOperation
from snuba.utils.schemas import (
    UUID,
    Column,
    DateTime,
    DateTime64,
    Float,
    Int,
    Map,
    String,
    UInt,
)
storage_set_name = StorageSetKey.EVENTS_ANALYTICS_PLATFORM
local_table_name = "eap_spans_local"
dist_table_name = "eap_spans_dist"
num_attr_buckets = 20
columns: List[Column[Modifiers]] = [
Column("organization_id", UInt(64)),
Column("project_id", UInt(64)),
Column("service", String(Modifiers(codecs=["ZSTD(1)"]))),
Column("trace_id", UUID()),
Column("span_id", UInt(64)),
Column("parent_span_id", UInt(64, Modifiers(codecs=["ZSTD(1)"]))),
Column("segment_id", UInt(64, Modifiers(codecs=["ZSTD(1)"]))),
Column("segment_name", String(Modifiers(codecs=["ZSTD(1)"]))),
Column("is_segment", UInt(8, Modifiers(codecs=["T64", "ZSTD(1)"]))),
Column("_sort_timestamp", DateTime(Modifiers(codecs=["DoubleDelta", "ZSTD(1)"]))),
Column(
"start_timestamp",
DateTime64(6, modifiers=Modifiers(codecs=["DoubleDelta", "ZSTD(1)"])),
),
Column(
"end_timestamp",
DateTime64(6, modifiers=Modifiers(codecs=["DoubleDelta", "ZSTD(1)"])),
),
Column(
"duration_ms",
UInt(32, modifiers=Modifiers(codecs=["DoubleDelta", "ZSTD(1)"])),
),
Column("exclusive_time_ms", Float(64, modifiers=Modifiers(codecs=["ZSTD(1)"]))),
Column(
"retention_days",
UInt(16, modifiers=Modifiers(codecs=["DoubleDelta", "ZSTD(1)"])),
),
Column("name", String(modifiers=Modifiers(codecs=["ZSTD(1)"]))),
Column("sampling_factor", Float(64, modifiers=Modifiers(codecs=["ZSTD(1)"]))),
Column("sampling_weight", Float(64, modifiers=Modifiers(codecs=["ZSTD(1)"]))),
Column("sign", Int(8, modifiers=Modifiers(codecs=["DoubleDelta"]))),
]
columns.extend(
    [
        Column(
            f"attr_str_{i}",
            Map(String(), String(), modifiers=Modifiers(codecs=["ZSTD(1)"])),
        )
        for i in range(num_attr_buckets)
    ]
)
columns.extend(
    [
        Column(
            f"attr_num_{i}",
            Map(String(), Float(64), modifiers=Modifiers(codecs=["ZSTD(1)"])),
        )
        for i in range(num_attr_buckets)
    ]
)
indices: Sequence[AddIndicesData] = (
    [
        AddIndicesData(
            name="bf_trace_id",
            expression="toString(trace_id)",  # Convert UUID to string
            type="bloom_filter",
            granularity=1,
        )
    ]
    + [
        AddIndicesData(
            name=f"bf_attr_str_{i}",
            expression=f"toString(mapKeys(attr_str_{i}))",  # Convert Map keys to strings
            type="bloom_filter",
            granularity=1,
        )
        for i in range(num_attr_buckets)
    ]
    + [
        AddIndicesData(
            name=f"ngrambf_attr_str_val_{i}",
            expression=f"toString(mapValues(attr_str_{i}))",  # Convert Map values to strings
            type="ngrambf_v1(4, 1024, 10, 1)",
            granularity=1,
        )
        for i in range(num_attr_buckets)
    ]
)
class Migration(migration.ClickhouseNodeMigration):
    blocking = False

    def forwards_ops(self) -> Sequence[SqlOperation]:
        res: List[SqlOperation] = [
            operations.CreateTable(
                storage_set=storage_set_name,
                table_name=local_table_name,
                columns=columns,
                engine=table_engines.CollapsingMergeTree(
                    primary_key="(organization_id, _sort_timestamp, trace_id)",
                    order_by="(organization_id, _sort_timestamp, trace_id, span_id)",
                    sign_column="sign",
                    partition_by="(toMonday(_sort_timestamp))",
                    settings={"index_granularity": "8192"},
                    storage_set=storage_set_name,
                    ttl="_sort_timestamp + toIntervalDay(retention_days)",
                ),
                target=OperationTarget.LOCAL,
            ),
            operations.CreateTable(
                storage_set=storage_set_name,
                table_name=dist_table_name,
                columns=columns,
                engine=table_engines.Distributed(
                    local_table_name=local_table_name,
                    sharding_key="cityHash64(reinterpretAsUInt128(trace_id))",
                ),
                target=OperationTarget.DISTRIBUTED,
            ),
            operations.AddIndices(
                storage_set=storage_set_name,
                table_name=local_table_name,
                indices=indices,
                target=OperationTarget.LOCAL,
            ),
        ]
        return res

    def backwards_ops(self) -> Sequence[SqlOperation]:
        return [
            operations.DropTable(
                storage_set=storage_set_name,
                table_name=local_table_name,
                target=OperationTarget.LOCAL,
            ),
            operations.DropTable(
                storage_set=storage_set_name,
                table_name=dist_table_name,
                target=OperationTarget.DISTRIBUTED,
            ),
        ]

With this code you can run the migration.
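As a quick sanity check that the edited file still parses and builds the expected operations, here is a minimal sketch, assuming you run it from a Python shell inside the snuba container where snuba's settings and dependencies are available (the dotted module path mirrors how snuba's migration loader imports the file):

import importlib

# Migration module names start with a digit, so a plain `import` statement
# cannot load them; importlib can, the same way snuba's own loader does.
spans_0001 = importlib.import_module(
    "snuba.snuba_migrations.events_analytics_platform.0001_spans"
)

# Building the operations only constructs Python objects; it does not touch
# ClickHouse, so it is safe to run before re-attempting the migration.
for op in spans_0001.Migration().forwards_ops():
    print(type(op).__name__)

If this prints CreateTable twice followed by AddIndices, the file builds the same set of operations the migration will run.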
On the 0009 migration you will encounter an error about alter_sync, which is not supported in the current ClickHouse version. To resolve it, change /usr/src/snuba/snuba/snuba_migrations/events_analytics_platform/0009_drop_index_attribute_key_buckets_1_19.py like this:

from typing import Sequence
from snuba.clusters.storage_sets import StorageSetKey
from snuba.migrations import migration, operations
class Migration(migration.ClickhouseNodeMigration):
    blocking = False

    def forwards_ops(self) -> Sequence[operations.SqlOperation]:
        indices_to_drop = [
            f"bf_attr_num_{bucket}" for bucket in range(1, 20)
        ] + [
            f"bf_attr_str_{bucket}" for bucket in range(1, 20)
        ]
        drop_indices_statements = ", ".join(
            f"DROP INDEX IF EXISTS {index_name}" for index_name in indices_to_drop
        )
        return [
            operations.RunSql(
                storage_set=StorageSetKey.EVENTS_ANALYTICS_PLATFORM,
                statement=f"""
                ALTER TABLE eap_spans_local
                {drop_indices_statements}
                SETTINGS mutations_sync=0;
                """,
                target=operations.OperationTarget.LOCAL,
            )
        ]

    def backwards_ops(self) -> Sequence[operations.SqlOperation]:
        return []
The same error appears for 0010; apply the same kind of change to /usr/src/snuba/snuba/snuba_migrations/events_analytics_platform/0010_drop_indexes_on_attribute_values.py.
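The 0010 file itself is not included above, so the following is only a hedged sketch: assuming 0010 drops the value-side ngram indexes that 0001 created (ngrambf_attr_str_val_0 through ngrambf_attr_str_val_19), the workaround would mirror the 0009 change:

from typing import Sequence

from snuba.clusters.storage_sets import StorageSetKey
from snuba.migrations import migration, operations


class Migration(migration.ClickhouseNodeMigration):
    blocking = False

    def forwards_ops(self) -> Sequence[operations.SqlOperation]:
        # Assumption: 0010 targets the ngram indexes on the attr_str value maps
        # created by 0001; adjust the names to whatever your 0010 file actually drops.
        indices_to_drop = [f"ngrambf_attr_str_val_{bucket}" for bucket in range(20)]
        drop_indices_statements = ", ".join(
            f"DROP INDEX IF EXISTS {index_name}" for index_name in indices_to_drop
        )
        return [
            operations.RunSql(
                storage_set=StorageSetKey.EVENTS_ANALYTICS_PLATFORM,
                statement=f"""
                ALTER TABLE eap_spans_local
                {drop_indices_statements}
                SETTINGS mutations_sync=0;
                """,
                target=operations.OperationTarget.LOCAL,
            )
        ]

    def backwards_ops(self) -> Sequence[operations.SqlOperation]:
        return []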
|
Issue submitter TODO list
Describe the bug (actual behavior)
The upgrade process failed in the snuba-migrate job because events_analytics_platform can't be migrated.
Executing snuba migrations migrate manually also fails.
Expected behavior
Migrations complete successfully.
values.yaml
.
Helm chart version
25.9.0
Steps to reproduce
The official self-hosted setup uses ClickHouse 23.8 and automatically upgrades ClickHouse to 23.8:
https://github.com/getsentry/self-hosted/blob/master/install/upgrade-clickhouse.sh
I tried setting the clickhouse image to 23.3.19.32 and the migrations went through.
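Since the failure hinges on which ClickHouse version the snuba pods actually talk to, it can help to confirm the server version before re-running migrations. Here is a minimal sketch using the clickhouse-driver package that snuba already depends on; the host name sentry-clickhouse is an assumption for a default chart install and should be adjusted to your release:

from clickhouse_driver import Client

# Hypothetical service name for a default chart install; change it (and the port)
# to match your release, e.g. <release>-clickhouse.
client = Client(host="sentry-clickhouse", port=9000)
(version,) = client.execute("SELECT version()")[0]
print(f"ClickHouse server version: {version}")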
Screenshots
No response
Logs
No response
Additional context
No response