Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(types): support ns timestamp #19827

Open
wants to merge 9 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 26 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

115 changes: 115 additions & 0 deletions e2e_test/batch/types/timestamp_ns.slt.part
Original file line number Diff line number Diff line change
@@ -0,0 +1,115 @@
statement ok
SET RW_IMPLICIT_FLUSH TO true;

statement ok
create table t1(v1 int, v2 timestamp);

statement ok
insert into t1 values(1,'2013-01-01 01:01:01.123456789'),(2,'2012-01-01 01:01:01.123456'),(3,'0000-01-01 01:01:01.123456789'),(4,'2213-01-01 01:01:01.123456789'),(5,null);

query T rowsort
select * from t1;
----
1 2013-01-01 01:01:01.123456789
2 2012-01-01 01:01:01.123456
3 0001-01-01 01:01:01.123456789 BC
4 2213-01-01 01:01:01.123456789
5 NULL

query T
select * from t1 where v2 is null;
----
5 NULL

query T rowsort
select v1, v2,
case
when extract(year from v2) < 2000 then 'Before 2000'
when extract(year from v2) >= 2000 and extract(year from v2) < 2100 then '21st Century'
else 'Future'
end as time_period
from t1;
----
1 2013-01-01 01:01:01.123456789 21st Century
2 2012-01-01 01:01:01.123456 21st Century
3 0001-01-01 01:01:01.123456789 BC Before 2000
4 2213-01-01 01:01:01.123456789 Future
5 NULL Future

query T rowsort
select v1, v2, coalesce(v2, '1900-01-01 00:00:00') as coalesce_v2 from t1;
----
1 2013-01-01 01:01:01.123456789 2013-01-01 01:01:01.123456789
2 2012-01-01 01:01:01.123456 2012-01-01 01:01:01.123456
3 0001-01-01 01:01:01.123456789 BC 0001-01-01 01:01:01.123456789 BC
4 2213-01-01 01:01:01.123456789 2213-01-01 01:01:01.123456789
5 NULL 1900-01-01 00:00:00

query T
select count(v2) as total_rows from t1;
----
4

query T rowsort
select * from t1 order by v2;
----
1 2013-01-01 01:01:01.123456789
2 2012-01-01 01:01:01.123456
3 0001-01-01 01:01:01.123456789 BC
4 2213-01-01 01:01:01.123456789
5 NULL

query T rowsort
select * from t1 where v2 >= '2012-01-01 01:01:01.123456';
----
1 2013-01-01 01:01:01.123456789
2 2012-01-01 01:01:01.123456
4 2213-01-01 01:01:01.123456789

query T rowsort
select v1, cast(v2 as date) as date_v2, cast(v2 as timestamp with time zone) as timestamptz_v2 from t1;
----
1 2013-01-01 2013-01-01 01:01:01.123456+00:00
2 2012-01-01 2012-01-01 01:01:01.123456+00:00
3 0001-01-01 BC 0001-01-01 01:01:01.123456+00:00 BC
4 2213-01-01 2213-01-01 01:01:01.123456+00:00
5 NULL NULL

query T rowsort
select v1, date_trunc('day', v2) AS truncated_v2 from t1;
----
1 2013-01-01 00:00:00
2 2012-01-01 00:00:00
3 0001-01-01 00:00:00 BC
4 2213-01-01 00:00:00
5 NULL

query T rowsort
select v1, v2 at time zone 'UTC' as v2_utc from t1;
----
1 2013-01-01 01:01:01.123456+00:00
2 2012-01-01 01:01:01.123456+00:00
3 0001-01-01 01:01:01.123456+00:00 BC
4 2213-01-01 01:01:01.123456+00:00
5 NULL

query T rowsort
select v1, to_char(v2, 'YYYY-MM-DD HH24:MI:SS.NS') as formatted_v2 from t1;
----
1 2013-01-01 01:01:01.123456789
2 2012-01-01 01:01:01.123456000
3 0000-01-01 01:01:01.123456789
4 2213-01-01 01:01:01.123456789
5 NULL

query T rowsort
select generate_series('2013-01-01 01:01:01.123456789'::timestamp,'2013-01-01 01:01:05.123456790'::timestamp, '1 s');
----
2013-01-01 01:01:01.123456789
2013-01-01 01:01:02.123456789
2013-01-01 01:01:03.123456789
2013-01-01 01:01:04.123456789
2013-01-01 01:01:05.123456789

statement ok
drop table t1;
1 change: 1 addition & 0 deletions src/common/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@ humantime = "2.1"
hytra = { workspace = true }
itertools = { workspace = true }
itoa = "1.0"
jiff = "0.1.15"
jsonbb = { workspace = true }
lru = { workspace = true }
memcomparable = { version = "0.2", features = ["decimal"] }
Expand Down
111 changes: 79 additions & 32 deletions src/common/src/types/datetime.rs
Original file line number Diff line number Diff line change
Expand Up @@ -168,29 +168,18 @@ impl FromStr for Timestamp {
type Err = InvalidParamsError;

/// Parses a timestamp literal such as `2013-01-01 01:01:01.123456789`.
///
/// Uses `jiff::civil::DateTime` because it accepts up to nanosecond
/// (9-digit) fractional seconds; `speedate` only parses down to
/// microseconds and `chrono`'s parser does not accept this family of
/// formats directly.
fn from_str(s: &str) -> Result<Self> {
    // NOTE(review): the ParseTimestamp error text should be updated to say
    // "up to 9 digits" of fractional seconds (it previously said 6) —
    // confirm the message attached to `ErrorKind::ParseTimestamp`.
    let dt = s
        .parse::<jiff::civil::DateTime>()
        .map_err(|_| ErrorKind::ParseTimestamp)?;
    // jiff's civil types are range-checked during parsing, so the
    // `*_uncheck` constructors below cannot receive out-of-range components.
    Ok(
        Date::from_ymd_uncheck(dt.year() as i32, dt.month() as u32, dt.day() as u32)
            .and_hms_nano_uncheck(
                dt.hour() as u32,
                dt.minute() as u32,
                dt.second() as u32,
                dt.subsec_nanosecond() as u32,
            ),
    )
}
}

Expand Down Expand Up @@ -422,6 +411,13 @@ impl Date {
.and_time(Time::from_hms_micro_uncheck(hour, min, sec, micro).0),
)
}

/// Combines this date with an unchecked wall-clock time of nanosecond
/// precision, producing a `Timestamp`.
pub fn and_hms_nano_uncheck(self, hour: u32, min: u32, sec: u32, nano: u32) -> Timestamp {
    let time = Time::from_hms_nano_uncheck(hour, min, sec, nano);
    Timestamp::new(self.0.and_time(time.0))
}
}

impl Time {
Expand Down Expand Up @@ -485,6 +481,41 @@ impl Time {
}
}

// The first 64 bits of the protobuf encoding for the `Timestamp` type have 2
// possible meanings, distinguished by the highest 2 bits:
// * When the highest 2 bits are `11` or `00` (i.e. values ranging from
//   `0b1100...00` to `0b0011...11`), the value is *microseconds* since
//   1970-01-01 midnight. 2^62 microseconds covers 146235 years.
// * When the highest 2 bits are `10` or `01`, flip bit 62 to get a value in
//   the `11`/`00` range again; it is then *seconds* since 1970-01-01
//   midnight, followed by another 32 bits holding the nanoseconds within
//   that second.
enum FirstI64 {
    /// Legacy format: microseconds since the Unix epoch (8 bytes total).
    V0 { usecs: i64 },
    /// New format: seconds since the Unix epoch; a u32 of nanoseconds
    /// follows separately on the wire (12 bytes total).
    V1 { secs: i64 },
}
impl FirstI64 {
    /// Encodes the leading 8 bytes of the wire representation.
    ///
    /// `V1` flips bit 62 so the result lands in the `10`/`01` marker range,
    /// which the legacy microsecond format never produces for in-range values.
    pub fn to_protobuf(&self) -> i64 {
        match self {
            FirstI64::V0 { usecs } => *usecs,
            FirstI64::V1 { secs } => secs ^ (0b01 << 62),
        }
    }

    /// Reads and classifies the leading i64 of a `Timestamp` buffer.
    pub fn from_protobuf(cur: &mut Cursor<&[u8]>) -> ArrayResult<FirstI64> {
        let value = cur
            .read_i64::<BigEndian>()
            // Fixed: this decodes the Timestamp buffer, not Time.
            .context("failed to read i64 from Timestamp buffer")?;
        if Self::is_v1_format_state(value) {
            // Undo the bit-62 flip applied by `to_protobuf`.
            let secs = value ^ (0b01 << 62);
            Ok(FirstI64::V1 { secs })
        } else {
            Ok(FirstI64::V0 { usecs: value })
        }
    }

    /// True when the top two bits are `10` or `01` — the V1 format marker.
    fn is_v1_format_state(value: i64) -> bool {
        let state = (value >> 62) & 0b11;
        state == 0b10 || state == 0b01
    }
}

impl Timestamp {
pub fn with_secs_nsecs(secs: i64, nsecs: u32) -> Result<Self> {
Ok(Timestamp::new({
Expand All @@ -495,18 +526,34 @@ impl Timestamp {
}

/// Decodes a `Timestamp` from its protobuf representation.
///
/// Accepts both wire formats (see `FirstI64`): the legacy 8-byte
/// microsecond encoding (`V0`) and the new 12-byte seconds + nanoseconds
/// encoding (`V1`), so old data remains readable.
pub fn from_protobuf(cur: &mut Cursor<&[u8]>) -> ArrayResult<Timestamp> {
    match FirstI64::from_protobuf(cur)? {
        FirstI64::V0 { usecs } => Ok(Timestamp::with_micros(usecs)?),
        FirstI64::V1 { secs } => {
            // The V1 format carries a trailing u32 with the nanoseconds
            // within the second.
            let nsecs = cur
                .read_u32::<BigEndian>()
                // Fixed: this decodes the Timestamp buffer, not Time.
                .context("failed to read u32 from Timestamp buffer")?;
            Ok(Timestamp::with_secs_nsecs(secs, nsecs)?)
        }
    }
}

/// Serializes this timestamp in the new V1 wire format: secs (i64, with
/// bit 62 flipped as the format marker — see `FirstI64`) followed by
/// nsecs (u32), 12 bytes total. The legacy format was a single i64 of
/// microseconds; `from_protobuf` still accepts both, so the highest 2 bits
/// of the first word keep old and new encodings distinguishable.
pub fn to_protobuf<T: Write>(self, output: &mut T) -> ArrayResult<usize> {
    // NOTE(review): `Write::write` may perform a short write; consider
    // `write_all` (and returning the fixed size 12) — confirm callers
    // always pass an in-memory buffer where short writes cannot occur.
    let timestamp_size = output
        .write(
            &(FirstI64::V1 {
                secs: self.0.and_utc().timestamp(),
            }
            .to_protobuf())
            .to_be_bytes(),
        )
        .map_err(Into::<ArrayError>::into)?;
    let timestamp_subsec_nanos_size = output
        .write(&(self.0.and_utc().timestamp_subsec_nanos()).to_be_bytes())
        .map_err(Into::<ArrayError>::into)?;
    Ok(timestamp_subsec_nanos_size + timestamp_size)
}

pub fn get_timestamp_nanos(&self) -> i64 {
Expand Down
2 changes: 2 additions & 0 deletions src/expr/impl/src/scalar/to_char.rs
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,8 @@ impl ChronoPattern {
("Mon", "%b"),
("DD", "%d"),
("dd", "%d"),
("NS", "%9f"),
("ns", "%9f"),
("US", "%6f"),
("us", "%6f"),
("MS", "%3f"),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -494,8 +494,8 @@ impl HummockVersion {
.member_table_ids
.clone_from(&group_construct.table_ids);
self.levels.insert(*compaction_group_id, new_levels);
let member_table_ids = if group_construct.version
>= CompatibilityVersion::NoMemberTableIds as _
let member_table_ids = if group_construct.version()
>= CompatibilityVersion::NoMemberTableIds
{
self.state_table_info
.compaction_group_member_table_ids(*compaction_group_id)
Expand All @@ -508,8 +508,7 @@ impl HummockVersion {
BTreeSet::from_iter(group_construct.table_ids.clone())
};

if group_construct.version >= CompatibilityVersion::SplitGroupByTableId as _
{
if group_construct.version() >= CompatibilityVersion::SplitGroupByTableId {
let split_key = if group_construct.split_key.is_some() {
Some(Bytes::from(group_construct.split_key.clone().unwrap()))
} else {
Expand Down
Loading