Commit b11ca5d

chore(deps): Bump Rust version to 1.79 (#20670)
* chore(deps): Bump Rust version to 1.79

* Update Tiltfile

* Delete unused fields in MongoDB metrics

* Fix Windows build
bruceg authored Jun 14, 2024 · 1 parent 0e034ee · commit b11ca5d
Showing 27 changed files with 56 additions and 89 deletions.
2 changes: 1 addition & 1 deletion Tiltfile
@@ -7,7 +7,7 @@ load('ext://helm_resource', 'helm_resource', 'helm_repo')
 docker_build(
     ref='timberio/vector',
     context='.',
-    build_args={'RUST_VERSION': '1.75.0'},
+    build_args={'RUST_VERSION': '1.79.0'},
     dockerfile='tilt/Dockerfile'
 )

2 changes: 1 addition & 1 deletion lib/file-source/src/metadata_ext.rs
@@ -95,7 +95,7 @@ impl PortableFileExt for File {

 // This code is from the Rust stdlib https://github.com/rust-lang/rust/blob/a916ac22b9f7f1f0f7aba0a41a789b3ecd765018/src/libstd/sys/windows/c.rs#L380-L386
 #[cfg(windows)]
-#[allow(non_snake_case, non_camel_case_types)]
+#[allow(dead_code, non_snake_case, non_camel_case_types)]
 pub struct REPARSE_DATA_BUFFER {
     pub ReparseTag: libc::c_uint,
     pub ReparseDataLength: libc::c_ushort,

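This hunk is the "Fix Windows build" part of the commit: the fields of this Windows-only FFI struct are populated by the OS and viewed through pointer casts, so the newer toolchain's dead_code lint reports them as never read. A minimal sketch of the pattern under that assumption (names here are illustrative, not from the Vector codebase):

    // Without the allow, rustc warns "fields `tag` and `data_length` are never read"
    // on FFI-shaped structs whose layout matters but whose fields Rust never inspects.
    #[allow(dead_code)]
    #[repr(C)]
    struct RawReparseHeader {
        tag: u32,
        data_length: u16,
    }

    fn main() {
        // The struct only provides a typed view of an OS-filled buffer.
        let buf = [0u8; 8];
        let _view = buf.as_ptr() as *const RawReparseHeader;
    }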
7 changes: 2 additions & 5 deletions lib/vector-buffers/src/variants/disk_v2/record.rs
@@ -20,7 +20,7 @@ pub enum RecordStatus {
     /// The record was able to be read from the buffer, and the checksum is valid.
     ///
     /// Contains the ID for the given record, as well as the metadata.
-    Valid { id: u64, metadata: u32 },
+    Valid { id: u64 },
     /// The record was able to be read from the buffer, but the checksum was not valid.
     Corrupted { calculated: u32, actual: u32 },
     /// The record was not able to be read from the buffer due to an error during deserialization.

@@ -142,10 +142,7 @@ impl<'a> ArchivedRecord<'a> {
     pub fn verify_checksum(&self, checksummer: &Hasher) -> RecordStatus {
         let calculated = generate_checksum(checksummer, self.id, self.metadata, &self.payload);
         if self.checksum == calculated {
-            RecordStatus::Valid {
-                id: self.id,
-                metadata: self.metadata,
-            }
+            RecordStatus::Valid { id: self.id }
         } else {
             RecordStatus::Corrupted {
                 calculated,

10 changes: 5 additions & 5 deletions lib/vector-buffers/src/variants/disk_v2/tests/mod.rs
@@ -82,7 +82,7 @@ macro_rules! assert_buffer_records {
     ($ledger:expr, $record_count:expr) => {
         assert_eq!(
             $ledger.get_total_records(),
-            $record_count as u64,
+            u64::try_from($record_count).expect("Record count is out of range"),
             "ledger should have {} records, but had {}",
             $record_count,
             $ledger.get_total_records()

@@ -95,14 +95,14 @@ macro_rules! assert_buffer_size {
     ($ledger:expr, $record_count:expr, $buffer_size:expr) => {
         assert_eq!(
             $ledger.get_total_records(),
-            $record_count as u64,
+            u64::try_from($record_count).expect("Record count is out of range"),
             "ledger should have {} records, but had {}",
             $record_count,
             $ledger.get_total_records()
         );
         assert_eq!(
             $ledger.get_total_buffer_size(),
-            $buffer_size as u64,
+            u64::try_from($buffer_size).expect("Buffer size is out of range"),
             "ledger should have {} bytes, but had {} bytes",
             $buffer_size,
             $ledger.get_total_buffer_size()

@@ -115,14 +115,14 @@ macro_rules! assert_reader_writer_v2_file_positions {
     ($ledger:expr, $reader:expr, $writer:expr) => {{
         let (reader, writer) = $ledger.get_current_reader_writer_file_id();
         assert_eq!(
-            ($reader) as u16,
+            u16::try_from($reader).expect("Reader value is out of range"),
             reader,
             "expected reader file ID of {}, got {} instead",
             ($reader),
             reader
         );
         assert_eq!(
-            ($writer) as u16,
+            u16::try_from($writer).expect("Writer value is out of range"),
             writer,
             "expected writer file ID of {}, got {} instead",
             ($writer),

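The buffer test macros swap bare `as` casts for `u64::try_from(...)` / `u16::try_from(...)` with an explicit `expect`. The commit message doesn't name a motivating lint, but the behavioral difference is worth noting: `as` silently truncates out-of-range values, while `try_from` surfaces them. A quick sketch:

    fn main() {
        let n: u32 = 70_000;

        // `as` wraps silently: 70_000 does not fit in a u16.
        let truncated = n as u16;
        assert_eq!(truncated, 4_464); // 70_000 - 65_536

        // `try_from` reports the overflow instead of hiding it.
        assert!(u16::try_from(n).is_err());
        assert_eq!(u16::try_from(300_u32).unwrap(), 300);
    }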
8 changes: 4 additions & 4 deletions lib/vector-buffers/src/variants/disk_v2/tests/size_limits.rs
@@ -262,15 +262,15 @@ async fn writer_rolls_data_files_when_the_limit_is_exceeded() {
     writer.flush().await.expect("flush should not fail");
     writer.close();

-    assert_buffer_size!(ledger, 2, (first_bytes_written + second_bytes_written));
+    assert_buffer_size!(ledger, 2, first_bytes_written + second_bytes_written);
     assert_reader_writer_v2_file_positions!(ledger, 0, 1);

     // Now read both records, make sure they are what we expect, etc.
     let first_record_read = read_next_some(&mut reader).await;
     assert_eq!(first_record_read, first_record);
     acknowledge(first_record_read).await;

-    assert_buffer_size!(ledger, 2, (first_bytes_written + second_bytes_written));
+    assert_buffer_size!(ledger, 2, first_bytes_written + second_bytes_written);
     assert_reader_writer_v2_file_positions!(ledger, 0, 1);

     let second_record_read = read_next_some(&mut reader).await;

@@ -358,15 +358,15 @@ async fn writer_rolls_data_files_when_the_limit_is_exceeded_after_reload() {
     writer.flush().await.expect("flush should not fail");
     writer.close();

-    assert_buffer_size!(ledger, 2, (first_bytes_written + second_bytes_written));
+    assert_buffer_size!(ledger, 2, first_bytes_written + second_bytes_written);
     assert_reader_writer_v2_file_positions!(ledger, 0, 1);

     // Now read both records, make sure they are what we expect, etc.
     let first_record_read = read_next_some(&mut reader).await;
     assert_eq!(first_record_read, first_record);
     acknowledge(first_record_read).await;

-    assert_buffer_size!(ledger, 2, (first_bytes_written + second_bytes_written));
+    assert_buffer_size!(ledger, 2, first_bytes_written + second_bytes_written);
     assert_reader_writer_v2_file_positions!(ledger, 0, 1);

     let second_record_read = read_next_some(&mut reader).await;

4 changes: 4 additions & 0 deletions lib/vector-config-common/src/validation.rs
@@ -1,3 +1,7 @@
+// Code generated by the `darling` derive macro triggers a clippy lint.
+// https://github.com/TedDriggs/darling/issues/293
+#![allow(clippy::manual_unwrap_or_default)]
+
 use darling::FromMeta;
 use proc_macro2::TokenStream;
 use quote::{quote, ToTokens};

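`clippy::manual_unwrap_or_default` flags a hand-written match that reimplements `unwrap_or_default()`. Here the match is emitted by `darling`'s derive macro (see the linked issue), so it can't be fixed at the call site and the lint is allowed file-wide instead. A minimal sketch of what the lint fires on:

    fn manual(opt: Option<String>) -> String {
        // clippy::manual_unwrap_or_default suggests replacing this match...
        match opt {
            Some(v) => v,
            None => String::default(),
        }
    }

    fn idiomatic(opt: Option<String>) -> String {
        // ...with the equivalent combinator.
        opt.unwrap_or_default()
    }

    fn main() {
        assert_eq!(manual(None), idiomatic(None));
        assert_eq!(manual(Some("x".into())), idiomatic(Some("x".into())));
    }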
4 changes: 4 additions & 0 deletions lib/vector-config-macros/src/ast/container.rs
@@ -1,3 +1,7 @@
+// Code generated by the `darling` derive macro triggers a clippy lint.
+// https://github.com/TedDriggs/darling/issues/293
+#![allow(clippy::manual_unwrap_or_default)]
+
 use std::collections::HashSet;

 use darling::{error::Accumulator, util::Flag, FromAttributes};

10 changes: 5 additions & 5 deletions lib/vector-stream/src/partitioned_batcher.rs
@@ -454,15 +454,15 @@ mod test {
     }

     fn arb_partitioner() -> impl Strategy<Value = TestPartitioner> {
-        (1..u8::max_value(),).prop_map(|(ks,)| TestPartitioner {
+        (1..u8::MAX,).prop_map(|(ks,)| TestPartitioner {
             key_space: NonZeroU8::new(ks).unwrap(),
         })
     }

     proptest! {
         #[test]
         fn size_hint_eq(stream: Vec<u64>,
-                        item_limit in 1..u16::max_value(),
+                        item_limit in 1..u16::MAX,
                         allocation_limit in 8..128,
                         partitioner in arb_partitioner(),
                         timer in arb_timer()) {

@@ -488,7 +488,7 @@ mod test {
     proptest! {
         #[test]
         fn batch_item_size_leq_limit(stream: Vec<u64>,
-                                     item_limit in 1..u16::max_value(),
+                                     item_limit in 1..u16::MAX,
                                      allocation_limit in 8..128,
                                      partitioner in arb_partitioner(),
                                      timer in arb_timer()) {

@@ -556,7 +556,7 @@ mod test {
     proptest! {
         #[test]
         fn batch_does_not_reorder(stream: Vec<u64>,
-                                  item_limit in 1..u16::max_value(),
+                                  item_limit in 1..u16::MAX,
                                   allocation_limit in 8..128,
                                   partitioner in arb_partitioner(),
                                   timer in arb_timer()) {

@@ -602,7 +602,7 @@ mod test {
     proptest! {
         #[test]
        fn batch_does_not_lose_items(stream: Vec<u64>,
-                                     item_limit in 1..u16::max_value(),
+                                     item_limit in 1..u16::MAX,
                                      allocation_limit in 8..128,
                                      partitioner in arb_partitioner(),
                                      timer in arb_timer()) {

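This file and several below replace `T::max_value()` and the `std::u64::MAX`-style module constants with the `T::MAX` associated constants, the preferred spelling since Rust 1.43. The sweep is plausibly prompted by clippy's `legacy_numeric_constants` lint arriving around this toolchain (an assumption; the commit message doesn't say). The mapping in brief:

    fn main() {
        // Old spellings, now linted against:
        //   u16::max_value()   ->   u16::MAX
        //   std::u64::MAX      ->   u64::MAX
        assert_eq!(u16::MAX, 65_535);
        assert_eq!(u8::MAX, 255);
        assert_eq!(u64::MAX, 18_446_744_073_709_551_615);
    }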
2 changes: 1 addition & 1 deletion rust-toolchain.toml
@@ -1,3 +1,3 @@
 [toolchain]
-channel = "1.78"
+channel = "1.79"
 profile = "default"

1 change: 1 addition & 0 deletions src/api/schema/components/mod.rs
@@ -23,6 +23,7 @@ use crate::{
     filter_check,
 };

+#[allow(clippy::duplicated_attributes)] // False positive caused by `ty = "String"`
 #[derive(Debug, Clone, Interface)]
 #[graphql(
     field(name = "component_id", ty = "String"),

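`clippy::duplicated_attributes` is meant to catch an attribute repeated verbatim on one item; here it misfires because each distinct `field(...)` entry in the GraphQL derive carries the same `ty = "String"` pair, hence the allow with its "false positive" note. A runnable sketch of the true-positive case the lint targets:

    // The same attribute repeated verbatim is what the lint normally catches.
    #[allow(clippy::duplicated_attributes)] // silences the deliberate repeat below
    #[allow(dead_code)]
    #[allow(dead_code)]
    struct Example;

    fn main() {}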
2 changes: 0 additions & 2 deletions src/sinks/elasticsearch/retry.rs
@@ -179,7 +179,6 @@ mod tests {
         logic.should_retry_response(&ElasticsearchResponse {
             http_response: response,
             event_status: EventStatus::Rejected,
-            batch_size: 1,
             events_byte_size: CountByteSize(1, JsonSize::new(1)).into(),
         }),
         RetryAction::DontRetry(_)

@@ -200,7 +199,6 @@
         logic.should_retry_response(&ElasticsearchResponse {
             http_response: response,
             event_status: EventStatus::Errored,
-            batch_size: 1,
             events_byte_size: CountByteSize(1, JsonSize::new(1)).into(),
         }),
         RetryAction::Retry(_)

6 changes: 0 additions & 6 deletions src/sinks/elasticsearch/service.rs
@@ -1,5 +1,4 @@
 use std::{
-    collections::HashMap,
     sync::Arc,
     task::{Context, Poll},
 };

@@ -93,7 +92,6 @@ impl ElasticsearchService {

 pub struct HttpRequestBuilder {
     pub bulk_uri: Uri,
-    pub query_params: HashMap<String, String>,
     pub auth: Option<Auth>,
     pub compression: Compression,
     pub http_request_config: RequestConfig,

@@ -105,7 +103,6 @@ impl HttpRequestBuilder {
             bulk_uri: common.bulk_uri.clone(),
             http_request_config: config.request.clone(),
             auth: common.auth.clone(),
-            query_params: common.query_params.clone(),
             compression: config.compression,
         }
     }

@@ -161,7 +158,6 @@ impl HttpRequestBuilder {
 pub struct ElasticsearchResponse {
     pub http_response: Response<Bytes>,
     pub event_status: EventStatus,
-    pub batch_size: usize,
     pub events_byte_size: GroupedCountByteSize,
 }

@@ -190,7 +186,6 @@ impl Service<ElasticsearchRequest> for ElasticsearchService {
         let mut http_service = self.batch_service.clone();
         Box::pin(async move {
             http_service.ready().await?;
-            let batch_size = req.batch_size;
             let events_byte_size =
                 std::mem::take(req.metadata_mut()).into_events_estimated_json_encoded_byte_size();
             let http_response = http_service.call(req).await?;

@@ -199,7 +194,6 @@ impl Service<ElasticsearchRequest> for ElasticsearchService {
             Ok(ElasticsearchResponse {
                 event_status,
                 http_response,
-                batch_size,
                 events_byte_size,
             })
         })

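The Elasticsearch deletions above, and the GreptimeDB and Splunk HEC ones below, all follow one pattern: fields and locals that are assigned but never read, which the newer toolchain reports via `dead_code` and `unused_variables` (my reading of the diff; the commit message only calls the fields unused). Here the fix is deletion rather than an `#[allow]`. A sketch of the warning class:

    struct SinkResponse {
        status: u16,
        batch_size: usize, // rustc: "field `batch_size` is never read"
    }

    fn main() {
        let resp = SinkResponse { status: 200, batch_size: 16 };
        // Only `status` is read anywhere, so `batch_size` is dead weight.
        println!("status = {}", resp.status);
    }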
8 changes: 2 additions & 6 deletions src/sinks/greptimedb/service.rs
@@ -79,7 +79,6 @@ impl MetaDescriptive for GreptimeDBRequest {

 #[derive(Debug)]
 pub struct GreptimeDBBatchOutput {
-    pub item_count: u32,
     pub metadata: RequestMetadata,
 }

@@ -170,12 +169,9 @@ impl Service<GreptimeDBRequest> for GreptimeDBService {

         Box::pin(async move {
             let metadata = req.metadata;
-            let result = client.row_insert(req.items).await?;
+            client.row_insert(req.items).await?;

-            Ok(GreptimeDBBatchOutput {
-                item_count: result,
-                metadata,
-            })
+            Ok(GreptimeDBBatchOutput { metadata })
         })
     }
 }

7 changes: 1 addition & 6 deletions src/sinks/splunk_hec/logs/config.rs
@@ -213,11 +213,7 @@ impl SinkConfig for HecLogsSinkConfig {
 }

 impl HecLogsSinkConfig {
-    pub fn build_processor(
-        &self,
-        client: HttpClient,
-        cx: SinkContext,
-    ) -> crate::Result<VectorSink> {
+    pub fn build_processor(&self, client: HttpClient, _: SinkContext) -> crate::Result<VectorSink> {
         let ack_client = if self.acknowledgements.indexer_acknowledgements_enabled {
             Some(client.clone())
         } else {

@@ -265,7 +261,6 @@ impl HecLogsSinkConfig {
         let sink = HecLogsSink {
             service,
             request_builder,
-            context: cx,
             batch_settings,
             sourcetype: self.sourcetype.clone(),
             source: self.source.clone(),

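Both Splunk HEC sinks stop storing the `SinkContext`, but `build_processor` keeps its arity (presumably so callers don't change) by binding the now-unused parameter to `_`, the standard way to silence `unused_variables` without touching the signature. A small sketch with hypothetical names:

    // The second argument is retained for API stability but no longer used.
    fn build_processor(client: &str, _: u32) -> String {
        format!("processor for {client}")
    }

    fn main() {
        println!("{}", build_processor("http-client", 7));
    }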
1 change: 0 additions & 1 deletion src/sinks/splunk_hec/logs/sink.rs
@@ -24,7 +24,6 @@ use vrl::path::OwnedTargetPath;
 // `None` type and an empty string. This is necessary because `OptionalTargetPath` deserializes an
 // empty string to a `None` path internally.
 pub struct HecLogsSink<S> {
-    pub context: SinkContext,
     pub service: S,
     pub request_builder: HecLogsRequestBuilder,
     pub batch_settings: BatcherSettings,

7 changes: 1 addition & 6 deletions src/sinks/splunk_hec/metrics/config.rs
@@ -166,11 +166,7 @@ impl SinkConfig for HecMetricsSinkConfig {
 }

 impl HecMetricsSinkConfig {
-    pub fn build_processor(
-        &self,
-        client: HttpClient,
-        cx: SinkContext,
-    ) -> crate::Result<VectorSink> {
+    pub fn build_processor(&self, client: HttpClient, _: SinkContext) -> crate::Result<VectorSink> {
         let ack_client = if self.acknowledgements.indexer_acknowledgements_enabled {
             Some(client.clone())
         } else {

@@ -207,7 +203,6 @@ impl HecMetricsSinkConfig {
         let batch_settings = self.batch.into_batcher_settings()?;

         let sink = HecMetricsSink {
-            context: cx,
             service,
             batch_settings,
             request_builder,

1 change: 0 additions & 1 deletion src/sinks/splunk_hec/metrics/sink.rs
@@ -15,7 +15,6 @@ use crate::{
 };

 pub struct HecMetricsSink<S> {
-    pub context: SinkContext,
     pub service: S,
     pub batch_settings: BatcherSettings,
     pub request_builder: HecMetricsRequestBuilder,

4 changes: 2 additions & 2 deletions src/sinks/util/batch.rs
@@ -285,8 +285,8 @@ pub struct BatchSize<B> {
 impl<B> BatchSize<B> {
     pub const fn const_default() -> Self {
         BatchSize {
-            bytes: usize::max_value(),
-            events: usize::max_value(),
+            bytes: usize::MAX,
+            events: usize::MAX,
             _type_marker: PhantomData,
         }
     }

4 changes: 2 additions & 2 deletions src/sinks/util/retries.rs
@@ -296,7 +296,7 @@ impl Iterator for ExponentialBackoff {
         let duration = if let Some(duration) = self.current.checked_mul(self.factor) {
             Duration::from_millis(duration)
         } else {
-            Duration::from_millis(std::u64::MAX)
+            Duration::from_millis(u64::MAX)
         };

         // check if we reached max delay

@@ -309,7 +309,7 @@ impl Iterator for ExponentialBackoff {
         if let Some(next) = self.current.checked_mul(self.base) {
             self.current = next;
         } else {
-            self.current = std::u64::MAX;
+            self.current = u64::MAX;
         }

         Some(duration)

8 changes: 4 additions & 4 deletions src/sinks/util/service.rs
@@ -88,8 +88,8 @@ pub trait TowerRequestConfigDefaults {
     const CONCURRENCY: Concurrency = Concurrency::Adaptive;
     const TIMEOUT_SECS: u64 = 60;
     const RATE_LIMIT_DURATION_SECS: u64 = 1;
-    const RATE_LIMIT_NUM: u64 = i64::max_value() as u64; // i64 avoids TOML deserialize issue
-    const RETRY_ATTEMPTS: usize = isize::max_value() as usize; // isize avoids TOML deserialize issue
+    const RATE_LIMIT_NUM: u64 = i64::MAX as u64; // i64 avoids TOML deserialize issue
+    const RETRY_ATTEMPTS: usize = isize::MAX as usize; // isize avoids TOML deserialize issue
     const RETRY_MAX_DURATION_SECS: u64 = 30;
     const RETRY_INITIAL_BACKOFF_SECS: u64 = 1;
 }

@@ -459,8 +459,8 @@ mod tests {
         assert_eq!(settings.concurrency, None);
         assert_eq!(settings.timeout, Duration::from_secs(60));
         assert_eq!(settings.rate_limit_duration, Duration::from_secs(1));
-        assert_eq!(settings.rate_limit_num, i64::max_value() as u64);
-        assert_eq!(settings.retry_attempts, isize::max_value() as usize);
+        assert_eq!(settings.rate_limit_num, i64::MAX as u64);
+        assert_eq!(settings.retry_attempts, isize::MAX as usize);
         assert_eq!(settings.retry_max_duration, Duration::from_secs(30));
         assert_eq!(settings.retry_initial_backoff, Duration::from_secs(1));
     }

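The inline comments here explain the odd-looking defaults: TOML integers are signed 64-bit, so a `u64::MAX` default could never round-trip through a TOML config file, and `i64::MAX as u64` (likewise `isize` for the retry count) is used instead. A sketch of the constraint:

    fn main() {
        // TOML's integer type is a signed 64-bit value, so this is the largest
        // number a config file can express:
        let toml_max: i64 = i64::MAX; // 9_223_372_036_854_775_807

        // u64::MAX is roughly twice that, hence the i64-based default.
        assert!(u64::MAX > toml_max as u64);
        println!("TOML max integer: {toml_max}");
    }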
2 changes: 1 addition & 1 deletion src/sources/fluent/message.rs
@@ -249,7 +249,7 @@ mod test {

     quickcheck! {
         fn from_u64(input: u64) -> () {
-            if input > i64::max_value() as u64 {
+            if input > i64::MAX as u64 {
                 assert_eq!(Value::from(FluentValue(rmpv::Value::Integer(rmpv::Integer::from(input)))),
                     Value::Bytes(input.to_string().into()))
             } else {

(The remaining changed files are not shown here.)
