Made some optimizations
amigin committed Apr 7, 2024
1 parent bcbc4f6 commit 6e37e2d
Showing 11 changed files with 39 additions and 30 deletions.
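The edits below all follow one pattern: the JSON writers' build() output is now consumed as raw bytes rather than as a String, so the into_bytes()/as_bytes() round trips disappear from the call sites, and the few places that still need a &str (the header prefixes) convert back with an unchecked UTF-8 view. Here is a minimal, self-contained sketch of that pattern; the real JsonObjectWriter, write_pascal_string and write_byte_array helpers are stood in for by plain byte buffers, and the build() -> Vec<u8> signature is an assumption read off the call sites below.

    fn main() {
        // Stand-in for header_json.build(): JSON bytes that are known to be valid UTF-8.
        let header_json: Vec<u8> = br#"{"tableName":"my-table"}"#.to_vec();

        // The unchecked conversion is sound only because the writer always emits
        // valid UTF-8; std::str::from_utf8(..).unwrap() would be the checked form.
        let header = unsafe {
            format!(
                "initTable:{}",
                std::str::from_utf8_unchecked(header_json.as_slice())
            )
        };
        assert_eq!(header, r#"initTable:{"tableName":"my-table"}"#);

        // Payload bytes are passed through as-is: no String allocation and no
        // copy back into a Vec via into_bytes().
        let body: Vec<u8> = br#"[{"partitionKey":"pk","rowKey":"rk"}]"#.to_vec();
        let _ = body.as_slice(); // would be handed to write_byte_array(...)
    }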
34 changes: 27 additions & 7 deletions src/data_readers/http_connection/into_http_payload.rs
@@ -33,12 +33,17 @@ fn write_init_table_result(table_name: &str, content: JsonArrayWriter) -> Vec<u8
     let mut header_json = JsonObjectWriter::new();
     header_json.write("tableName", table_name);

-    let header = format!("initTable:{}", header_json.build());
+    let header = unsafe {
+        format!(
+            "initTable:{}",
+            std::str::from_utf8_unchecked(header_json.build().as_slice())
+        )
+    };

     write_pascal_string(header.as_str(), &mut result);

     let content = content.build();
-    write_byte_array(content.as_bytes(), &mut result);
+    write_byte_array(content.as_slice(), &mut result);
     result
 }

@@ -48,12 +53,17 @@ fn write_init_partitions_result(sync_data: &InitPartitionsSyncEventData) -> Vec<
     let mut header_json = JsonObjectWriter::new();
     header_json.write("tableName", sync_data.table_data.table_name.as_str());

-    let header = format!("initPartitions:{}", header_json.build());
+    let header = unsafe {
+        format!(
+            "initPartitions:{}",
+            std::str::from_utf8_unchecked(header_json.build().as_slice())
+        )
+    };

     write_pascal_string(header.as_str(), &mut result);

     let content = sync_data.as_json().build();
-    write_byte_array(content.as_bytes(), &mut result);
+    write_byte_array(content.as_slice(), &mut result);
     result
 }

@@ -62,12 +72,17 @@ pub fn compile_update_rows_result(sync_data: &UpdateRowsSyncData) -> Vec<u8> {
     let mut header_json = JsonObjectWriter::new();
     header_json.write("tableName", sync_data.table_data.table_name.as_str());

-    let header = format!("updateRows:{}", header_json.build());
+    let header = unsafe {
+        format!(
+            "updateRows:{}",
+            std::str::from_utf8_unchecked(header_json.build().as_slice())
+        )
+    };

     write_pascal_string(header.as_str(), &mut result);

     let content = sync_data.rows_by_partition.as_json_array().build();
-    write_byte_array(content.as_bytes(), &mut result);
+    write_byte_array(content.as_slice(), &mut result);
     result
 }

@@ -77,7 +92,12 @@ pub fn compile_delete_rows_result(sync_data: &DeleteRowsEventSyncData) -> Vec<u8

     header_json.write("tableName", sync_data.table_data.table_name.as_str());

-    let header = format!("deleteRows:{}", header_json.build());
+    let header = unsafe {
+        format!(
+            "deleteRows:{}",
+            std::str::from_utf8_unchecked(header_json.build().as_slice())
+        )
+    };

     write_pascal_string(header.as_str(), &mut result);

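The unsafe blocks above skip re-validating bytes the JSON writer has just produced. If one would rather not carry the unsafe, a checked variant costs a single validation pass and no extra allocation; format_header below is a hypothetical helper sketched for illustration, not part of this change.

    // Hypothetical checked alternative to the from_utf8_unchecked pattern above.
    fn format_header(prefix: &str, header_json: &[u8]) -> String {
        let json = std::str::from_utf8(header_json).expect("JSON writer emits valid UTF-8");
        format!("{}:{}", prefix, json)
    }

    fn main() {
        let header = format_header("deleteRows", br#"{"tableName":"my-table"}"#);
        assert_eq!(header, r#"deleteRows:{"tableName":"my-table"}"#);
    }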
7 changes: 3 additions & 4 deletions src/data_readers/tcp_connection/tcp_payload_to_send.rs
@@ -12,7 +12,7 @@ pub async fn serialize(sync_event: &SyncEvent, compress: bool) -> Vec<MyNoSqlTcp

             let tcp_contract = MyNoSqlTcpContract::InitTable {
                 table_name: sync_data.db_table.name.to_string(),
-                data: data.into_bytes(),
+                data,
             };

             if compress {
@@ -27,7 +27,7 @@ pub async fn serialize(sync_event: &SyncEvent, compress: bool) -> Vec<MyNoSqlTcp

             let tcp_contract = MyNoSqlTcpContract::InitTable {
                 table_name: sync_data.table_data.table_name.to_string(),
-                data: data.into_bytes(),
+                data,
             };

             if compress {
@@ -47,7 +47,6 @@ pub async fn serialize(sync_event: &SyncEvent, compress: bool) -> Vec<MyNoSqlTcp
                         .db_rows_snapshot
                         .as_json_array()
                         .build()
-                        .into_bytes()
                 } else {
                     EMPTY_ARRAY.to_vec()
                 },
@@ -65,7 +64,7 @@ pub async fn serialize(sync_event: &SyncEvent, compress: bool) -> Vec<MyNoSqlTcp
         SyncEvent::UpdateRows(data) => {
             let tcp_contract = MyNoSqlTcpContract::UpdateRows {
                 table_name: data.table_data.table_name.to_string(),
-                data: data.rows_by_partition.as_json_array().build().into_bytes(),
+                data: data.rows_by_partition.as_json_array().build(),
             };

             if compress {
4 changes: 1 addition & 3 deletions src/db_operations/read/get_highest_row_and_below.rs
@@ -46,9 +46,7 @@ pub async fn get_highest_row_and_below(
         count += 1;
     }

-    return Ok(ReadOperationResult::RowsArray(
-        json_array_writer.build().into_bytes(),
-    ));
+    return Ok(ReadOperationResult::RowsArray(json_array_writer.build()));
     /*
     let mut json_array_writer = JsonArrayWriter::new();
2 changes: 1 addition & 1 deletion src/db_operations/read/multipart.rs
@@ -25,5 +25,5 @@ pub async fn get_next(
 ) -> Option<ReadOperationResult> {
     let db_rows = app.multipart_list.get(multipart_id, amount).await?;

-    ReadOperationResult::RowsArray(db_rows.as_json_array().build().into_bytes()).into()
+    ReadOperationResult::RowsArray(db_rows.as_json_array().build()).into()
 }
4 changes: 1 addition & 3 deletions src/db_operations/read/rows/get_all.rs
@@ -29,9 +29,7 @@ pub async fn get_all(
         json_array_writer.write(db_row.as_ref());
     }

-    return Ok(ReadOperationResult::RowsArray(
-        json_array_writer.build().into_bytes(),
-    ));
+    return Ok(ReadOperationResult::RowsArray(json_array_writer.build()));
 }

 /*
4 changes: 1 addition & 3 deletions src/db_operations/read/rows/get_all_by_partition_key.rs
@@ -40,7 +40,5 @@ pub async fn get_all_by_partition_key(
         },
     );

-    return Ok(ReadOperationResult::RowsArray(
-        json_array_writer.build().into_bytes(),
-    ));
+    return Ok(ReadOperationResult::RowsArray(json_array_writer.build()));
 }
4 changes: 1 addition & 3 deletions src/db_operations/read/rows/get_all_by_row_key.rs
@@ -30,7 +30,5 @@ pub async fn get_all_by_row_key(
         json_array_writer.write(db_row.as_ref());
     }

-    return Ok(ReadOperationResult::RowsArray(
-        json_array_writer.build().into_bytes(),
-    ));
+    return Ok(ReadOperationResult::RowsArray(json_array_writer.build()));
 }
4 changes: 1 addition & 3 deletions src/db_operations/read/rows/get_single_partition_multiple_rows.rs
@@ -39,9 +39,7 @@ pub async fn get_single_partition_multiple_rows(
             json_array_writer.write(db_row.as_ref());
         }
     }
-    return Ok(ReadOperationResult::RowsArray(
-        json_array_writer.build().into_bytes(),
-    ));
+    return Ok(ReadOperationResult::RowsArray(json_array_writer.build()));
 }

 /*
2 changes: 1 addition & 1 deletion src/db_sync/states/delete_rows_event_sync_data.rs
@@ -139,6 +139,6 @@ impl DeleteRowsEventSyncData {
             }
         }

-        json_object_writer.build().into_bytes()
+        json_object_writer.build()
     }
 }
2 changes: 1 addition & 1 deletion src/persist_operations/sync/upload_partition.rs
@@ -9,7 +9,7 @@ pub async fn upload_partition(app: &AppContext, table_name: &str, snapshot: DbPa
         .save_table_file(
             table_name,
             &TableFile::DbPartition(snapshot.partition_key.clone()),
-            content.build().into_bytes(),
+            content.build(),
         )
         .await;

2 changes: 1 addition & 1 deletion src/zip/db_zip_builder.rs
@@ -44,7 +44,7 @@ impl DbZipBuilder {

             let json = itm.db_rows_snapshot.as_json_array();

-            let payload = json.build().into_bytes();
+            let payload = json.build();

             write_to_zip_file(&mut self.zip_writer, &payload)?;
         }
