feat: migrated handlers to a separate file
zleyyij committed Apr 27, 2024
1 parent 73ce1c6 commit 489c28d
Showing 5 changed files with 155 additions and 156 deletions.
7 changes: 0 additions & 7 deletions Cargo.lock

Some generated files are not rendered by default.

1 change: 0 additions & 1 deletion server/Cargo.toml
@@ -17,7 +17,6 @@ chrono = "0.4.32"
clap = { version = "4.0", features = ["derive"] }
colored = "2.1.0"
http = "1.0.0"
levenshtein = "1.0.5"
nom = "7.1.3"
log = "0.4.20"
serde = { version = "1.0.195", features = ["derive"] }
8 changes: 4 additions & 4 deletions server/build.rs
@@ -6,8 +6,8 @@ use std::path::Path;

fn main() {
    // TODO: configure cargo to rerun only if the database changes
    println!("cargo::rerun-if-changed=../parsing/src/cpu/amd/input.json");
    println!("cargo::rerun-if-changed=../parsing/src/cpu/intel/chunks/*");
    // println!("cargo::rerun-if-changed=../parsing/src/cpu/amd/input.json");
    println!("cargo::rerun-if-changed=build.rs");
    gen_amd_cpus();
    gen_intel_cpus();
}
@@ -32,7 +32,7 @@ fn gen_amd_cpus() {
    }
    write!(
        &mut generated_file,
        "// This file was autogenerated by build.rs\npub static AMD_CPUS: phf::Map<&'static str, phf::Map<&'static str, &'static str>> = {}",
        "// This file was autogenerated by build.rs\n#[rustfmt::skip]\n#[allow(clippy::all)]\npub static AMD_CPUS: phf::Map<&'static str, phf::Map<&'static str, &'static str>> = {}",
        generated_map.build()
    )
    .unwrap();
@@ -59,7 +59,7 @@ fn gen_intel_cpus() {
    }
    write!(
        &mut generated_file,
        "// This file was autogenerated by build.rs\npub static INTEL_CPUS: phf::Map<&'static str, phf::Map<&'static str, &'static str>> = {}",
        "// This file was autogenerated by build.rs\n#[rustfmt::skip]\n#[allow(clippy::all)]\npub static INTEL_CPUS: phf::Map<&'static str, phf::Map<&'static str, &'static str>> = {}",
        generated_map.build()
    )
    .unwrap();
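For context, the build script bakes these `phf` maps directly into the binary, so CPU lookups at runtime need no parsing or allocation. Below is a minimal consumer sketch; the `include!` path and the lookup key are assumptions for illustration and do not appear in this diff.

// Hypothetical consumer of the build-script output; the OUT_DIR file name and
// the "Ryzen 7 5800X" key are illustrative only.
include!(concat!(env!("OUT_DIR"), "/amd_cpus.rs"));

fn main() {
    // phf::Map::get is a compile-time-hashed lookup; nothing is allocated here.
    if let Some(attributes) = AMD_CPUS.get("Ryzen 7 5800X") {
        for (key, value) in attributes.entries() {
            println!("{key}: {value}");
        }
    }
}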
145 changes: 145 additions & 0 deletions server/src/handlers/mod.rs
@@ -0,0 +1,145 @@
use crate::cpu::Cpu;
use crate::AppState;
use axum::extract::Query;
use axum::http::StatusCode;
use axum::{extract::State, Json};
use log::{error, warn};
use serde::{Deserialize, Serialize};

#[derive(Debug, Deserialize, Serialize)]
pub struct UsbQuery {
    pub identifier: String,
}

#[derive(Debug, Deserialize, Serialize)]
pub struct UsbResponse {
    pub vendor: Option<String>,
    pub device: Option<String>,
}

/// This handler accepts a `GET` request to `/api/usbs/?identifier`.
/// It relies on a globally shared [AppState] to re-use the usb cache.
pub async fn get_usb_handler(
    State(state): State<AppState>,
    Query(query): Query<UsbQuery>,
) -> Result<Json<UsbResponse>, StatusCode> {
    // TODO: update docs
    let results = state.usb_cache.find(&query.identifier);
    match results {
        Ok(r) => Ok(Json(UsbResponse {
            vendor: r.0.map(|v| v.name),
            device: r.1.map(|d| d.name),
        })),
        Err(e) => {
            error!("usb handler error: {:?} caused by query: {:?}", e, query);
            Err(StatusCode::NOT_FOUND)
        }
    }
}

#[derive(Debug, Deserialize, Serialize)]
pub struct GetPcieQuery {
    identifier: String,
}

#[derive(Debug, Deserialize, Serialize)]
pub struct PcieResponse {
    pub vendor: Option<String>,
    pub device: Option<String>,
    pub subsystem: Option<String>,
}

/// This handler accepts a `GET` request to `/api/pcie/?identifier`.
/// It relies on a globally shared [AppState] to re-use the pcie cache
pub async fn get_pcie_handler(
    State(state): State<AppState>,
    Query(query): Query<GetPcieQuery>,
) -> Result<Json<PcieResponse>, StatusCode> {
    let results = state.pcie_cache.find(&query.identifier);
    match results {
        Ok(r) => Ok(Json(PcieResponse {
            vendor: r.0.map(|v| v.name),
            device: r.1.map(|d| d.name),
            subsystem: r.2.map(|s| s.name),
        })),
        Err(e) => {
            error!("pcie handler error: {:?} caused by query: {:?}", e, query);
            Err(StatusCode::NOT_FOUND)
        }
    }
}

/// This handler accepts a `POST` request to `/api/pcie/`, with a body containing a serialized array of strings.
/// It relies on a globally shared [AppState] to re-use the pcie cache, and is largely identical to [get_pcie_handler], but
/// is intended for batching
pub async fn post_pcie_handler(
    State(state): State<AppState>,
    Json(query): Json<Vec<String>>,
) -> Result<Json<Vec<Option<PcieResponse>>>, StatusCode> {
    let mut response: Vec<Option<PcieResponse>> = Vec::with_capacity(16);
    for entry in query {
        match state.pcie_cache.find(&entry) {
            Ok(r) => response.push(Some(PcieResponse {
                vendor: r.0.map(|v| v.name),
                device: r.1.map(|d| d.name),
                subsystem: r.2.map(|s| s.name),
            })),
            Err(e) => {
                warn!("post pcie handler error: when processing the device identifier {:?}, an error was returned: {:?}", entry, e);
                response.push(None);
            }
        }
    }
    Ok(Json(response))
}

/// This handler accepts a `POST` request to `/api/usbs/`, with a body containing a serialized array of usb device identifier strings.
/// It relies on a globally shared [AppState] to re-use the pcie cache, and is largely identical to [get_usb_handler], but
/// is intended for batching
pub async fn post_usbs_handler(
    State(state): State<AppState>,
    Json(query): Json<Vec<String>>,
) -> Result<Json<Vec<Option<UsbResponse>>>, StatusCode> {
    let mut response: Vec<Option<UsbResponse>> = Vec::with_capacity(16);
    for entry in query {
        match state.usb_cache.find(&entry) {
            Ok(r) => response.push(Some(UsbResponse {
                vendor: r.0.map(|v| v.name),
                device: r.1.map(|d| d.name),
            })),
            Err(e) => {
                warn!("post usb handler error: when processing the device identifier {:?}, an error was returned: {:?}", entry, e);
                response.push(None);
            }
        }
    }
    Ok(Json(response))
}

#[derive(Debug, Deserialize, Serialize)]
pub struct CpuQuery {
    pub name: String,
}

/// This handler accepts a `GET` request to `/api/cpus/?name=[CPU_NAME]`.
/// It relies on a globally shared [AppState] to re-use the cpu cache, and responds to the request with a serialized [Cpu].
/// It will always attempt to find a cpu, and should always return a cpu. The correctness of the return value is not guaranteed.
pub async fn get_cpu_handler(
    State(mut state): State<AppState>,
    Query(query): Query<CpuQuery>,
) -> Result<Json<Cpu<String>>, StatusCode> {
    match state.cpu_cache.find(&query.name) {
        Ok(c) => Ok(Json(Cpu {
            name: c.name.to_string(),
            attributes: c
                .attributes
                .iter()
                .map(|(k, v)| (k.to_string(), v.to_string()))
                .collect(),
        })),
        Err(e) => {
            error!("cpu handler error {:?} caused by query {:?}", e, query);
            Err(StatusCode::NOT_FOUND)
        }
    }
}
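As the doc comments on post_pcie_handler and post_usbs_handler describe, the batch endpoints accept a JSON array of identifier strings and return an array of the same length whose entries are either a response object or null. A hypothetical client sketch follows; the server address, the reqwest/tokio/serde_json dependencies, and the identifier strings are assumptions, and the exact identifier format expected by the caches is not shown in this diff.

// Hypothetical client for the batched PCIe endpoint; the URL and identifiers are examples only.
use serde::Deserialize;

// Mirrors the shape of the server's PcieResponse for deserialization.
#[derive(Debug, Deserialize)]
struct PcieResponse {
    vendor: Option<String>,
    device: Option<String>,
    subsystem: Option<String>,
}

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let identifiers = vec!["10de:2204".to_string(), "8086:a0f0".to_string()];
    // One entry comes back per identifier; identifiers that fail to resolve are null.
    let results: Vec<Option<PcieResponse>> = reqwest::Client::new()
        .post("http://localhost:8080/api/pcie/")
        .json(&identifiers)
        .send()
        .await?
        .json()
        .await?;
    println!("{results:#?}");
    Ok(())
}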
150 changes: 6 additions & 144 deletions server/src/main.rs
@@ -1,20 +1,20 @@
mod cpu;
mod handlers;

use axum::extract::Query;
use axum::http::{HeaderValue, StatusCode};
use axum::http::HeaderValue;
use axum::routing::post;
use axum::{extract::State, routing::get, Json, Router};
use axum::{routing::get, Router};
use chrono::Local;
use clap::builder::TypedValueParser;
use clap::{Parser, ValueEnum};
use colored::*;
use cpu::{Cpu, CpuCache};
use cpu::CpuCache;
use handlers::*;
use http::{header, Method};
use log::{error, info, warn};
use log::info;
use log::{Level, LevelFilter, Metadata, Record};
use parsing::pcie::PcieCache;
use parsing::usb::UsbCache;
use serde::{Deserialize, Serialize};
use std::env;
use tower_http::cors::CorsLayer;

@@ -85,144 +85,6 @@ struct AppState {
    pub pcie_cache: PcieCache,
}

#[derive(Debug, Deserialize, Serialize)]
struct UsbQuery {
    pub identifier: String,
}

#[derive(Debug, Deserialize, Serialize)]
struct UsbResponse {
    pub vendor: Option<String>,
    pub device: Option<String>,
}

/// This handler accepts a `GET` request to `/api/usbs/?identifier`.
/// It relies on a globally shared [AppState] to re-use the usb cache.
async fn get_usb_handler(
    State(state): State<AppState>,
    Query(query): Query<UsbQuery>,
) -> Result<Json<UsbResponse>, StatusCode> {
    // TODO: update docs
    let results = state.usb_cache.find(&query.identifier);
    match results {
        Ok(r) => Ok(Json(UsbResponse {
            vendor: r.0.map(|v| v.name),
            device: r.1.map(|d| d.name),
        })),
        Err(e) => {
            error!("usb handler error: {:?} caused by query: {:?}", e, query);
            Err(StatusCode::NOT_FOUND)
        }
    }
}

#[derive(Debug, Deserialize, Serialize)]
struct GetPcieQuery {
    identifier: String,
}

#[derive(Debug, Deserialize, Serialize)]
struct PcieResponse {
    pub vendor: Option<String>,
    pub device: Option<String>,
    pub subsystem: Option<String>,
}

/// This handler accepts a `GET` request to `/api/pcie/?identifier`.
/// It relies on a globally shared [AppState] to re-use the pcie cache
async fn get_pcie_handler(
    State(state): State<AppState>,
    Query(query): Query<GetPcieQuery>,
) -> Result<Json<PcieResponse>, StatusCode> {
    let results = state.pcie_cache.find(&query.identifier);
    match results {
        Ok(r) => Ok(Json(PcieResponse {
            vendor: r.0.map(|v| v.name),
            device: r.1.map(|d| d.name),
            subsystem: r.2.map(|s| s.name),
        })),
        Err(e) => {
            error!("pcie handler error: {:?} caused by query: {:?}", e, query);
            Err(StatusCode::NOT_FOUND)
        }
    }
}

/// This handler accepts a `POST` request to `/api/pcie/`, with a body containing a serialized array of strings.
/// It relies on a globally shared [AppState] to re-use the pcie cache, and is largely identical to [get_pcie_handler], but
/// is intended for batching
async fn post_pcie_handler(
    State(state): State<AppState>,
    Json(query): Json<Vec<String>>,
) -> Result<Json<Vec<Option<PcieResponse>>>, StatusCode> {
    let mut response: Vec<Option<PcieResponse>> = Vec::with_capacity(16);
    for entry in query {
        match state.pcie_cache.find(&entry) {
            Ok(r) => response.push(Some(PcieResponse {
                vendor: r.0.map(|v| v.name),
                device: r.1.map(|d| d.name),
                subsystem: r.2.map(|s| s.name),
            })),
            Err(e) => {
                warn!("post pcie handler error: when processing the device identifier {:?}, an error was returned: {:?}", entry, e);
                response.push(None);
            }
        }
    }
    Ok(Json(response))
}

/// This handler accepts a `POST` request to `/api/usbs/`, with a body containing a serialized array of usb device identifier strings.
/// It relies on a globally shared [AppState] to re-use the pcie cache, and is largely identical to [get_usb_handler], but
/// is intended for batching
async fn post_usbs_handler(
    State(state): State<AppState>,
    Json(query): Json<Vec<String>>,
) -> Result<Json<Vec<Option<UsbResponse>>>, StatusCode> {
    let mut response: Vec<Option<UsbResponse>> = Vec::with_capacity(16);
    for entry in query {
        match state.usb_cache.find(&entry) {
            Ok(r) => response.push(Some(UsbResponse {
                vendor: r.0.map(|v| v.name),
                device: r.1.map(|d| d.name),
            })),
            Err(e) => {
                warn!("post usb handler error: when processing the device identifier {:?}, an error was returned: {:?}", entry, e);
                response.push(None);
            }
        }
    }
    Ok(Json(response))
}

#[derive(Debug, Deserialize, Serialize)]
struct CpuQuery {
    pub name: String,
}

/// This handler accepts a `GET` request to `/api/cpus/?name=[CPU_NAME]`.
/// It relies on a globally shared [AppState] to re-use the cpu cache, and responds to the request with a serialized [Cpu].
/// It will always attempt to find a cpu, and should always return a cpu. The correctness of the return value is not guaranteed.
async fn get_cpu_handler(
    State(mut state): State<AppState>,
    Query(query): Query<CpuQuery>,
) -> Result<Json<Cpu<String>>, StatusCode> {
    match state.cpu_cache.find(&query.name) {
        Ok(c) => Ok(Json(Cpu {
            name: c.name.to_string(),
            attributes: c
                .attributes
                .iter()
                .map(|(k, v)| (k.to_string(), v.to_string()))
                .collect(),
        })),
        Err(e) => {
            error!("cpu handler error {:?} caused by query {:?}", e, query);
            Err(StatusCode::NOT_FOUND)
        }
    }
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // initialize logging
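The route registration itself lives in the elided remainder of main.rs and is not reproduced here. Purely as an illustration of how the re-exported handlers could be mounted onto the shared AppState, a sketch of a helper that could sit in main.rs (not a reconstruction of the hidden code) might look like this:

// Illustrative only: assumes `use handlers::*;` as added above, the axum imports already
// present in main.rs, and that AppState implements Clone so each request gets access to the caches.
fn build_router(state: AppState) -> Router {
    Router::new()
        .route("/api/usbs/", get(get_usb_handler).post(post_usbs_handler))
        .route("/api/pcie/", get(get_pcie_handler).post(post_pcie_handler))
        .route("/api/cpus/", get(get_cpu_handler))
        .with_state(state)
}

The paths match the ones named in the handlers' doc comments; any CORS or logging middleware from the elided section would wrap this router before it is served.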
