From 5d9ab7df0ec12fbbde101ca71ff532ab9fb31047 Mon Sep 17 00:00:00 2001 From: YanceyOfficial Date: Tue, 22 Oct 2024 14:53:12 +0800 Subject: [PATCH] feat: migrate fine_tunes to the fine_tuning API --- examples/Cargo.toml | 2 +- examples/fine_tunes/Cargo.toml | 2 +- examples/fine_tunes/src/main.rs | 14 +- rs_openai/src/apis/fine_tunes.rs | 119 ------------- rs_openai/src/apis/fine_tuning.rs | 93 ++++++++++ rs_openai/src/apis/mod.rs | 2 +- rs_openai/src/client.rs | 6 +- rs_openai/src/interfaces/fine_tunes.rs | 163 ----------------- rs_openai/src/interfaces/fine_tuning.rs | 222 ++++++++++++++++++++++++ rs_openai/src/interfaces/mod.rs | 2 +- 10 files changed, 329 insertions(+), 296 deletions(-) delete mode 100644 rs_openai/src/apis/fine_tunes.rs create mode 100644 rs_openai/src/apis/fine_tuning.rs delete mode 100644 rs_openai/src/interfaces/fine_tunes.rs create mode 100644 rs_openai/src/interfaces/fine_tuning.rs diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 7e94a3e..18426cf 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -7,7 +7,7 @@ members = [ "embeddings", "engines", "files", - "fine_tunes", + "fine_tuning", "images", "models", "moderations", diff --git a/examples/fine_tunes/Cargo.toml b/examples/fine_tunes/Cargo.toml index 810f096..8014ec4 100644 --- a/examples/fine_tunes/Cargo.toml +++ b/examples/fine_tunes/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "fine_tunes" +name = "fine_tuning" version = "0.1.0" edition = "2021" publish = false diff --git a/examples/fine_tunes/src/main.rs b/examples/fine_tunes/src/main.rs index 2519448..bed7134 100644 --- a/examples/fine_tunes/src/main.rs +++ b/examples/fine_tunes/src/main.rs @@ -1,6 +1,6 @@ use dotenvy::dotenv; use futures::StreamExt; -use rs_openai::{fine_tunes::CreateFineTuneRequestBuilder, OpenAI}; +use rs_openai::{fine_tuning::CreateFineTuningRequestBuilder, OpenAI}; use std::env::var; use std::io::{stdout, Write}; @@ -24,29 +24,29 @@ async fn main() -> Result<(), Box> { .prompt_loss_weight(0.01) .build()?; - let res =
client.fine_tunes().create(&req).await?; + let res = client.fine_tuning().create(&req).await?; println!("{:?}", res); // list - let res = client.fine_tunes().list().await?; + let res = client.fine_tuning().list().await?; println!("{:?}", res); // retrieve - let res = client.fine_tunes().retrieve("").await?; + let res = client.fine_tuning().retrieve("").await?; println!("{:?}", res); // cancel - let res = client.fine_tunes().cancel("").await?; + let res = client.fine_tuning().cancel("").await?; println!("{:?}", res); // retrieve_content // TODO: Since free accounts cannot read fine-tune event content, I have to verify this api until purchase a Plus. - let res = client.fine_tunes().retrieve_content("").await?; + let res = client.fine_tuning().retrieve_content("").await?; println!("{:?}", res); // retrieve_content_stream // TODO: Since free accounts cannot read fine-tune event content, I have to verify this api until purchase a Plus. - let mut stream = client.fine_tunes().retrieve_content_stream("").await?; + let mut stream = client.fine_tuning().retrieve_content_stream("").await?; let mut lock = stdout().lock(); while let Some(response) = stream.next().await { diff --git a/rs_openai/src/apis/fine_tunes.rs b/rs_openai/src/apis/fine_tunes.rs deleted file mode 100644 index 58477b5..0000000 --- a/rs_openai/src/apis/fine_tunes.rs +++ /dev/null @@ -1,119 +0,0 @@ -//! Manage fine-tuning jobs to tailor a model to your specific training data. -//! -//! Related guide: [Fine-tune models](https://platform.openai.com/docs/guides/fine-tuning) - -use crate::client::OpenAI; -use crate::interfaces::fine_tunes; -use crate::shared::response_wrapper::{OpenAIError, OpenAIResponse}; -use futures::Stream; -use std::pin::Pin; - -pub struct FineTunes<'a> { - openai: &'a OpenAI, -} - -impl<'a> FineTunes<'a> { - pub fn new(openai: &'a OpenAI) -> Self { - Self { openai } - } - - /// Creates a job that fine-tunes a specified model from a given dataset. 
- /// - /// OpenAIResponse includes details of the enqueued job including job status and the name of the fine-tuned models once complete. - /// - /// [Learn more about Fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) - pub async fn create( - &self, - req: &fine_tunes::CreateFineTuneRequest, - ) -> OpenAIResponse { - self.openai.post("/fine-tunes", req).await - } - - /// Gets info about the fine-tune job. - /// - /// # Path parameters - /// - /// - `fine_tune_id` - The ID of the fine-tune job - /// - /// [Learn more about Fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) - pub async fn retrieve( - &self, - fine_tune_id: &str, - ) -> OpenAIResponse { - self.openai - .get(&format!("/fine-tunes/{fine_tune_id}"), &()) - .await - } - - /// Immediately cancel a fine-tune job. - /// - /// # Path parameters - /// - /// - `fine_tune_id` - The ID of the fine-tune job to cancel - pub async fn cancel(&self, fine_tune_id: &str) -> OpenAIResponse { - self.openai - .post(&format!("/fine-tunes/{fine_tune_id}/cancel"), &()) - .await - } - - /// List your organization's fine-tuning jobs - pub async fn list(&self) -> OpenAIResponse { - self.openai.get("/fine-tunes", &()).await - } - - /// Get fine-grained status updates for a fine-tune job. - /// - /// Only events generated so far will be returned. - /// - /// # Path parameters - /// - /// - `fine_tune_id` - The ID of the fine-tune job to get events for. - /// - /// TODO: Since free accounts cannot read fine-tune event content, I have to verify this api until purchase a Plus. - pub async fn retrieve_content( - &self, - fine_tune_id: &str, - ) -> OpenAIResponse { - self.openai - .get(&format!("/fine-tunes/{fine_tune_id}/events"), &()) - .await - } - - /// Get fine-grained status updates for a fine-tune job by stream. 
- /// - /// Events will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available. - /// The stream will terminate with a `data: [DONE]` message when the job is finished (succeeded, cancelled, or failed). - /// - /// # Path parameters - /// - /// - `fine_tune_id` - The ID of the fine-tune job to get events for. - /// - /// TODO: Since free accounts cannot read fine-tune event content, I have to verify this api until purchase a Plus. - pub async fn retrieve_content_stream( - &self, - fine_tune_id: &str, - ) -> Result< - Pin> + Send>>, - OpenAIError, - > { - Ok(self - .openai - .get_stream( - &format!("/fine-tunes/{fine_tune_id}/events"), - &("stream", true), - ) - .await) - } - - /// Delete a fine-tuned model. You must have the Owner role in your organization. - /// - /// # Path parameters - /// - /// - `model` - The model to delete - pub async fn delete_model( - &self, - model: &str, - ) -> OpenAIResponse { - self.openai.delete(&format!("/models/{model}"), &()).await - } -} diff --git a/rs_openai/src/apis/fine_tuning.rs b/rs_openai/src/apis/fine_tuning.rs new file mode 100644 index 0000000..6992242 --- /dev/null +++ b/rs_openai/src/apis/fine_tuning.rs @@ -0,0 +1,93 @@ +//! Manage fine-tuning jobs to tailor a model to your specific training data. Related guide: [Fine-tune models](https://platform.openai.com/docs/guides/fine-tuning) + +use crate::client::OpenAI; +use crate::interfaces::fine_tuning; +use crate::shared::response_wrapper::OpenAIResponse; + +pub struct FineTuning<'a> { + openai: &'a OpenAI, +} + +impl<'a> FineTuning<'a> { + pub fn new(openai: &'a OpenAI) -> Self { + Self { openai } + } + + /// Creates a job that fine-tunes a specified model from a given dataset. + /// + /// OpenAIResponse includes details of the enqueued job including job status and the name of the fine-tuned models once complete. 
+ /// + /// [Learn more about Fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) + pub async fn create( + &self, + req: &fine_tuning::CreateFineTuningRequest, + ) -> OpenAIResponse { + self.openai.post("/fine-tuning/jobs", req).await + } + + /// List your organization's fine-tuning jobs + pub async fn list( + &self, + req: &fine_tuning::ListFineTuningRequest, + ) -> OpenAIResponse { + self.openai.get("/fine-tuning/jobs", req).await + } + + /// Get status updates for a fine-tuning job. + pub async fn list_events( + &self, + fine_tuning_job_id: &str, // The ID of the fine-tuning job to get events for. + req: &fine_tuning::ListFineTuningRequest, + ) -> OpenAIResponse { + self.openai + .get( + &format!("/fine-tuning/jobs/{fine_tuning_job_id}/events"), + req, + ) + .await + } + + /// List checkpoints for a fine-tuning job. + pub async fn list_checkpoints( + &self, + fine_tuning_job_id: &str, // The ID of the fine-tuning job to get checkpoints for. + req: &fine_tuning::ListFineTuningRequest, + ) -> OpenAIResponse { + self.openai + .get( + &format!("/fine-tuning/jobs/{fine_tuning_job_id}/checkpoints"), + req, + ) + .await + } + + /// Gets info about a fine-tuning job. + /// + /// # Path parameters + /// + /// - `fine_tuning_job_id` - The ID of the fine-tuning job + /// + /// [Learn more about Fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) + pub async fn retrieve( + &self, + fine_tuning_job_id: &str, + ) -> OpenAIResponse { + self.openai + .get(&format!("/fine-tuning/jobs/{fine_tuning_job_id}"), &()) + .await + } + + /// Immediately cancel a fine-tune job.
+ /// + /// # Path parameters + /// + /// - `fine_tuning_job_id` - The ID of the fine-tuning job to cancel + pub async fn cancel( + &self, + fine_tuning_job_id: &str, + ) -> OpenAIResponse { + self.openai + .post(&format!("/fine-tuning/jobs/{fine_tuning_job_id}/cancel"), &()) + .await + } +} diff --git a/rs_openai/src/apis/mod.rs b/rs_openai/src/apis/mod.rs index 198efd2..66b8cf2 100644 --- a/rs_openai/src/apis/mod.rs +++ b/rs_openai/src/apis/mod.rs @@ -7,7 +7,7 @@ pub mod edits; pub mod embeddings; pub mod engines; pub mod files; -pub mod fine_tunes; +pub mod fine_tuning; pub mod images; pub mod models; pub mod moderations; diff --git a/rs_openai/src/client.rs b/rs_openai/src/client.rs index 5003701..c639307 100644 --- a/rs_openai/src/client.rs +++ b/rs_openai/src/client.rs @@ -1,5 +1,5 @@ use crate::apis::{ - audio, chat, completions, edits, embeddings, engines, files, fine_tunes, images, models, + audio, chat, completions, edits, embeddings, engines, files, fine_tuning, images, models, moderations, }; use crate::shared::response_wrapper::{ApiErrorResponse, OpenAIError, OpenAIResponse}; @@ -273,8 +273,8 @@ impl OpenAI { files::Files::new(self) } - pub fn fine_tunes(&self) -> fine_tunes::FineTunes { - fine_tunes::FineTunes::new(self) + pub fn fine_tuning(&self) -> fine_tuning::FineTuning { + fine_tuning::FineTuning::new(self) } pub fn images(&self) -> images::Images { diff --git a/rs_openai/src/interfaces/fine_tunes.rs b/rs_openai/src/interfaces/fine_tunes.rs deleted file mode 100644 index 488bab1..0000000 --- a/rs_openai/src/interfaces/fine_tunes.rs +++ /dev/null @@ -1,163 +0,0 @@ -use crate::shared::response_wrapper::OpenAIError; -use derive_builder::Builder; -use serde::{Deserialize, Serialize}; - -#[derive(Builder, Clone, Debug, Default, Serialize)] -#[builder(name = "CreateFineTuneRequestBuilder")] -#[builder(pattern = "mutable")] -#[builder(setter(into, strip_option), default)] -#[builder(derive(Debug))] -#[builder(build_fn(error = "OpenAIError"))] -pub struct
CreateFineTuneRequest { - /// The ID of an uploaded file that contains training data. - /// - /// See [upload file](https://platform.openai.com/docs/api-reference/files/upload) for how to upload a file. - /// - /// - /// Your dataset must be formatted as a JSONL file, where each training example is a JSON object with the keys "prompt" and "completion". - /// Additionally, you must upload your file with the purpose `fine-tune`. - /// - /// - /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning/creating-training-data) for more details. - pub training_file: String, - - /// The ID of an uploaded file that contains validation data. - /// - /// If you provide this file, the data is used to generate validation metrics periodically during fine-tuning. - /// These metrics can be viewed in the [fine-tuning results file](https://platform.openai.com/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). - /// Your train and validation data should be mutually exclusive. - /// - /// Your dataset must be formatted as a JSONL file, where each validation example is a JSON object with the keys "prompt" and "completion". - /// Additionally, you must upload your file with the purpose `fine-tune`. - /// - /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning/creating-training-data) for more details. - #[serde(skip_serializing_if = "Option::is_none")] - pub validation_file: Option, - - /// The name of the base model to fine-tune. - /// You can select one of "ada", "babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21. - /// To learn more about these models, see the [Models](https://platform.openai.com/docs/models) documentation. - pub model: Option, - - /// The number of epochs to train the model for. - /// An epoch refers to one full cycle through the training dataset. - #[serde(skip_serializing_if = "Option::is_none")] - pub n_epochs: Option, - - /// The batch size to use for training. 
- /// The batch size is the number of training examples used to train a single forward and backward pass. - /// - /// By default, the batch size will be dynamically configured to be ~0.2% of the number of examples in the training set, capped at 256. - /// In general, we've found that larger batch sizes tend to work better for larger datasets. - #[serde(skip_serializing_if = "Option::is_none")] - pub batch_size: Option, - - /// The learning rate multiplier to use for training. - /// The fine-tuning learning rate is the original learning rate used for pretraining multiplied by this value. - /// - /// By default, the learning rate multiplier is 0.05, 0.1, or 0.2 depending on final `batch_size` (larger learning rates tend to perform better with larger batch sizes). - /// We recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best results. - #[serde(skip_serializing_if = "Option::is_none")] - pub learning_rate_multiplier: Option, - - /// The weight to use for loss on the prompt tokens. - /// This controls how much the model tries to learn to generate the prompt (as compared to the completion which always has a weight of 1.0), and can add a stabilizing effect to training when completions are short. - /// - /// If prompts are extremely long (relative to completions), it may make sense to reduce this weight so as to avoid over-prioritizing learning the prompt. - #[serde(skip_serializing_if = "Option::is_none")] - pub prompt_loss_weight: Option, - - /// If set, we calculate classification-specific metrics such as accuracy and F-1 score using the validation set at the end of every epoch. - /// These metrics can be viewed in the [results file](https://platform.openai.com/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). - /// - /// In order to compute classification metrics, you must provide a `validation_file`. 
- /// Additionally, you must specify `classification_n_classes` for multiclass classification or `classification_positive_class` for binary classification. - #[serde(skip_serializing_if = "Option::is_none")] - pub compute_classification_metrics: Option, - - /// The number of classes in a classification task. - /// - /// This parameter is required for multiclass classification. - #[serde(skip_serializing_if = "Option::is_none")] - pub classification_n_classes: Option, - - /// The positive class in binary classification. - /// - /// This parameter is needed to generate precision, recall, and F1 metrics when doing binary classification. - #[serde(skip_serializing_if = "Option::is_none")] - pub classification_positive_class: Option, - - /// If provided, we calculate F-beta scores at the specified beta values. The F-beta score is a generalization of F-1 score. This is only used for binary classification. - /// - /// With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight. A larger beta score puts more weight on recall and less on precision. A smaller beta score puts more weight on precision and less on recall. - #[serde(skip_serializing_if = "Option::is_none")] - classification_betas: Option>, - - /// A string of up to 40 characters that will be added to your fine-tuned model name. - /// - /// For example, a `suffix` of "custom-model-name" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. 
- #[serde(skip_serializing_if = "Option::is_none")] - suffix: Option, -} - -#[derive(Debug, Deserialize, Clone, Serialize)] -pub struct FineTuneResponse { - pub id: String, - pub object: String, - pub model: String, - pub created_at: u32, - pub events: Option>, - pub fine_tuned_model: Option, - pub hyperparams: HyperParams, - pub organization_id: String, - pub result_files: Vec, - pub status: String, - pub validation_files: Vec, - pub training_files: Vec, - pub updated_at: u32, -} - -#[derive(Debug, Deserialize, Clone, Serialize)] -pub struct FineTuneEvent { - pub object: String, - pub created_at: u32, - pub level: String, - pub message: String, -} - -#[derive(Debug, Deserialize, Clone, Serialize)] -pub struct HyperParams { - pub batch_size: u32, - pub learning_rate_multiplier: f32, - pub n_epochs: u32, - pub prompt_loss_weight: f32, -} - -#[derive(Debug, Deserialize, Clone, Serialize)] -pub struct TrainingFile { - pub id: String, - pub object: String, - pub bytes: u32, - pub created_at: u32, - pub filename: String, - pub purpose: String, -} - -#[derive(Debug, Deserialize, Clone, Serialize)] -pub struct FineTuneListResponse { - pub object: String, - pub data: Vec, -} - -#[derive(Debug, Deserialize, Clone, Serialize)] -pub struct EventListResponse { - pub object: String, - pub data: Vec, -} - -#[derive(Debug, Deserialize, Clone, Serialize)] -pub struct DeleteFileResponse { - pub id: String, - pub object: String, - pub deleted: bool, -} diff --git a/rs_openai/src/interfaces/fine_tuning.rs b/rs_openai/src/interfaces/fine_tuning.rs new file mode 100644 index 0000000..435e474 --- /dev/null +++ b/rs_openai/src/interfaces/fine_tuning.rs @@ -0,0 +1,222 @@ +use crate::shared::response_wrapper::OpenAIError; +use derive_builder::Builder; +use serde::{Deserialize, Serialize}; + +#[derive(Builder, Clone, Debug, Default, Serialize)] +#[builder(name = "CreateFineTuningRequestBuilder")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] 
+#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct CreateFineTuningRequest { + /// The name of the model to fine-tune. You can select one of the [supported models](https://platform.openai.com/docs/guides/fine-tuning/which-models-can-be-fine-tuned). + pub model: String, + + /// The ID of an uploaded file that contains training data. + /// + /// See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file. + /// + /// Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + /// + /// The contents of the file should differ depending on if the model uses the [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input) or [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format. + /// + /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + pub training_file: String, + + /// The hyperparameters used for the fine-tuning job. + #[serde(skip_serializing_if = "Option::is_none")] + pub hyperparameters: Option, + + /// A string of up to 40 characters that will be added to your fine-tuned model name. + /// + /// For example, a `suffix` of "custom-model-name" would produce a model name like `ada:ft-your-org:custom-model-name-2022-02-15-04-21-04`. + #[serde(skip_serializing_if = "Option::is_none")] + suffix: Option, // Defaults to null + + /// The ID of an uploaded file that contains validation data. + /// + /// If you provide this file, the data is used to generate validation metrics periodically during fine-tuning. + /// These metrics can be viewed in the [fine-tuning results file](https://platform.openai.com/docs/guides/fine-tuning/analyzing-your-fine-tuned-model). + /// Your train and validation data should be mutually exclusive. 
+ /// + /// Your dataset must be formatted as a JSONL file, where each validation example is a JSON object with the keys "prompt" and "completion". + /// Additionally, you must upload your file with the purpose `fine-tune`. + /// + /// See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning/creating-training-data) for more details. + #[serde(skip_serializing_if = "Option::is_none")] + pub validation_file: Option, + + /// A list of integrations to enable for your fine-tuning job. + #[serde(skip_serializing_if = "Option::is_none")] + pub integrations: Option>, + + /// The seed controls the reproducibility of the job. + /// Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. + /// If a seed is not specified, one will be generated for you. + #[serde(skip_serializing_if = "Option::is_none")] + pub seed: Option, +} + +#[derive(Debug, Deserialize, Clone, Serialize)] +pub struct Hyperparameters { + /// Number of examples in each batch. + /// A larger batch size means that model parameters are updated less frequently, but with lower variance. + #[serde(skip_serializing_if = "Option::is_none")] + pub batch_size: Option, // Defaults to auto + + /// Scaling factor for the learning rate. A smaller learning rate may be useful to avoid overfitting. + #[serde(skip_serializing_if = "Option::is_none")] + pub learning_rate_multiplier: Option, // Defaults to auto + + /// The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + #[serde(skip_serializing_if = "Option::is_none")] + pub n_epochs: Option, // Defaults to auto +} + +#[derive(Debug, Serialize, Deserialize, Clone, strum::Display)] +pub enum AutoOrInteger { + Auto(String), + Integer(f32), +} + +#[derive(Debug, Deserialize, Clone, Serialize)] +pub struct Integration { + /// The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. 
+ pub r#type: String, + + /// The settings for your integration with Weights and Biases. + /// This payload specifies the project that metrics will be sent to. + /// Optionally, you can set an explicit display name for your run, add tags to your run, and set a default entity (team, username, etc) to be associated with your run. + pub wandb: Wandb, +} + +#[derive(Debug, Deserialize, Clone, Serialize)] +pub struct Wandb { + /// The name of the project that the new run will be created under. + pub project: String, + + /// A display name to set for the run. If not set, we will use the Job ID as the name. + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + + /// The entity to use for the run. + /// This allows you to set the team or username of the WandB user that you would like associated with the run. + /// If not set, the default entity for the registered WandB API key is used. + #[serde(skip_serializing_if = "Option::is_none")] + pub entity: Option, + + /// A list of tags to be attached to the newly created run. + /// These tags are passed through directly to WandB. Some default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option>, +} + +/// The fine_tuning.job object represents a fine-tuning job that has been created through the API. +#[derive(Debug, Deserialize, Clone, Serialize)] +pub struct FineTuningResponse { + /// The object type, which is always "fine_tuning.job". + pub object: String, + /// The object identifier, which can be referenced in the API endpoints. + pub id: String, + /// The base model that is being fine-tuned. + pub model: String, + /// The Unix timestamp (in seconds) for when the fine-tuning job was created. + pub created_at: u64, + /// The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. 
+ pub finished_at: Option, + /// The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. + pub fine_tuned_model: Option, + /// The organization that owns the fine-tuning job. + pub organization_id: String, + /// The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + pub result_files: Vec, + /// The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. + pub status: String, + /// The file ID used for validation. You can retrieve the validation results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + pub validation_file: Option, + /// The file ID used for training. You can retrieve the training data with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). + pub training_file: String, + /// The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. + pub trained_tokens: Option, + /// The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. + pub hyperparameters: Hyperparameters, + /// A list of integrations to enable for this fine-tuning job. + pub integrations: Option>, + /// The seed used for the fine-tuning job. + pub seed: i32, + /// The Unix timestamp (in seconds) for when the fine-tuning job is estimated to finish. The value will be null if the fine-tuning job is not running. 
+ pub estimated_finish: Option, +} + +#[derive(Builder, Clone, Debug, Default, Serialize)] +#[builder(name = "ListFineTuningRequestBuilder")] +#[builder(pattern = "mutable")] +#[builder(setter(into, strip_option), default)] +#[builder(derive(Debug))] +#[builder(build_fn(error = "OpenAIError"))] +pub struct ListFineTuningRequest { + /// Identifier for the last job from the previous pagination request. + #[serde(skip_serializing_if = "Option::is_none")] + pub after: Option, + /// Number of fine-tuning jobs to retrieve. + #[serde(skip_serializing_if = "Option::is_none")] + pub limit: Option, // Defaults to 20 +} + +/// Fine-tuning job event object +#[derive(Debug, Deserialize, Clone, Serialize)] +pub struct FineTuningEventResponse { + pub object: String, + pub data: Vec, + pub has_more: bool, +} + +#[derive(Debug, Deserialize, Clone, Serialize)] +pub struct FineTuningEvent { + object: String, + id: String, + created_at: u64, + level: String, + message: String, + r#type: String, + data: Option, +} + +#[derive(Debug, Deserialize, Clone, Serialize)] +pub struct Metrics { + step: f64, + train_loss: f64, + train_mean_token_accuracy: f64, + valid_loss: f64, + valid_mean_token_accuracy: f64, + full_valid_loss: f64, + full_valid_mean_token_accuracy: f64, +} + +/// The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use. +#[derive(Debug, Deserialize, Clone, Serialize)] +pub struct Checkpoint { + /// The object type, which is always "fine_tuning.job.checkpoint". + object: String, + /// The checkpoint identifier, which can be referenced in the API endpoints. + id: String, + /// The Unix timestamp (in seconds) for when the checkpoint was created. + created_at: u64, + /// The name of the fine-tuned checkpoint model that is created. + fine_tuned_model_checkpoint: String, + /// Metrics at the step number during the fine-tuning job. + metrics: Metrics, + /// The name of the fine-tuning job that this checkpoint was created from. 
+ fine_tuning_job_id: String, + /// The step number that the checkpoint was created at. + step_number: u32, +} + +#[derive(Debug, Deserialize, Clone, Serialize)] +pub struct FineTuningCheckpointResponse { + object: String, + data: Vec, + first_id: String, + last_id: String, + has_more: bool, +} diff --git a/rs_openai/src/interfaces/mod.rs b/rs_openai/src/interfaces/mod.rs index 198efd2..66b8cf2 100644 --- a/rs_openai/src/interfaces/mod.rs +++ b/rs_openai/src/interfaces/mod.rs @@ -7,7 +7,7 @@ pub mod edits; pub mod embeddings; pub mod engines; pub mod files; -pub mod fine_tunes; +pub mod fine_tuning; pub mod images; pub mod models; pub mod moderations;