Skip to content

Commit

Permalink
feat: update moderations apis
Browse files Browse the repository at this point in the history
  • Loading branch information
YanceyOfficial committed Oct 23, 2024
1 parent ce44e39 commit c59323e
Show file tree
Hide file tree
Showing 2 changed files with 153 additions and 42 deletions.
6 changes: 2 additions & 4 deletions rs_openai/src/apis/moderations.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,4 @@
//! Given a input text, outputs if the model classifies it as violating OpenAI's content policy.
//!
//! Related guide: [Moderations](https://platform.openai.com/docs/guides/moderation)
//! Given an input text, outputs if the model classifies it as violating OpenAI's content policy. Related guide: [Moderations](https://platform.openai.com/docs/guides/moderation)
use crate::client::OpenAI;
use crate::interfaces::moderations;
Expand All @@ -15,7 +13,7 @@ impl<'a> Moderations<'a> {
Self { openai }
}

/// Classifies if text violates OpenAI's Content Policy.
/// Classifies if text and/or image inputs are potentially harmful. Learn more in the [moderation guide](https://platform.openai.com/docs/guides/moderation).
pub async fn create(
&self,
req: &moderations::CreateModerationRequest,
Expand Down
189 changes: 151 additions & 38 deletions rs_openai/src/interfaces/moderations.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,82 +2,195 @@ use crate::shared::response_wrapper::OpenAIError;
use derive_builder::Builder;
use serde::{Deserialize, Serialize};

// NOTE(review): this span appears to be the *removed* side of the diff capture —
// `ModerationInput` is re-declared (with an extra variant) further below, and
// `ModerationModel` is no longer referenced once `model` becomes
// `Option<String>`. Confirm against the real file before keeping both copies.
#[derive(Debug, Serialize, Clone)]
#[serde(untagged)]
pub enum ModerationInput {
    /// A single string of text to classify.
    String(String),
    /// A batch of strings to classify.
    ArrayOfString(Vec<String>),
}

// Legacy text-moderation model selector (superseded by a free-form `String`
// model field defaulting to `omni-moderation-latest`).
#[derive(Debug, Serialize, Default, Clone)]
pub enum ModerationModel {
    /// `text-moderation-latest`: automatically upgraded over time.
    #[default]
    #[serde(rename = "text-moderation-latest")]
    Latest,
    /// `text-moderation-stable`: advance notice is given before updates.
    #[serde(rename = "text-moderation-stable")]
    Stable,
}

#[derive(Builder, Clone, Debug, Default, Serialize)]
#[builder(name = "CreateModerationRequestBuilder")]
#[builder(pattern = "mutable")]
#[builder(setter(into, strip_option), default)]
#[builder(derive(Debug))]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CreateModerationRequest {
/// The input text to classify.
/// Input (or inputs) to classify. Can be a single string, an array of strings, or an array of multi-modal input objects similar to other models.
pub input: ModerationInput,

/// Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`.
///
/// The default is `text-moderation-latest` which will be automatically upgraded over time.
/// This ensures you are always using our most accurate model.
/// If you use `text-moderation-stable`, we will provide advanced notice before updating the model.
/// Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
/// The content moderation model you would like to use. Learn more in [the moderation guide](https://platform.openai.com/docs/guides/moderation), and learn about available models [here](https://platform.openai.com/docs/models/moderation).
#[serde(skip_serializing_if = "Option::is_none")]
pub model: Option<ModerationModel>, // default: "text-moderation-latest"
pub model: Option<String>, // default: "omni-moderation-latest"
}

/// The input payload accepted by the moderations endpoint.
///
/// Serialized untagged, so whichever variant is held is emitted directly
/// (a JSON string, an array of strings, or an array of input objects).
#[derive(Clone, Debug, Serialize)]
#[serde(untagged)]
pub enum ModerationInput {
    /// A single string of text to classify for moderation.
    String(String),
    /// A batch of strings to classify for moderation.
    ArrayOfString(Vec<String>),
    /// A batch of multi-modal inputs to the moderation model.
    ArrayOfMultiModalInput(Vec<ArrayOfMultiModalInput>),
}

#[derive(Debug, Serialize, Clone)]
pub enum ArrayOfMultiModalInput {
/// An object describing an image to classify.
Image(Image),
/// An object describing text to classify.
Text(Text),
}

/// An image input to the moderation model.
#[derive(Clone, Debug, Serialize)]
pub struct Image {
    /// Discriminator field for the API; always the literal string `image_url`.
    pub r#type: String,
    /// Contains either an image URL or a data URL for a base64 encoded image.
    pub image_url: ImageUrl,
}

/// The location of an image to classify.
#[derive(Clone, Debug, Serialize)]
pub struct ImageUrl {
    /// Either a URL of the image or the base64 encoded image data.
    pub url: String,
}

/// A text input to the moderation model.
#[derive(Clone, Debug, Serialize)]
pub struct Text {
    /// Discriminator field for the API; always the literal string `text`.
    pub r#type: String,
    /// A string of text to classify.
    pub text: String,
}

/// Represents if a given text input is potentially harmful.
#[derive(Debug, Deserialize, Clone, Serialize)]
pub struct ModerationResponse {
    /// The unique identifier for the moderation request.
    pub id: String,
    /// The model used to generate the moderation results.
    // (was a copy-paste of the `id` doc in the original)
    pub model: String,
    /// A list of moderation objects.
    pub results: Vec<ModerationCategory>,
}

#[derive(Debug, Deserialize, Clone, Serialize)]
pub struct ModerationCategory {
pub categories: ModerationCategories,
pub category_scores: ModerationCategoryScores,
/// Whether any of the below categories are flagged.
pub flagged: bool,
/// A list of the categories, and whether they are flagged or not.
pub categories: Categories,
/// A list of the categories along with their scores as predicted by model.
pub category_scores: CategoryScores,
/// A list of the categories along with the input type(s) that the score applies to.
pub category_applied_input_types: CategoryAppliedInputTypes,
}

#[derive(Debug, Deserialize, Clone, Serialize)]
pub struct ModerationCategories {
pub struct Categories {
/// Content that expresses, incites, or promotes harassing language towards any target.
pub harassment: bool,
/// Harassment content that also includes violence or serious harm towards any target.
#[serde(rename = "harassment/threatening")]
pub harassment_threatening: bool,
/// Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).
pub sexual: bool,
/// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment.
pub hate: bool,
pub violence: bool,
#[serde(rename = "hate/threatening")]
/// Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.
pub hate_threatening: bool,
/// Content that includes instructions or advice that facilitate the planning or execution of wrongdoing, or that gives advice or instruction on how to commit illicit acts. For example, "how to shoplift" would fit this category.
pub illicit: bool,
/// Content that includes instructions or advice that facilitate the planning or execution of wrongdoing that also includes violence, or that gives advice or instruction on the procurement of any weapon.
#[serde(rename = "illicit/violent")]
pub illicit_violent: bool,
/// Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders.
#[serde(rename = "self-harm/intent")]
pub self_harm_intent: bool,
/// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts.
#[serde(rename = "self-harm/instructions")]
pub self_harm_instructions: bool,
/// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.
#[serde(rename = "self-harm")]
pub self_harm: bool,
/// Sexual content that includes an individual who is under 18 years old.
#[serde(rename = "sexual/minors")]
pub sexual_minors: bool,
#[serde(rename = "hate/threatening")]
pub hate_threatening: bool,
/// Content that depicts death, violence, or physical injury.
pub violence: bool,
/// Content that depicts death, violence, or physical injury in graphic detail.
#[serde(rename = "violence/graphic")]
pub violence_graphic: bool,
}

#[derive(Debug, Deserialize, Clone, Serialize)]
pub struct ModerationCategoryScores {
pub sexual: f32,
pub hate: f32,
pub violence: f32,
pub struct CategoryScores {
/// The score for the category 'harassment'.
pub harassment: f64,
/// The score for the category 'harassment/threatening'.
#[serde(rename = "harassment/threatening")]
pub harassment_threatening: f64,
/// The score for the category 'sexual'.
pub sexual: f64,
/// The score for the category 'hate'.
pub hate: f64,
/// The score for the category 'hate/threatening'.
#[serde(rename = "hate/threatening")]
pub hate_threatening: f64,
/// The score for the category 'illicit'.
pub illicit: f64,
#[serde(rename = "illicit/violent")]
/// The score for the category 'illicit/violent'.
pub illicit_violent: f64,
/// The score for the category 'self-harm/intent'.
#[serde(rename = "self-harm/intent")]
pub self_harm_intent: f64,
/// The score for the category 'self-harm/instructions'.
#[serde(rename = "self-harm/instructions")]
pub self_harm_instructions: f64,
/// The score for the category 'self-harm'.
#[serde(rename = "self-harm")]
pub self_harm: f32,
pub self_harm: f64,
/// The score for the category 'sexual/minors'.
#[serde(rename = "sexual/minors")]
pub sexual_minors: f32,
pub sexual_minors: f64,
/// The score for the category 'violence'.
pub violence: f64,
/// The score for the category 'violence/graphic'.
#[serde(rename = "violence/graphic")]
pub violence_graphic: f64,
}

/// A list of the categories along with the input type(s) that the score applies to.
// Fixed: the diff-fused source duplicated `hate_threatening` and
// `violence_graphic` (stale `f32` copies alongside the new `Vec<String>`
// fields), and used `//` instead of `///` on the `hate` doc; every field now
// appears exactly once as `Vec<String>`.
#[derive(Debug, Deserialize, Clone, Serialize)]
pub struct CategoryAppliedInputTypes {
    /// The applied input type(s) for the category 'harassment'.
    pub harassment: Vec<String>,
    /// The applied input type(s) for the category 'harassment/threatening'.
    #[serde(rename = "harassment/threatening")]
    pub harassment_threatening: Vec<String>,
    /// The applied input type(s) for the category 'sexual'.
    pub sexual: Vec<String>,
    /// The applied input type(s) for the category 'hate'.
    pub hate: Vec<String>,
    /// The applied input type(s) for the category 'hate/threatening'.
    #[serde(rename = "hate/threatening")]
    pub hate_threatening: Vec<String>,
    /// The applied input type(s) for the category 'illicit'.
    pub illicit: Vec<String>,
    /// The applied input type(s) for the category 'illicit/violent'.
    #[serde(rename = "illicit/violent")]
    pub illicit_violent: Vec<String>,
    /// The applied input type(s) for the category 'self-harm/intent'.
    #[serde(rename = "self-harm/intent")]
    pub self_harm_intent: Vec<String>,
    /// The applied input type(s) for the category 'self-harm/instructions'.
    #[serde(rename = "self-harm/instructions")]
    pub self_harm_instructions: Vec<String>,
    /// The applied input type(s) for the category 'self-harm'.
    #[serde(rename = "self-harm")]
    pub self_harm: Vec<String>,
    /// The applied input type(s) for the category 'sexual/minors'.
    #[serde(rename = "sexual/minors")]
    pub sexual_minors: Vec<String>,
    /// The applied input type(s) for the category 'violence'.
    pub violence: Vec<String>,
    /// The applied input type(s) for the category 'violence/graphic'.
    #[serde(rename = "violence/graphic")]
    pub violence_graphic: Vec<String>,
}

0 comments on commit c59323e

Please sign in to comment.