Skip to content

Commit

Permalink
Merge pull request #191 from open-webui/gemini-permissive-safety
Browse files Browse the repository at this point in the history
Add Gemini Manifold Pipeline Safety Settings
  • Loading branch information
justinh-rahb authored Aug 1, 2024
2 parents c76d24b + 7b784bc commit 3425667
Showing 1 changed file with 16 additions and 4 deletions.
20 changes: 16 additions & 4 deletions examples/pipelines/providers/google_manifold_pipeline.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
title: Google GenAI Manifold Pipeline
author: Marc Lopez (refactor by justinh-rahb)
date: 2024-06-06
version: 1.1
version: 1.2
license: MIT
description: A pipeline for generating text using Google's GenAI models in Open-WebUI.
requirements: google-generativeai
Expand All @@ -12,7 +12,7 @@
from typing import List, Union, Iterator
import os

from pydantic import BaseModel
from pydantic import BaseModel, Field

import google.generativeai as genai
from google.generativeai.types import GenerationConfig
class Valves(BaseModel):
    """Options to change from the WebUI."""

    # Google GenAI API key; populated from the GOOGLE_API_KEY env var by the
    # pipeline's __init__ (empty string means "not configured").
    GOOGLE_API_KEY: str = ""
    # When True, the pipeline overrides per-request safety settings with
    # BLOCK_NONE for every harm category (see the pipe() safety logic).
    USE_PERMISSIVE_SAFETY: bool = Field(default=False)

def __init__(self):
    """Initialize pipeline metadata, valves, and the Google GenAI client.

    Side effects: reads the GOOGLE_API_KEY environment variable and calls
    genai.configure() with it at construction time.
    """
    # "manifold" pipelines expose several models under one provider prefix.
    self.type = "manifold"
    self.id = "google_genai"
    self.name = "Google: "

    # Build valves from the environment so the API key is never hard-coded;
    # permissive safety is off by default and must be enabled via the WebUI.
    self.valves = self.Valves(
        **{
            "GOOGLE_API_KEY": os.getenv("GOOGLE_API_KEY", ""),
            "USE_PERMISSIVE_SAFETY": False,
        }
    )
    self.pipelines = []

    # NOTE(review): configures the client even when the key is empty; callers
    # presumably re-configure after valves are updated — confirm upstream.
    genai.configure(api_key=self.valves.GOOGLE_API_KEY)
Expand Down Expand Up @@ -135,7 +139,15 @@ def pipe(
stop_sequences=body.get("stop", []),
)

safety_settings = body.get("safety_settings")
if self.valves.USE_PERMISSIVE_SAFETY:
safety_settings = {
genai.types.HarmCategory.HARM_CATEGORY_HARASSMENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
genai.types.HarmCategory.HARM_CATEGORY_HATE_SPEECH: genai.types.HarmBlockThreshold.BLOCK_NONE,
genai.types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: genai.types.HarmBlockThreshold.BLOCK_NONE,
genai.types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
}
else:
safety_settings = body.get("safety_settings")

response = model.generate_content(
contents,
Expand Down

0 comments on commit 3425667

Please sign in to comment.