Skip to content

Commit

Permalink
feat: Add Gemini PDF Sample & Add Type for GenerationConfig (#11390)
Browse files Browse the repository at this point in the history
* Update gemini samples to use GenerationConfig instead of a dict for type safety.

* Add PDF Sample for Gemini Pro Vision 1.0

* 🦉 Updates from OwlBot post-processor

See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md

* Update sample PDF

* Fix lint error

* Fix test case

* Fix test case again

---------

Co-authored-by: Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>
  • Loading branch information
holtskinner and gcf-owl-bot[bot] authored Apr 3, 2024
1 parent b24c0d4 commit 89505e9
Show file tree
Hide file tree
Showing 7 changed files with 72 additions and 12 deletions.
13 changes: 8 additions & 5 deletions generative_ai/embedding_model_tuning.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ def tune_embedding_model(
train_label_path: str = "gs://embedding-customization-pipeline/dataset/train.tsv",
test_label_path: str = "gs://embedding-customization-pipeline/dataset/test.tsv",
batch_size: int = 50,
iterations: int = 300
iterations: int = 300,
) -> pipeline_jobs.PipelineJob:
match = re.search(r"(.+)(-autopush|-staging)?-aiplatform.+", api_endpoint)
location = match.group(1) if match else "us-central1"
Expand All @@ -50,14 +50,17 @@ def tune_embedding_model(
train_label_path=train_label_path,
test_label_path=test_label_path,
batch_size=batch_size,
iterations=iterations)
iterations=iterations,
),
)
job.submit()
return job


# [END aiplatform_sdk_embedding]
if __name__ == "__main__":
tune_embedding_model(aiplatform_init.global_config.api_endpoint,
aiplatform_init.global_config.project,
aiplatform_init.global_config.staging_bucket)
tune_embedding_model(
aiplatform_init.global_config.api_endpoint,
aiplatform_init.global_config.project,
aiplatform_init.global_config.staging_bucket,
)
9 changes: 6 additions & 3 deletions generative_ai/embedding_model_tuning_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,16 +35,19 @@ def dispose(job: pipeline_jobs.PipelineJob) -> None:

def test_tune_embedding_model() -> None:
credentials, _ = google.auth.default( # Set explicit credentials with Oauth scopes.
scopes=["https://www.googleapis.com/auth/cloud-platform"])
scopes=["https://www.googleapis.com/auth/cloud-platform"]
)
aiplatform.init(
api_endpoint="us-central1-aiplatform.googleapis.com:443",
project=os.getenv("GOOGLE_CLOUD_PROJECT"),
staging_bucket="gs://ucaip-samples-us-central1/training_pipeline_output",
credentials=credentials)
credentials=credentials,
)
job = embedding_model_tuning.tune_embedding_model(
aiplatform_init.global_config.api_endpoint,
aiplatform_init.global_config.project,
aiplatform_init.global_config.staging_bucket)
aiplatform_init.global_config.staging_bucket,
)
try:
assert job.state != "PIPELINE_STATE_FAILED"
finally:
Expand Down
3 changes: 2 additions & 1 deletion generative_ai/function_calling.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
from vertexai.generative_models import (
Content,
FunctionDeclaration,
GenerationConfig,
GenerativeModel,
Part,
Tool,
Expand Down Expand Up @@ -57,7 +58,7 @@ def generate_function_call(prompt: str, project_id: str, location: str) -> tuple
# Send the prompt and instruct the model to generate content using the Tool that you just created
response = model.generate_content(
user_prompt_content,
generation_config={"temperature": 0},
generation_config=GenerationConfig(temperature=0),
tools=[weather_tool],
)
response_function_call_content = response.candidates[0].content
Expand Down
41 changes: 41 additions & 0 deletions generative_ai/gemini_pdf_example.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# [START generativeaionvertexai_gemini_pdf]
import vertexai

from vertexai.generative_models import GenerativeModel, Part


def generate_text(project_id: str, location: str) -> str:
    """Extract the text of a sample PDF using Gemini 1.0 Pro Vision.

    Args:
        project_id: Google Cloud project to run the request under.
        location: Vertex AI region, e.g. "us-central1".

    Returns:
        The model's text response for the PDF.
    """
    # Set up the Vertex AI client for this project/region.
    vertexai.init(project=project_id, location=location)

    # Multimodal model that accepts PDF input via a GCS URI.
    model = GenerativeModel("gemini-1.0-pro-vision")

    # Build the multimodal prompt: the PDF part first, then the instruction.
    pdf_part = Part.from_uri(
        "gs://cloud-samples-data/vertex-ai/generative-ai/pdf/intake-form.pdf",
        mime_type="application/pdf",
    )
    response = model.generate_content([pdf_part, "Output the text in the PDF"])

    print(response)
    return response.text


# [END generativeaionvertexai_gemini_pdf]
6 changes: 4 additions & 2 deletions generative_ai/gemini_pro_config_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
import base64

import vertexai
from vertexai.generative_models import GenerativeModel, Part
from vertexai.generative_models import GenerationConfig, GenerativeModel, Part


def generate_text(project_id: str, location: str) -> None:
Expand All @@ -33,7 +33,9 @@ def generate_text(project_id: str, location: str) -> None:
)

# Generation Config
config = {"max_output_tokens": 2048, "temperature": 0.4, "top_p": 1, "top_k": 32}
config = GenerationConfig(
max_output_tokens=2048, temperature=0.4, top_p=1, top_k=32
)

# Generate text
response = model.generate_content(
Expand Down
4 changes: 3 additions & 1 deletion generative_ai/gemini_safety_config_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,9 @@ def generate_text(project_id: str, location: str, image: str) -> str:
model = generative_models.GenerativeModel("gemini-1.0-pro-vision")

# Generation config
config = {"max_output_tokens": 2048, "temperature": 0.4, "top_p": 1, "top_k": 32}
config = generative_models.GenerationConfig(
max_output_tokens=2048, temperature=0.4, top_p=1, top_k=32
)

# Safety config
safety_config = {
Expand Down
8 changes: 8 additions & 0 deletions generative_ai/test_gemini_examples.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
import gemini_grounding_example
import gemini_guide_example
import gemini_multi_image_example
import gemini_pdf_example
import gemini_pro_basic_example
import gemini_pro_config_example
import gemini_safety_config_example
Expand Down Expand Up @@ -121,6 +122,13 @@ def test_gemini_single_turn_video_example() -> None:
assert any([_ in text for _ in ("zoo", "tiger", "leaf", "water")])


def test_gemini_pdf_example() -> None:
    """Smoke-test the Gemini PDF sample.

    Asserts the model returned non-empty text that mentions content known to
    be in the sample intake-form PDF.
    """
    text = gemini_pdf_example.generate_text(PROJECT_ID, LOCATION).lower()
    assert len(text) > 0
    # Use a generator expression (not a list) so any() short-circuits on the
    # first match, and a descriptive loop variable instead of `_`.
    assert any(keyword in text for keyword in ("intake", "form", "software"))


def test_gemini_chat_example() -> None:
text = gemini_chat_example.chat_text_example(PROJECT_ID, LOCATION)
text = text.lower()
Expand Down

0 comments on commit 89505e9

Please sign in to comment.