From e9b0cdefb66bb4efa8bccef4809b7c8bd7d578b2 Mon Sep 17 00:00:00 2001
From: Mark Daoust
Date: Tue, 15 Oct 2024 18:13:20 -0700
Subject: [PATCH] Remove references to Pro 1.0 (#600)

* remove references to 1.0-pro

Change-Id: I405c87d495c73550cfbd00a13249cb1e30ab0989

* remove references to gemini-pro

Change-Id: Ied2f0b7112dd5d61390da3e84457a2fb3f770665

* Update models.py

* format

Change-Id: Ib3a0c90bfc6ec7f8f793917b3140769e2635a8e9
---
 docs/api/google/generativeai/ChatSession.md  |  4 +-
 .../google/generativeai/GenerativeModel.md   |  8 +--
 docs/api/google/generativeai/get_model.md    |  2 +-
 google/generativeai/generative_models.py     | 10 ++--
 google/generativeai/models.py                |  4 +-
 google/generativeai/notebook/text_model.py   |  2 +-
 samples/rest/tuned_models.sh                 |  2 +-
 tests/test_generative_models.py              | 50 ++++++++++---------
 tests/test_generative_models_async.py        |  6 +--
 9 files changed, 46 insertions(+), 42 deletions(-)

diff --git a/docs/api/google/generativeai/ChatSession.md b/docs/api/google/generativeai/ChatSession.md
index 3898a2ef1..ac58e2e4b 100644
--- a/docs/api/google/generativeai/ChatSession.md
+++ b/docs/api/google/generativeai/ChatSession.md
@@ -39,7 +39,7 @@ Contains an ongoing conversation with the model.
 
 ```
->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
 >>> chat = model.start_chat()
 >>> response = chat.send_message("Hello")
 >>> print(response.text)
@@ -136,7 +136,7 @@ Sends the conversation history with the added message and returns the model's response.
 Appends the request and response to the conversation history.
 
 ```
->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
 >>> chat = model.start_chat()
 >>> response = chat.send_message("Hello")
 >>> print(response.text)
diff --git a/docs/api/google/generativeai/GenerativeModel.md b/docs/api/google/generativeai/GenerativeModel.md
index 9b9e7ff6f..71f293ebe 100644
--- a/docs/api/google/generativeai/GenerativeModel.md
+++ b/docs/api/google/generativeai/GenerativeModel.md
@@ -31,7 +31,7 @@ The `genai.GenerativeModel` class wraps default parameters for calls to
 
 google.generativeai.GenerativeModel(
-    model_name: str = 'gemini-pro',
+    model_name: str = 'gemini-1.5-flash',
     safety_settings: (safety_types.SafetySettingOptions | None) = None,
     generation_config: (generation_types.GenerationConfigType | None) = None,
     tools: (content_types.FunctionLibraryType | None) = None,
@@ -51,7 +51,7 @@ requests. What media-types are supported for input and output is model-dependant
 >>> import google.generativeai as genai
 >>> import PIL.Image
 >>> genai.configure(api_key='YOUR_API_KEY')
->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
 >>> result = model.generate_content('Tell me a story about a magic backpack')
 >>> result.text
 "In the quaint little town of Lakeside, there lived a young girl named Lily..."
 ```
@@ -62,7 +62,7 @@
 
 ```
->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
 >>> result = model.generate_content([
 ...     "Give me a recipe for these:", PIL.Image.open('scones.jpeg')])
 >>> result.text
@@ -270,7 +270,7 @@ This `GenerativeModel.generate_content` method can handle multimodal input, and
 
->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
 >>> response = model.generate_content('Tell me a story about a magic backpack')
 >>> response.text
 ```
diff --git a/docs/api/google/generativeai/get_model.md b/docs/api/google/generativeai/get_model.md
index e488dbfaa..40e5b0e46 100644
--- a/docs/api/google/generativeai/get_model.md
+++ b/docs/api/google/generativeai/get_model.md
@@ -38,7 +38,7 @@ Calls the API to fetch a model by name.
 
 ```
 import pprint
-model = genai.get_model('models/gemini-pro')
+model = genai.get_model('models/gemini-1.5-flash')
 pprint.pprint(model)
 ```
diff --git a/google/generativeai/generative_models.py b/google/generativeai/generative_models.py
index 134430b2e..8d331a9f6 100644
--- a/google/generativeai/generative_models.py
+++ b/google/generativeai/generative_models.py
@@ -36,14 +36,14 @@ class GenerativeModel:
     >>> import google.generativeai as genai
     >>> import PIL.Image
     >>> genai.configure(api_key='YOUR_API_KEY')
-    >>> model = genai.GenerativeModel('models/gemini-pro')
+    >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
     >>> result = model.generate_content('Tell me a story about a magic backpack')
     >>> result.text
     "In the quaint little town of Lakeside, there lived a young girl named Lily..."
 
     Multimodal input:
 
-    >>> model = genai.GenerativeModel('models/gemini-pro')
+    >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
     >>> result = model.generate_content([
     ...     "Give me a recipe for these:", PIL.Image.open('scones.jpeg')])
     >>> result.text
@@ -250,7 +250,7 @@ def generate_content(
         This `GenerativeModel.generate_content` method can handle multimodal input, and
         multi-turn conversations.
 
-        >>> model = genai.GenerativeModel('models/gemini-pro')
+        >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
         >>> response = model.generate_content('Tell me a story about a magic backpack')
         >>> response.text
 
@@ -481,7 +481,7 @@ def start_chat(
 class ChatSession:
     """Contains an ongoing conversation with the model.
 
-    >>> model = genai.GenerativeModel('models/gemini-pro')
+    >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
     >>> chat = model.start_chat()
     >>> response = chat.send_message("Hello")
     >>> print(response.text)
@@ -524,7 +524,7 @@ def send_message(
         Appends the request and response to the conversation history.
 
-        >>> model = genai.GenerativeModel('models/gemini-pro')
+        >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
         >>> chat = model.start_chat()
         >>> response = chat.send_message("Hello")
         >>> print(response.text)
diff --git a/google/generativeai/models.py b/google/generativeai/models.py
index 4b95a2470..b23a7ce88 100644
--- a/google/generativeai/models.py
+++ b/google/generativeai/models.py
@@ -40,7 +40,7 @@ def get_model(
 
     ```
     import pprint
-    model = genai.get_model('models/gemini-pro')
+    model = genai.get_model('models/gemini-1.5-flash')
     pprint.pprint(model)
     ```
@@ -112,7 +112,7 @@ def get_tuned_model(
 
     ```
     import pprint
-    model = genai.get_tuned_model('tunedModels/gemini-1.0-pro-001')
+    model = genai.get_tuned_model('tunedModels/gemini-1.5-flash')
     pprint.pprint(model)
     ```
diff --git a/google/generativeai/notebook/text_model.py b/google/generativeai/notebook/text_model.py
index 38375e348..7360bbfbd 100644
--- a/google/generativeai/notebook/text_model.py
+++ b/google/generativeai/notebook/text_model.py
@@ -20,7 +20,7 @@
 from google.generativeai.types import generation_types
 from google.generativeai.notebook.lib import model as model_lib
 
-_DEFAULT_MODEL = "models/gemini-pro"
+_DEFAULT_MODEL = "models/gemini-1.5-flash"
 
 
 class TextModel(model_lib.AbstractModel):
diff --git a/samples/rest/tuned_models.sh b/samples/rest/tuned_models.sh
index 9b652febd..5594734f6 100644
--- a/samples/rest/tuned_models.sh
+++ b/samples/rest/tuned_models.sh
@@ -7,7 +7,7 @@ curl -X POST "https://generativelanguage.googleapis.com/v1beta/tunedModels?key=$
     -d '
       {
         "display_name": "number generator model",
-        "base_model": "models/gemini-1.0-pro-001",
+        "base_model": "models/gemini-1.5-flash-001-tuning",
         "tuning_task": {
           "hyperparameters": {
             "batch_size": 2,
diff --git a/tests/test_generative_models.py b/tests/test_generative_models.py
index fa69099ba..74469e5b8 100644
--- a/tests/test_generative_models.py
+++ b/tests/test_generative_models.py
@@ -115,7 +115,7 @@ def setUp(self):
 
     def test_hello(self):
         # Generate text from text prompt
-        model = generative_models.GenerativeModel(model_name="gemini-pro")
+        model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")
 
         self.responses["generate_content"].append(simple_response("world!"))
 
@@ -138,7 +138,7 @@ def test_hello(self):
     )
     def test_image(self, content):
         # Generate text from image
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
 
         cat = "It's a cat"
         self.responses["generate_content"].append(simple_response(cat))
@@ -172,7 +172,7 @@ def test_image(self, content):
     )
     def test_generation_config_overwrite(self, config1, config2):
         # Generation config
-        model = generative_models.GenerativeModel("gemini-pro", generation_config=config1)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", generation_config=config1)
 
         self.responses["generate_content"] = [
             simple_response(" world!"),
@@ -218,7 +218,7 @@ def test_generation_config_overwrite(self, config1, config2):
     )
     def test_safety_overwrite(self, safe1, safe2):
         # Safety
-        model = generative_models.GenerativeModel("gemini-pro", safety_settings=safe1)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", safety_settings=safe1)
 
         self.responses["generate_content"] = [
             simple_response(" world!"),
@@ -253,7 +253,7 @@ def test_stream_basic(self):
         chunks = ["first", " second", " third"]
         self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)
 
         self.assertEqual(self.observed_requests[0].contents[0].parts[0].text, "Hello")
@@ -267,7 +267,7 @@ def test_stream_lookahead(self):
         chunks = ["first", " second", " third"]
         self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)
 
         self.assertEqual(self.observed_requests[0].contents[0].parts[0].text, "Hello")
@@ -287,7 +287,7 @@ def test_stream_prompt_feedback_blocked(self):
         ]
         self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Bad stuff!", stream=True)
 
         self.assertEqual(
@@ -322,7 +322,7 @@ def test_stream_prompt_feedback_not_blocked(self):
         ]
         self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)
 
         self.assertEqual(
@@ -389,7 +389,7 @@ def add(a: int, b: int) -> int:
 
     def test_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         self.responses["generate_content"] = [
@@ -423,7 +423,7 @@ def test_chat(self):
     def test_chat_roles(self):
         self.responses["generate_content"] = [simple_response("hello!")]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
         response = chat.send_message("hello?")
         history = chat.history
@@ -792,7 +792,7 @@ def test_tool_config(self, tool_config, expected_tool_config):
         )
         self.responses["generate_content"] = [simple_response("echo echo")]
 
-        model = generative_models.GenerativeModel("gemini-pro", tools=tools)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", tools=tools)
         _ = model.generate_content("Hello", tools=[tools], tool_config=tool_config)
 
         req = self.observed_requests[0]
@@ -811,7 +811,9 @@ def test_tool_config(self, tool_config, expected_tool_config):
     )
     def test_system_instruction(self, instruction, expected_instr):
         self.responses["generate_content"] = [simple_response("echo echo")]
-        model = generative_models.GenerativeModel("gemini-pro", system_instruction=instruction)
+        model = generative_models.GenerativeModel(
+            "gemini-1.5-flash", system_instruction=instruction
+        )
 
         _ = model.generate_content("test")
 
@@ -852,7 +854,7 @@ def test_count_tokens_smoke(self, kwargs):
     )
 
     def test_repr_for_unary_non_streamed_response(self):
-        model = generative_models.GenerativeModel(model_name="gemini-pro")
+        model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")
         self.responses["generate_content"].append(simple_response("world!"))
         response = model.generate_content("Hello")
 
@@ -885,7 +887,7 @@ def test_repr_for_streaming_start_to_finish(self):
         chunks = ["first", " second", " third"]
         self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Hello", stream=True)
 
         iterator = iter(response)
@@ -980,7 +982,7 @@ def test_repr_error_info_for_stream_prompt_feedback_blocked(self):
         ]
         self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         response = model.generate_content("Bad stuff!", stream=True)
 
         result = repr(response)
@@ -1096,7 +1098,7 @@ def test_repr_error_info_for_chat_streaming_unexpected_stop(self):
 
     def test_repr_for_multi_turn_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         self.responses["generate_content"] = [
@@ -1119,7 +1121,7 @@ def test_repr_for_multi_turn_chat(self):
             """\
             ChatSession(
                 model=genai.GenerativeModel(
-                    model_name='models/gemini-pro',
+                    model_name='models/gemini-1.5-flash',
                     generation_config={},
                     safety_settings={},
                     tools=None,
@@ -1133,7 +1135,7 @@ def test_repr_for_incomplete_streaming_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         self.responses["stream_generate_content"] = [
@@ -1148,7 +1150,7 @@ def test_repr_for_incomplete_streaming_chat(self):
             """\
             ChatSession(
                 model=genai.GenerativeModel(
-                    model_name='models/gemini-pro',
+                    model_name='models/gemini-1.5-flash',
                     generation_config={},
                     safety_settings={},
                     tools=None,
@@ -1162,7 +1164,7 @@ def test_repr_for_broken_streaming_chat(self):
         # Multi turn chat
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         self.responses["stream_generate_content"] = [
@@ -1193,7 +1195,7 @@ def test_repr_for_broken_streaming_chat(self):
             """\
             ChatSession(
                 model=genai.GenerativeModel(
-                    model_name='models/gemini-pro',
+                    model_name='models/gemini-1.5-flash',
                     generation_config={},
                     safety_settings={},
                     tools=None,
@@ -1206,7 +1208,9 @@ def test_repr_for_broken_streaming_chat(self):
         self.assertEqual(expected, result)
 
     def test_repr_for_system_instruction(self):
-        model = generative_models.GenerativeModel("gemini-pro", system_instruction="Be excellent.")
+        model = generative_models.GenerativeModel(
+            "gemini-1.5-flash", system_instruction="Be excellent."
+        )
         result = repr(model)
         self.assertIn("system_instruction='Be excellent.'", result)
 
@@ -1237,7 +1241,7 @@ def test_chat_with_request_options(self):
         )
         request_options = {"timeout": 120}
 
-        model = generative_models.GenerativeModel("gemini-pro")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
         chat.send_message("hello", request_options=helper_types.RequestOptions(**request_options))
 
diff --git a/tests/test_generative_models_async.py b/tests/test_generative_models_async.py
index dd9bc3b62..b37c65235 100644
--- a/tests/test_generative_models_async.py
+++ b/tests/test_generative_models_async.py
@@ -80,7 +80,7 @@ async def count_tokens(
 
     async def test_basic(self):
         # Generate text from text prompt
-        model = generative_models.GenerativeModel(model_name="gemini-pro")
+        model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")
 
         self.responses["generate_content"] = [simple_response("world!")]
 
@@ -93,7 +93,7 @@ async def test_basic(self):
 
     async def test_streaming(self):
         # Generate text from text prompt
-        model = generative_models.GenerativeModel(model_name="gemini-pro")
+        model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")
 
         async def responses():
             for c in "world!":
@@ -195,7 +195,7 @@ async def test_tool_config(self, tool_config, expected_tool_config):
         )
         self.responses["generate_content"] = [simple_response("echo echo")]
 
-        model = generative_models.GenerativeModel("gemini-pro", tools=tools)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", tools=tools)
         _ = await model.generate_content_async("Hello", tools=[tools], tool_config=tool_config)
 
         req = self.observed_requests[0]
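
The change is mechanical: every default or example model name moves from `gemini-pro` (and the tuned `gemini-1.0-pro-001` variants) to `gemini-1.5-flash`, with `models/gemini-1.5-flash-001-tuning` as the REST tuning base model; no call signatures change. For downstream users, a minimal sketch of the calling pattern after this patch, built only from APIs the patch itself touches (`genai.configure`, `GenerativeModel`, `generate_content`, `start_chat`, `send_message`) — the `GEMINI_API_KEY` environment variable name and the prompt strings are illustrative assumptions, not part of the patch:

```python
import os

import google.generativeai as genai

# Assumption: the API key is provided via an environment variable.
genai.configure(api_key=os.environ["GEMINI_API_KEY"])

# 'gemini-1.5-flash' is the model name this patch standardizes on.
model = genai.GenerativeModel("gemini-1.5-flash")

# Single-turn generation, as in the GenerativeModel docstrings above.
response = model.generate_content("Tell me a story about a magic backpack")
print(response.text)

# Multi-turn chat, as in the ChatSession docstrings above.
chat = model.start_chat()
reply = chat.send_message("Hello")
print(reply.text)
```

Code that pins `gemini-pro` explicitly is unaffected by the library's new default and must be updated at each call site with the same one-line substitution the tests above demonstrate.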