Remove references to Pro 1.0 #600

Merged · 4 commits · Oct 16, 2024
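This PR replaces the remaining Gemini 1.0 Pro (`gemini-pro`) references across the docs, library defaults, samples, and tests with `gemini-1.5-flash`. As a minimal sketch of the updated call pattern (assuming the `google-generativeai` package is installed and a valid API key is available):

```
import google.generativeai as genai

genai.configure(api_key="YOUR_API_KEY")  # placeholder key
# 'models/gemini-1.5-flash' replaces the retired 'models/gemini-pro'
model = genai.GenerativeModel("models/gemini-1.5-flash")
response = model.generate_content("Hello")
print(response.text)
```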
4 changes: 2 additions & 2 deletions docs/api/google/generativeai/ChatSession.md
@@ -39,7 +39,7 @@ Contains an ongoing conversation with the model.
<!-- Placeholder for "Used in" -->

```
->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
>>> chat = model.start_chat()
>>> response = chat.send_message("Hello")
>>> print(response.text)
@@ -136,7 +136,7 @@ Sends the conversation history with the added message and returns the model's re
Appends the request and response to the conversation history.

```
->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
>>> chat = model.start_chat()
>>> response = chat.send_message("Hello")
>>> print(response.text)
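The `ChatSession` docstring above shows a single turn. As a hedged sketch of a multi-turn session (reusing the `model` from the example), the history is carried between calls automatically:

```
chat = model.start_chat()
chat.send_message("Hello")
chat.send_message("What did I just say?")  # prior turns are resent automatically
print(len(chat.history))  # 4 entries: two user turns, two model replies
```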
8 changes: 4 additions & 4 deletions docs/api/google/generativeai/GenerativeModel.md
@@ -31,7 +31,7 @@ The `genai.GenerativeModel` class wraps default parameters for calls to <a href=

<pre class="devsite-click-to-copy prettyprint lang-py tfo-signature-link">
<code>google.generativeai.GenerativeModel(
-model_name: str = &#x27;gemini-pro&#x27;,
+model_name: str = &#x27;gemini-1.5-flash&#x27;,
safety_settings: (safety_types.SafetySettingOptions | None) = None,
generation_config: (generation_types.GenerationConfigType | None) = None,
tools: (content_types.FunctionLibraryType | None) = None,
@@ -51,7 +51,7 @@ requests. What media-types are supported for input and output is model-dependant
>>> import google.generativeai as genai
>>> import PIL.Image
>>> genai.configure(api_key='YOUR_API_KEY')
->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
>>> result = model.generate_content('Tell me a story about a magic backpack')
>>> result.text
"In the quaint little town of Lakeside, there lived a young girl named Lily..."
@@ -62,7 +62,7 @@ requests. What media-types are supported for input and output is model-dependant


```
->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
>>> result = model.generate_content([
... "Give me a recipe for these:", PIL.Image.open('scones.jpeg')])
>>> result.text
@@ -270,7 +270,7 @@ This <a href="../../google/generativeai/GenerativeModel.md#generate_content"><co
conversations.

```
->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
>>> response = model.generate_content('Tell me a story about a magic backpack')
>>> response.text
```
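The constructor signature above also accepts per-model defaults. A brief sketch, assuming the configured `genai` client, of overriding `generation_config` (the plain-dict form is one of the accepted `GenerationConfigType` inputs):

```
model = genai.GenerativeModel(
    "models/gemini-1.5-flash",
    generation_config={"temperature": 0.2, "max_output_tokens": 256},
)
result = model.generate_content("Tell me a story about a magic backpack")
print(result.text)
```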
2 changes: 1 addition & 1 deletion docs/api/google/generativeai/get_model.md
@@ -38,7 +38,7 @@ Calls the API to fetch a model by name.

```
import pprint
-model = genai.get_model('models/gemini-pro')
+model = genai.get_model('models/gemini-1.5-flash')
pprint.pprint(model)
```

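To discover valid names to pass to `get_model`, a short sketch using `list_models` from the same module (assuming the configured client):

```
for m in genai.list_models():
    # Only models that support generateContent work with GenerativeModel.
    if "generateContent" in m.supported_generation_methods:
        print(m.name)
```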
10 changes: 5 additions & 5 deletions google/generativeai/generative_models.py
@@ -36,14 +36,14 @@ class GenerativeModel:
>>> import google.generativeai as genai
>>> import PIL.Image
>>> genai.configure(api_key='YOUR_API_KEY')
->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
>>> result = model.generate_content('Tell me a story about a magic backpack')
>>> result.text
"In the quaint little town of Lakeside, there lived a young girl named Lily..."

Multimodal input:

->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
>>> result = model.generate_content([
... "Give me a recipe for these:", PIL.Image.open('scones.jpeg')])
>>> result.text
@@ -250,7 +250,7 @@ def generate_content(
This `GenerativeModel.generate_content` method can handle multimodal input, and multi-turn
conversations.

->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
>>> response = model.generate_content('Tell me a story about a magic backpack')
>>> response.text

@@ -481,7 +481,7 @@ def start_chat(
class ChatSession:
"""Contains an ongoing conversation with the model.

->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
>>> chat = model.start_chat()
>>> response = chat.send_message("Hello")
>>> print(response.text)
@@ -524,7 +524,7 @@ def send_message(

Appends the request and response to the conversation history.

->>> model = genai.GenerativeModel('models/gemini-pro')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
>>> chat = model.start_chat()
>>> response = chat.send_message("Hello")
>>> print(response.text)
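The docstrings in this file cover unary calls; the tests below also exercise streaming. A minimal sketch of that path, assuming the same configured client:

```
model = genai.GenerativeModel("models/gemini-1.5-flash")
response = model.generate_content("Tell me a story", stream=True)
for chunk in response:  # chunks arrive as they are generated
    print(chunk.text, end="")
```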
4 changes: 2 additions & 2 deletions google/generativeai/models.py
@@ -40,7 +40,7 @@ def get_model(

```
import pprint
-model = genai.get_model('models/gemini-pro')
+model = genai.get_model('models/gemini-1.5-flash')
pprint.pprint(model)
```

@@ -112,7 +112,7 @@ def get_tuned_model(

```
import pprint
-model = genai.get_tuned_model('tunedModels/gemini-1.0-pro-001')
+model = genai.get_tuned_model('tunedModels/gemini-1.5-flash')
pprint.pprint(model)
```

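Note that IDs under `tunedModels/` are user-assigned at tuning time; the one below is hypothetical. A hedged sketch of fetching a tuned model:

```
tuned = genai.get_tuned_model("tunedModels/my-number-generator")  # hypothetical ID
print(tuned.state)  # e.g. ACTIVE once tuning has finished
```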
2 changes: 1 addition & 1 deletion google/generativeai/notebook/text_model.py
@@ -20,7 +20,7 @@
from google.generativeai.types import generation_types
from google.generativeai.notebook.lib import model as model_lib

_DEFAULT_MODEL = "models/gemini-pro"
_DEFAULT_MODEL = "models/gemini-1.5-flash"


class TextModel(model_lib.AbstractModel):
2 changes: 1 addition & 1 deletion samples/rest/tuned_models.sh
@@ -7,7 +7,7 @@ curl -X POST "https://generativelanguage.googleapis.com/v1beta/tunedModels?key=$
-d '
{
"display_name": "number generator model",
"base_model": "models/gemini-1.0-pro-001",
"base_model": "models/gemini-1.5-flash-001-tuning",
"tuning_task": {
"hyperparameters": {
"batch_size": 2,
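The Python client exposes the same tuning call as the REST sample above. A hedged sketch mirroring that payload (training data abbreviated, hyperparameter values assumed):

```
operation = genai.create_tuned_model(
    source_model="models/gemini-1.5-flash-001-tuning",
    training_data=[
        {"text_input": "1", "output": "2"},
        {"text_input": "3", "output": "4"},
    ],
    display_name="number generator model",
    batch_size=2,
)
tuned_model = operation.result()  # blocks until tuning finishes
```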
50 changes: 27 additions & 23 deletions tests/test_generative_models.py
@@ -115,7 +115,7 @@ def setUp(self):

def test_hello(self):
# Generate text from text prompt
model = generative_models.GenerativeModel(model_name="gemini-pro")
model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")

self.responses["generate_content"].append(simple_response("world!"))

@@ -138,7 +138,7 @@ def test_hello(
)
def test_image(self, content):
# Generate text from image
model = generative_models.GenerativeModel("gemini-pro")
model = generative_models.GenerativeModel("gemini-1.5-flash")

cat = "It's a cat"
self.responses["generate_content"].append(simple_response(cat))
@@ -172,7 +172,7 @@ def test_image(self, content):
)
def test_generation_config_overwrite(self, config1, config2):
# Generation config
model = generative_models.GenerativeModel("gemini-pro", generation_config=config1)
model = generative_models.GenerativeModel("gemini-1.5-flash", generation_config=config1)

self.responses["generate_content"] = [
simple_response(" world!"),
@@ -218,7 +218,7 @@ def test_generation_config_overwrite(self, config1, config2):
)
def test_safety_overwrite(self, safe1, safe2):
# Safety
model = generative_models.GenerativeModel("gemini-pro", safety_settings=safe1)
model = generative_models.GenerativeModel("gemini-1.5-flash", safety_settings=safe1)

self.responses["generate_content"] = [
simple_response(" world!"),
@@ -253,7 +253,7 @@ def test_stream_basic(self):
chunks = ["first", " second", " third"]
self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]

model = generative_models.GenerativeModel("gemini-pro")
model = generative_models.GenerativeModel("gemini-1.5-flash")
response = model.generate_content("Hello", stream=True)

self.assertEqual(self.observed_requests[0].contents[0].parts[0].text, "Hello")
@@ -267,7 +267,7 @@ def test_stream_lookahead(self):
chunks = ["first", " second", " third"]
self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]

model = generative_models.GenerativeModel("gemini-pro")
model = generative_models.GenerativeModel("gemini-1.5-flash")
response = model.generate_content("Hello", stream=True)

self.assertEqual(self.observed_requests[0].contents[0].parts[0].text, "Hello")
@@ -287,7 +287,7 @@ def test_stream_prompt_feedback_blocked(self):
]
self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]

model = generative_models.GenerativeModel("gemini-pro")
model = generative_models.GenerativeModel("gemini-1.5-flash")
response = model.generate_content("Bad stuff!", stream=True)

self.assertEqual(
@@ -322,7 +322,7 @@ def test_stream_prompt_feedback_not_blocked(self):
]
self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]

model = generative_models.GenerativeModel("gemini-pro")
model = generative_models.GenerativeModel("gemini-1.5-flash")
response = model.generate_content("Hello", stream=True)

self.assertEqual(
@@ -389,7 +389,7 @@ def add(a: int, b: int) -> int:

def test_chat(self):
# Multi turn chat
model = generative_models.GenerativeModel("gemini-pro")
model = generative_models.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()

self.responses["generate_content"] = [
@@ -423,7 +423,7 @@ def test_chat(self):
def test_chat_roles(self):
self.responses["generate_content"] = [simple_response("hello!")]

model = generative_models.GenerativeModel("gemini-pro")
model = generative_models.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()
response = chat.send_message("hello?")
history = chat.history
@@ -792,7 +792,7 @@ def test_tool_config(self, tool_config, expected_tool_config):
)
self.responses["generate_content"] = [simple_response("echo echo")]

model = generative_models.GenerativeModel("gemini-pro", tools=tools)
model = generative_models.GenerativeModel("gemini-1.5-flash", tools=tools)
_ = model.generate_content("Hello", tools=[tools], tool_config=tool_config)

req = self.observed_requests[0]
@@ -811,7 +811,9 @@ def test_tool_config(self, tool_config, expected_tool_config):
)
def test_system_instruction(self, instruction, expected_instr):
self.responses["generate_content"] = [simple_response("echo echo")]
model = generative_models.GenerativeModel("gemini-pro", system_instruction=instruction)
model = generative_models.GenerativeModel(
"gemini-1.5-flash", system_instruction=instruction
)

_ = model.generate_content("test")

@@ -852,7 +854,7 @@ def test_count_tokens_smoke(self, kwargs):
)

def test_repr_for_unary_non_streamed_response(self):
model = generative_models.GenerativeModel(model_name="gemini-pro")
model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")
self.responses["generate_content"].append(simple_response("world!"))
response = model.generate_content("Hello")

@@ -885,7 +887,7 @@ def test_repr_for_streaming_start_to_finish(self):
chunks = ["first", " second", " third"]
self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]

model = generative_models.GenerativeModel("gemini-pro")
model = generative_models.GenerativeModel("gemini-1.5-flash")
response = model.generate_content("Hello", stream=True)
iterator = iter(response)

@@ -980,7 +982,7 @@ def test_repr_error_info_for_stream_prompt_feedback_blocked(self):
]
self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]

model = generative_models.GenerativeModel("gemini-pro")
model = generative_models.GenerativeModel("gemini-1.5-flash")
response = model.generate_content("Bad stuff!", stream=True)

result = repr(response)
@@ -1096,7 +1098,7 @@ def test_repr_error_info_for_chat_streaming_unexpected_stop(self):

def test_repr_for_multi_turn_chat(self):
# Multi turn chat
model = generative_models.GenerativeModel("gemini-pro")
model = generative_models.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()

self.responses["generate_content"] = [
@@ -1119,7 +1121,7 @@ def test_repr_for_multi_turn_chat(self):
"""\
ChatSession(
model=genai.GenerativeModel(
-model_name='models/gemini-pro',
+model_name='models/gemini-1.5-flash',
generation_config={},
safety_settings={},
tools=None,
@@ -1133,7 +1135,7 @@ def test_repr_for_incomplete_streaming_chat(self):

def test_repr_for_incomplete_streaming_chat(self):
# Multi turn chat
model = generative_models.GenerativeModel("gemini-pro")
model = generative_models.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()

self.responses["stream_generate_content"] = [
Expand All @@ -1148,7 +1150,7 @@ def test_repr_for_incomplete_streaming_chat(self):
"""\
ChatSession(
model=genai.GenerativeModel(
-model_name='models/gemini-pro',
+model_name='models/gemini-1.5-flash',
generation_config={},
safety_settings={},
tools=None,
@@ -1162,7 +1164,7 @@ def test_repr_for_broken_streaming_chat(self):

def test_repr_for_broken_streaming_chat(self):
# Multi turn chat
model = generative_models.GenerativeModel("gemini-pro")
model = generative_models.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()

self.responses["stream_generate_content"] = [
@@ -1193,7 +1195,7 @@ def test_repr_for_broken_streaming_chat(self):
"""\
ChatSession(
model=genai.GenerativeModel(
-model_name='models/gemini-pro',
+model_name='models/gemini-1.5-flash',
generation_config={},
safety_settings={},
tools=None,
@@ -1206,7 +1208,9 @@ def test_repr_for_broken_streaming_chat(self):
self.assertEqual(expected, result)

def test_repr_for_system_instruction(self):
model = generative_models.GenerativeModel("gemini-pro", system_instruction="Be excellent.")
model = generative_models.GenerativeModel(
"gemini-1.5-flash", system_instruction="Be excellent."
)
result = repr(model)
self.assertIn("system_instruction='Be excellent.'", result)

@@ -1237,7 +1241,7 @@ def test_chat_with_request_options(self):
)
request_options = {"timeout": 120}

model = generative_models.GenerativeModel("gemini-pro")
model = generative_models.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()
chat.send_message("hello", request_options=helper_types.RequestOptions(**request_options))

6 changes: 3 additions & 3 deletions tests/test_generative_models_async.py
@@ -80,7 +80,7 @@ async def count_tokens(

async def test_basic(self):
# Generate text from text prompt
model = generative_models.GenerativeModel(model_name="gemini-pro")
model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")

self.responses["generate_content"] = [simple_response("world!")]

@@ -93,7 +93,7 @@ async def test_basic(self):

async def test_streaming(self):
# Generate text from text prompt
model = generative_models.GenerativeModel(model_name="gemini-pro")
model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")

async def responses():
for c in "world!":
@@ -195,7 +195,7 @@ async def test_tool_config(self, tool_config, expected_tool_config):
)
self.responses["generate_content"] = [simple_response("echo echo")]

model = generative_models.GenerativeModel("gemini-pro", tools=tools)
model = generative_models.GenerativeModel("gemini-1.5-flash", tools=tools)
_ = await model.generate_content_async("Hello", tools=[tools], tool_config=tool_config)

req = self.observed_requests[0]
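These async tests target `generate_content_async`, which mirrors the synchronous API. A sketch of that call path under an event loop, using the model name from the diff:

```
import asyncio
import google.generativeai as genai

async def main():
    model = genai.GenerativeModel("gemini-1.5-flash")
    result = await model.generate_content_async("Hello")
    print(result.text)

asyncio.run(main())
```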