remove references to pro-vision (#388)
Change-Id: I5409ada8470dfda8354beba615ad906778ea13f6
MarkDaoust authored Jun 13, 2024
1 parent 7313e21 commit dbd5498
Showing 2 changed files with 16 additions and 16 deletions.
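The change is mechanical: every test that previously instantiated the deprecated "gemini-pro-vision" model now uses "gemini-1.5-flash". For orientation, here is a minimal sketch (not part of this commit) of the pattern the updated tests exercise, written against the public google.generativeai API; it assumes the package is installed and that GOOGLE_API_KEY is set in the environment. The key handling and print calls are illustrative only.

    # Illustrative sketch, not part of this commit.
    import os
    import google.generativeai as genai

    genai.configure(api_key=os.environ["GOOGLE_API_KEY"])

    # "gemini-pro-vision" is no longer referenced; the tests use "gemini-1.5-flash".
    model = genai.GenerativeModel("gemini-1.5-flash")

    # Chat flow covered by the test_chat_* cases below.
    chat = model.start_chat()
    response = chat.send_message("letters?")
    print(response.text)

    # Token counting covered by the count_tokens smoke tests below.
    print(model.count_tokens([{"role": "user", "parts": ["hello"]}]))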
28 changes: 14 additions & 14 deletions tests/test_generative_models.py
@@ -434,7 +434,7 @@ def test_chat_streaming_basic(self):
iter([simple_response("x"), simple_response("y"), simple_response("z")]),
]

-model = generative_models.GenerativeModel("gemini-pro-vision")
+model = generative_models.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()

response = chat.send_message("letters?", stream=True)
@@ -457,7 +457,7 @@ def test_chat_incomplete_streaming_errors(self):
iter([simple_response("x"), simple_response("y"), simple_response("z")]),
]

-model = generative_models.GenerativeModel("gemini-pro-vision")
+model = generative_models.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()
response = chat.send_message("letters?", stream=True)

@@ -481,7 +481,7 @@ def test_edit_history(self):
simple_response("third"),
]

-model = generative_models.GenerativeModel("gemini-pro-vision")
+model = generative_models.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()

response = chat.send_message("hello")
@@ -507,7 +507,7 @@ def test_replace_history(self):
simple_response("third"),
]

-model = generative_models.GenerativeModel("gemini-pro-vision")
+model = generative_models.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()
chat.send_message("hello1")
chat.send_message("hello2")
@@ -529,7 +529,7 @@ def test_copy_history(self):
simple_response("third"),
]

-model = generative_models.GenerativeModel("gemini-pro-vision")
+model = generative_models.GenerativeModel("gemini-1.5-flash")
chat1 = model.start_chat()
chat1.send_message("hello1")

@@ -574,7 +574,7 @@ def no_throw():
no_throw(),
]

-model = generative_models.GenerativeModel("gemini-pro-vision")
+model = generative_models.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()

# Send a message, the response is okay..
@@ -617,7 +617,7 @@ def test_chat_prompt_blocked(self):
)
]

-model = generative_models.GenerativeModel("gemini-pro-vision")
+model = generative_models.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()

with self.assertRaises(generation_types.BlockedPromptException):
@@ -635,7 +635,7 @@ def test_chat_candidate_blocked(self):
)
]

-model = generative_models.GenerativeModel("gemini-pro-vision")
+model = generative_models.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()

with self.assertRaises(generation_types.StopCandidateException):
@@ -657,7 +657,7 @@ def test_chat_streaming_unexpected_stop(self):
)
]

-model = generative_models.GenerativeModel("gemini-pro-vision")
+model = generative_models.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()

response = chat.send_message("hello", stream=True)
@@ -681,7 +681,7 @@ def test_tools(self):
dict(name="datetime", description="Returns the current UTC date and time.")
]
)
-model = generative_models.GenerativeModel("gemini-pro-vision", tools=tools)
+model = generative_models.GenerativeModel("gemini-1.5-flash", tools=tools)

self.responses["generate_content"] = [
simple_response("a"),
@@ -840,7 +840,7 @@ def test_system_instruction(self, instruction, expected_instr):
def test_count_tokens_smoke(self, kwargs):
si = kwargs.pop("system_instruction", None)
self.responses["count_tokens"] = [protos.CountTokensResponse(total_tokens=7)]
-model = generative_models.GenerativeModel("gemini-pro-vision", system_instruction=si)
+model = generative_models.GenerativeModel("gemini-1.5-flash", system_instruction=si)
response = model.count_tokens(**kwargs)
self.assertEqual(
type(response).to_dict(response, including_default_value_fields=False),
@@ -1018,7 +1018,7 @@ def no_throw():
no_throw(),
]

-model = generative_models.GenerativeModel("gemini-pro-vision")
+model = generative_models.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()

# Send a message, the response is okay..
@@ -1077,7 +1077,7 @@ def test_repr_error_info_for_chat_streaming_unexpected_stop(self):
)
]

-model = generative_models.GenerativeModel("gemini-pro-vision")
+model = generative_models.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()

response = chat.send_message("hello", stream=True)
@@ -1257,7 +1257,7 @@ def test_count_tokens_called_with_request_options(self):
self.responses["count_tokens"].append(protos.CountTokensResponse(total_tokens=7))
request_options = {"timeout": 120}

-model = generative_models.GenerativeModel("gemini-pro-vision")
+model = generative_models.GenerativeModel("gemini-1.5-flash")
model.count_tokens([{"role": "user", "parts": ["hello"]}], request_options=request_options)

self.assertEqual(request_options, self.observed_kwargs[0])
4 changes: 2 additions & 2 deletions tests/test_generative_models_async.py
@@ -214,7 +214,7 @@ async def test_tool_config(self, tool_config, expected_tool_config):
)
async def test_count_tokens_smoke(self, contents):
self.responses["count_tokens"] = [protos.CountTokensResponse(total_tokens=7)]
-model = generative_models.GenerativeModel("gemini-pro-vision")
+model = generative_models.GenerativeModel("gemini-1.5-flash")
response = await model.count_tokens_async(contents)
self.assertEqual(
type(response).to_dict(response, including_default_value_fields=False),
@@ -256,7 +256,7 @@ async def test_count_tokens_called_with_request_options(self):
request = unittest.mock.ANY
request_options = {"timeout": 120}

-model = generative_models.GenerativeModel("gemini-pro-vision")
+model = generative_models.GenerativeModel("gemini-1.5-flash")
response = await model.count_tokens_async(
contents=["Hello?"], request_options=request_options
)
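The async file makes the same substitution in the count_tokens smoke tests. A minimal, hypothetical sketch of the async counterpart, assuming the same google.generativeai setup as the earlier sketch; the asyncio wrapper is illustrative and not part of this commit.

    # Illustrative async counterpart, not part of this commit.
    import asyncio
    import google.generativeai as genai

    async def main():
        model = genai.GenerativeModel("gemini-1.5-flash")
        response = await model.count_tokens_async(contents=["Hello?"])
        print(response.total_tokens)

    asyncio.run(main())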
