@@ -434,7 +434,7 @@ def test_chat_streaming_basic(self):
             iter([simple_response("x"), simple_response("y"), simple_response("z")]),
         ]
 
-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         response = chat.send_message("letters?", stream=True)
@@ -457,7 +457,7 @@ def test_chat_incomplete_streaming_errors(self):
             iter([simple_response("x"), simple_response("y"), simple_response("z")]),
         ]
 
-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
         response = chat.send_message("letters?", stream=True)
 
@@ -481,7 +481,7 @@ def test_edit_history(self):
             simple_response("third"),
         ]
 
-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         response = chat.send_message("hello")
@@ -507,7 +507,7 @@ def test_replace_history(self):
             simple_response("third"),
         ]
 
-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
         chat.send_message("hello1")
         chat.send_message("hello2")
@@ -529,7 +529,7 @@ def test_copy_history(self):
             simple_response("third"),
         ]
 
-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat1 = model.start_chat()
         chat1.send_message("hello1")
 
@@ -574,7 +574,7 @@ def no_throw():
             no_throw(),
         ]
 
-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         # Send a message, the response is okay..
@@ -617,7 +617,7 @@ def test_chat_prompt_blocked(self):
             )
         ]
 
-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
        chat = model.start_chat()
 
         with self.assertRaises(generation_types.BlockedPromptException):
@@ -635,7 +635,7 @@ def test_chat_candidate_blocked(self):
             )
         ]
 
-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         with self.assertRaises(generation_types.StopCandidateException):
@@ -657,7 +657,7 @@ def test_chat_streaming_unexpected_stop(self):
             )
         ]
 
-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         response = chat.send_message("hello", stream=True)
@@ -681,7 +681,7 @@ def test_tools(self):
                 dict(name="datetime", description="Returns the current UTC date and time.")
             ]
         )
-        model = generative_models.GenerativeModel("gemini-pro-vision", tools=tools)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", tools=tools)
 
         self.responses["generate_content"] = [
             simple_response("a"),
@@ -840,7 +840,7 @@ def test_system_instruction(self, instruction, expected_instr):
     def test_count_tokens_smoke(self, kwargs):
         si = kwargs.pop("system_instruction", None)
         self.responses["count_tokens"] = [protos.CountTokensResponse(total_tokens=7)]
-        model = generative_models.GenerativeModel("gemini-pro-vision", system_instruction=si)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", system_instruction=si)
         response = model.count_tokens(**kwargs)
         self.assertEqual(
             type(response).to_dict(response, including_default_value_fields=False),
@@ -1018,7 +1018,7 @@ def no_throw():
             no_throw(),
         ]
 
-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         # Send a message, the response is okay..
@@ -1077,7 +1077,7 @@ def test_repr_error_info_for_chat_streaming_unexpected_stop(self):
             )
         ]
 
-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
 
         response = chat.send_message("hello", stream=True)
@@ -1257,7 +1257,7 @@ def test_count_tokens_called_with_request_options(self):
         self.responses["count_tokens"].append(protos.CountTokensResponse(total_tokens=7))
         request_options = {"timeout": 120}
 
-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         model.count_tokens([{"role": "user", "parts": ["hello"]}], request_options=request_options)
 
         self.assertEqual(request_options, self.observed_kwargs[0])
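
For context, a minimal sketch (not part of this diff) of the chat pattern these tests exercise, written against the public google-generativeai client; the API key placeholder and prompt strings are illustrative only.

# Minimal sketch, assuming the google-generativeai package is installed and an
# API key is available; prompts and key are illustrative, not from the test suite.
import google.generativeai as genai

genai.configure(api_key="YOUR_API_KEY")

model = genai.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()

# Non-streaming call, the shape used by tests like test_edit_history.
response = chat.send_message("hello")
print(response.text)

# Streaming call, the shape used by tests like test_chat_streaming_basic.
for chunk in chat.send_message("letters?", stream=True):
    print(chunk.text)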