@@ -434,7 +434,7 @@ def test_chat_streaming_basic(self):
             iter([simple_response("x"), simple_response("y"), simple_response("z")]),
         ]

-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()

         response = chat.send_message("letters?", stream=True)
@@ -457,7 +457,7 @@ def test_chat_incomplete_streaming_errors(self):
             iter([simple_response("x"), simple_response("y"), simple_response("z")]),
         ]

-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
         response = chat.send_message("letters?", stream=True)

@@ -481,7 +481,7 @@ def test_edit_history(self):
             simple_response("third"),
         ]

-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()

         response = chat.send_message("hello")
@@ -507,7 +507,7 @@ def test_replace_history(self):
             simple_response("third"),
         ]

-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()
         chat.send_message("hello1")
         chat.send_message("hello2")
@@ -529,7 +529,7 @@ def test_copy_history(self):
             simple_response("third"),
         ]

-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat1 = model.start_chat()
         chat1.send_message("hello1")

@@ -574,7 +574,7 @@ def no_throw():
             no_throw(),
         ]

-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()

         # Send a message, the response is okay..
@@ -617,7 +617,7 @@ def test_chat_prompt_blocked(self):
             )
         ]

-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()

         with self.assertRaises(generation_types.BlockedPromptException):
@@ -635,7 +635,7 @@ def test_chat_candidate_blocked(self):
             )
         ]

-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()

         with self.assertRaises(generation_types.StopCandidateException):
@@ -657,7 +657,7 @@ def test_chat_streaming_unexpected_stop(self):
             )
         ]

-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()

         response = chat.send_message("hello", stream=True)
@@ -681,7 +681,7 @@ def test_tools(self):
                 dict(name="datetime", description="Returns the current UTC date and time.")
             ]
         )
-        model = generative_models.GenerativeModel("gemini-pro-vision", tools=tools)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", tools=tools)

         self.responses["generate_content"] = [
             simple_response("a"),
@@ -840,7 +840,7 @@ def test_system_instruction(self, instruction, expected_instr):
     def test_count_tokens_smoke(self, kwargs):
         si = kwargs.pop("system_instruction", None)
         self.responses["count_tokens"] = [protos.CountTokensResponse(total_tokens=7)]
-        model = generative_models.GenerativeModel("gemini-pro-vision", system_instruction=si)
+        model = generative_models.GenerativeModel("gemini-1.5-flash", system_instruction=si)
         response = model.count_tokens(**kwargs)
         self.assertEqual(
             type(response).to_dict(response, including_default_value_fields=False),
@@ -1018,7 +1018,7 @@ def no_throw():
             no_throw(),
         ]

-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()

         # Send a message, the response is okay..
@@ -1077,7 +1077,7 @@ def test_repr_error_info_for_chat_streaming_unexpected_stop(self):
             )
         ]

-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         chat = model.start_chat()

         response = chat.send_message("hello", stream=True)
@@ -1257,7 +1257,7 @@ def test_count_tokens_called_with_request_options(self):
         self.responses["count_tokens"].append(protos.CountTokensResponse(total_tokens=7))
         request_options = {"timeout": 120}

-        model = generative_models.GenerativeModel("gemini-pro-vision")
+        model = generative_models.GenerativeModel("gemini-1.5-flash")
         model.count_tokens([{"role": "user", "parts": ["hello"]}], request_options=request_options)

         self.assertEqual(request_options, self.observed_kwargs[0])
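For context, here is a minimal sketch (not part of the diff) of the call pattern these tests exercise, written against the public google-generativeai client rather than the test harness's mocked self.responses; the API key value is a hypothetical placeholder:

import google.generativeai as genai

genai.configure(api_key="YOUR_API_KEY")  # hypothetical placeholder key

# "gemini-1.5-flash" is the model name the diff switches the tests to.
model = genai.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()

# Non-streaming send, as in test_edit_history / test_replace_history.
response = chat.send_message("hello")
print(response.text)

# Streaming send, as in test_chat_streaming_basic; chunks arrive incrementally.
for chunk in chat.send_message("letters?", stream=True):
    print(chunk.text, end="")

# Token counting with per-request options, as in
# test_count_tokens_called_with_request_options.
count = model.count_tokens(
    [{"role": "user", "parts": ["hello"]}],
    request_options={"timeout": 120},
)
print(count.total_tokens)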