Skip to content

Commit dd057ee

Browse files
jscud authored and copybara-github committed
chore: Update docs samples to use 2.5-flash.
PiperOrigin-RevId: 816831714
1 parent 83d7973 commit dd057ee

1 file changed

Lines changed: 39 additions & 37 deletions

File tree

README.md

Lines changed: 39 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -259,7 +259,7 @@ See the 'Create a client' section above to initialize a client.
259259

260260
```python
261261
response = client.models.generate_content(
262-
model='gemini-2.0-flash-001', contents='Why is the sky blue?'
262+
model='gemini-2.5-flash', contents='Why is the sky blue?'
263263
)
264264
print(response.text)
265265
```
@@ -276,7 +276,7 @@ python code.
276276
```python
277277
file = client.files.upload(file='a11.txt')
278278
response = client.models.generate_content(
279-
model='gemini-2.0-flash-001',
279+
model='gemini-2.5-flash',
280280
contents=['Could you summarize this file?', file]
281281
)
282282
print(response.text)
@@ -580,7 +580,7 @@ print(async_pager[0])
580580
from google.genai import types
581581

582582
response = client.models.generate_content(
583-
model='gemini-2.0-flash-001',
583+
model='gemini-2.5-flash',
584584
contents='Say something bad.',
585585
config=types.GenerateContentConfig(
586586
safety_settings=[
@@ -614,7 +614,7 @@ def get_current_weather(location: str) -> str:
614614

615615

616616
response = client.models.generate_content(
617-
model='gemini-2.0-flash-001',
617+
model='gemini-2.5-flash',
618618
contents='What is the weather like in Boston?',
619619
config=types.GenerateContentConfig(tools=[get_current_weather]),
620620
)
@@ -630,7 +630,7 @@ as follows:
630630
from google.genai import types
631631

632632
response = client.models.generate_content(
633-
model='gemini-2.0-flash-001',
633+
model='gemini-2.5-flash',
634634
contents='What is the weather like in Boston?',
635635
config=types.GenerateContentConfig(
636636
tools=[get_current_weather],
@@ -677,7 +677,7 @@ function = types.FunctionDeclaration(
677677
tool = types.Tool(function_declarations=[function])
678678

679679
response = client.models.generate_content(
680-
model='gemini-2.0-flash-001',
680+
model='gemini-2.5-flash',
681681
contents='What is the weather like in Boston?',
682682
config=types.GenerateContentConfig(tools=[tool]),
683683
)
@@ -721,7 +721,7 @@ function_response_content = types.Content(
721721
)
722722

723723
response = client.models.generate_content(
724-
model='gemini-2.0-flash-001',
724+
model='gemini-2.5-flash',
725725
contents=[
726726
user_prompt_content,
727727
function_call_content,
@@ -756,7 +756,7 @@ def get_current_weather(location: str) -> str:
756756
return "sunny"
757757

758758
response = client.models.generate_content(
759-
model="gemini-2.0-flash-001",
759+
model="gemini-2.5-flash",
760760
contents="What is the weather like in Boston?",
761761
config=types.GenerateContentConfig(
762762
tools=[get_current_weather],
@@ -786,7 +786,7 @@ def get_current_weather(location: str) -> str:
786786
return "sunny"
787787

788788
response = client.models.generate_content(
789-
model="gemini-2.0-flash-001",
789+
model="gemini-2.5-flash",
790790
contents="What is the weather like in Boston?",
791791
config=types.GenerateContentConfig(
792792
tools=[get_current_weather],
@@ -876,7 +876,7 @@ user_profile = {
876876
}
877877

878878
response = client.models.generate_content(
879-
model='gemini-2.0-flash',
879+
model='gemini-2.5-flash',
880880
contents='Give me a random user profile.',
881881
config={
882882
'response_mime_type': 'application/json',
@@ -906,7 +906,7 @@ class CountryInfo(BaseModel):
906906

907907

908908
response = client.models.generate_content(
909-
model='gemini-2.0-flash-001',
909+
model='gemini-2.5-flash',
910910
contents='Give me information for the United States.',
911911
config=types.GenerateContentConfig(
912912
response_mime_type='application/json',
@@ -920,7 +920,7 @@ print(response.text)
920920
from google.genai import types
921921

922922
response = client.models.generate_content(
923-
model='gemini-2.0-flash-001',
923+
model='gemini-2.5-flash',
924924
contents='Give me information for the United States.',
925925
config=types.GenerateContentConfig(
926926
response_mime_type='application/json',
@@ -958,6 +958,8 @@ You can set response_mime_type to 'text/x.enum' to return one of those enum
958958
values as the response.
959959

960960
```python
961+
from enum import Enum
962+
961963
class InstrumentEnum(Enum):
962964
PERCUSSION = 'Percussion'
963965
STRING = 'String'
@@ -966,7 +968,7 @@ class InstrumentEnum(Enum):
966968
KEYBOARD = 'Keyboard'
967969

968970
response = client.models.generate_content(
969-
model='gemini-2.0-flash-001',
971+
model='gemini-2.5-flash',
970972
contents='What instrument plays multiple notes at once?',
971973
config={
972974
'response_mime_type': 'text/x.enum',
@@ -992,7 +994,7 @@ class InstrumentEnum(Enum):
992994
KEYBOARD = 'Keyboard'
993995

994996
response = client.models.generate_content(
995-
model='gemini-2.0-flash-001',
997+
model='gemini-2.5-flash',
996998
contents='What instrument plays multiple notes at once?',
997999
config={
9981000
'response_mime_type': 'application/json',
@@ -1011,7 +1013,7 @@ to you, rather than being returned as one chunk.
10111013

10121014
```python
10131015
for chunk in client.models.generate_content_stream(
1014-
model='gemini-2.0-flash-001', contents='Tell me a story in 300 words.'
1016+
model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
10151017
):
10161018
print(chunk.text, end='')
10171019
```
@@ -1025,7 +1027,7 @@ you can use the `from_uri` class method to create a `Part` object.
10251027
from google.genai import types
10261028

10271029
for chunk in client.models.generate_content_stream(
1028-
model='gemini-2.0-flash-001',
1030+
model='gemini-2.5-flash',
10291031
contents=[
10301032
'What is this image about?',
10311033
types.Part.from_uri(
@@ -1049,7 +1051,7 @@ with open(YOUR_IMAGE_PATH, 'rb') as f:
10491051
image_bytes = f.read()
10501052

10511053
for chunk in client.models.generate_content_stream(
1052-
model='gemini-2.0-flash-001',
1054+
model='gemini-2.5-flash',
10531055
contents=[
10541056
'What is this image about?',
10551057
types.Part.from_bytes(data=image_bytes, mime_type=YOUR_IMAGE_MIME_TYPE),
@@ -1068,7 +1070,7 @@ of `client.models.generate_content`
10681070

10691071
```python
10701072
response = await client.aio.models.generate_content(
1071-
model='gemini-2.0-flash-001', contents='Tell me a story in 300 words.'
1073+
model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
10721074
)
10731075

10741076
print(response.text)
@@ -1079,7 +1081,7 @@ print(response.text)
10791081

10801082
```python
10811083
async for chunk in await client.aio.models.generate_content_stream(
1082-
model='gemini-2.0-flash-001', contents='Tell me a story in 300 words.'
1084+
model='gemini-2.5-flash', contents='Tell me a story in 300 words.'
10831085
):
10841086
print(chunk.text, end='')
10851087
```
@@ -1088,7 +1090,7 @@ async for chunk in await client.aio.models.generate_content_stream(
10881090

10891091
```python
10901092
response = client.models.count_tokens(
1091-
model='gemini-2.0-flash-001',
1093+
model='gemini-2.5-flash',
10921094
contents='why is the sky blue?',
10931095
)
10941096
print(response)
@@ -1100,7 +1102,7 @@ Compute tokens is only supported in Vertex AI.
11001102

11011103
```python
11021104
response = client.models.compute_tokens(
1103-
model='gemini-2.0-flash-001',
1105+
model='gemini-2.5-flash',
11041106
contents='why is the sky blue?',
11051107
)
11061108
print(response)
@@ -1110,7 +1112,7 @@ print(response)
11101112

11111113
```python
11121114
response = await client.aio.models.count_tokens(
1113-
model='gemini-2.0-flash-001',
1115+
model='gemini-2.5-flash',
11141116
contents='why is the sky blue?',
11151117
)
11161118
print(response)
@@ -1119,14 +1121,14 @@ print(response)
11191121
#### Local Count Tokens
11201122

11211123
```python
1122-
tokenizer = genai.LocalTokenizer(model_name='gemini-2.0-flash-001')
1124+
tokenizer = genai.LocalTokenizer(model_name='gemini-2.5-flash')
11231125
result = tokenizer.count_tokens("What is your name?")
11241126
```
11251127

11261128
#### Local Compute Tokens
11271129

11281130
```python
1129-
tokenizer = genai.LocalTokenizer(model_name='gemini-2.0-flash-001')
1131+
tokenizer = genai.LocalTokenizer(model_name='gemini-2.5-flash')
11301132
result = tokenizer.compute_tokens("What is your name?")
11311133
```
11321134

@@ -1339,7 +1341,7 @@ that it can reflect on its previous responses (i.e., engage in an ongoing
13391341
### Send Message (Synchronous Non-Streaming)
13401342

13411343
```python
1342-
chat = client.chats.create(model='gemini-2.0-flash-001')
1344+
chat = client.chats.create(model='gemini-2.5-flash')
13431345
response = chat.send_message('tell me a story')
13441346
print(response.text)
13451347
response = chat.send_message('summarize the story you told me in 1 sentence')
@@ -1349,23 +1351,23 @@ print(response.text)
13491351
### Send Message (Synchronous Streaming)
13501352

13511353
```python
1352-
chat = client.chats.create(model='gemini-2.0-flash-001')
1354+
chat = client.chats.create(model='gemini-2.5-flash')
13531355
for chunk in chat.send_message_stream('tell me a story'):
13541356
print(chunk.text)
13551357
```
13561358

13571359
### Send Message (Asynchronous Non-Streaming)
13581360

13591361
```python
1360-
chat = client.aio.chats.create(model='gemini-2.0-flash-001')
1362+
chat = client.aio.chats.create(model='gemini-2.5-flash')
13611363
response = await chat.send_message('tell me a story')
13621364
print(response.text)
13631365
```
13641366

13651367
### Send Message (Asynchronous Streaming)
13661368

13671369
```python
1368-
chat = client.aio.chats.create(model='gemini-2.0-flash-001')
1370+
chat = client.aio.chats.create(model='gemini-2.5-flash')
13691371
async for chunk in await chat.send_message_stream('tell me a story'):
13701372
print(chunk.text)
13711373
```
@@ -1424,7 +1426,7 @@ else:
14241426
file_uris = [file1.uri, file2.uri]
14251427

14261428
cached_content = client.caches.create(
1427-
model='gemini-2.0-flash-001',
1429+
model='gemini-2.5-flash',
14281430
config=types.CreateCachedContentConfig(
14291431
contents=[
14301432
types.Content(
@@ -1459,7 +1461,7 @@ cached_content = client.caches.get(name=cached_content.name)
14591461
from google.genai import types
14601462

14611463
response = client.models.generate_content(
1462-
model='gemini-2.0-flash-001',
1464+
model='gemini-2.5-flash',
14631465
contents='Summarize the pdfs',
14641466
config=types.GenerateContentConfig(
14651467
cached_content=cached_content.name,
@@ -1481,7 +1483,7 @@ section above to initialize a client.
14811483
```python
14821484
from google.genai import types
14831485

1484-
model = 'gemini-2.0-flash-001'
1486+
model = 'gemini-2.5-flash'
14851487
training_dataset = types.TuningDataset(
14861488
# or gcs_uri=my_vertex_multimodal_dataset
14871489
gcs_uri='gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl',
@@ -1635,7 +1637,7 @@ Vertex AI:
16351637
```python
16361638
# Specify model and source file only, destination and job display name will be auto-populated
16371639
job = client.batches.create(
1638-
model='gemini-2.0-flash-001',
1640+
model='gemini-2.5-flash',
16391641
src='bq://my-project.my-dataset.my-table', # or "gs://path/to/input/data"
16401642
)
16411643

@@ -1647,7 +1649,7 @@ Gemini Developer API:
16471649
```python
16481650
# Create a batch job with inlined requests
16491651
batch_job = client.batches.create(
1650-
model="gemini-2.0-flash",
1652+
model="gemini-2.5-flash",
16511653
src=[{
16521654
"contents": [{
16531655
"parts": [{
@@ -1662,7 +1664,7 @@ batch_job = client.batches.create(
16621664
job
16631665
```
16641666

1665-
In order to create a batch job with file name. Need to upload a jsonl file.
1667+
In order to create a batch job with file name. Need to upload a json file.
16661668
For example myrequests.json:
16671669

16681670
```
@@ -1675,14 +1677,14 @@ Then upload the file.
16751677
```python
16761678
# Upload the file
16771679
file = client.files.upload(
1678-
file='myrequest.json',
1679-
config=types.UploadFileConfig(display_name='test_json')
1680+
file='myrequests.json',
1681+
config=types.UploadFileConfig(display_name='test-json')
16801682
)
16811683

16821684
# Create a batch job with file name
16831685
batch_job = client.batches.create(
16841686
model="gemini-2.0-flash",
1685-
src="files/file_name",
1687+
src="files/test-json",
16861688
)
16871689
```
16881690

0 commit comments

Comments (0)