Google's Gemini models integrate seamlessly with the OpenAI Python library, allowing developers to access cutting-edge AI capabilities without switching tools. This tutorial covers client setup, basic chat completions, streaming, function calling, multimodal image input, and robust error handling.
pip install openai python-dotenv
from openai import OpenAI
import os

# Point the standard OpenAI client at Google's Gemini-compatible endpoint.
# The API key is read from the GEMINI_API_KEY environment variable.
client = OpenAI(
    api_key=os.getenv("GEMINI_API_KEY"),
    base_url="https://generativelanguage.googleapis.com/v1beta/",
)

# Basic (non-streaming) chat completion with a system + user message pair.
chat_messages = [
    {"role": "system", "content": "You are a technical documentation expert"},
    {"role": "user", "content": "Explain gradient descent in simple terms"},
]
response = client.chat.completions.create(
    model="gemini-1.5-flash",
    messages=chat_messages,
)
print(response.choices[0].message.content)
# Stream the reply incrementally instead of waiting for the full response.
stream = client.chat.completions.create(
    model="gemini-1.5-flash",
    messages=[{"role": "user", "content": "Describe quantum computing"}],
    stream=True,
)
for chunk in stream:
    # Each chunk carries a delta; the content field can be None, so guard it.
    delta_text = chunk.choices[0].delta.content
    if delta_text:
        print(delta_text, end='', flush=True)
# Declare a callable tool using the OpenAI function-calling schema.
weather_parameters = {
    "type": "object",
    "properties": {
        "location": {"type": "string"},
        "unit": {"enum": ["celsius", "fahrenheit"]},
    },
    "required": ["location"],
}
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get weather for a location",
            "parameters": weather_parameters,
        },
    },
]

response = client.chat.completions.create(
    model="gemini-1.5-flash",
    messages=[{"role": "user", "content": "What's Chicago's weather?"}],
    tools=tools,
    tool_choice="auto",  # let the model decide whether to invoke the tool
)
# Print the JSON-encoded arguments the model chose for the tool call.
print(response.choices[0].message.tool_calls[0].function.arguments)
import base64
def encode_image(image_path):
    """Read the file at *image_path* and return its base64 encoding as text."""
    with open(image_path, "rb") as image_file:
        raw_bytes = image_file.read()
    return base64.b64encode(raw_bytes).decode('utf-8')
# Send text plus an inline base64-encoded image in one multimodal message.
image_part = {
    "type": "image_url",
    "image_url": {"url": f"data:image/png;base64,{encode_image('diagram.png')}"},
}
response = client.chat.completions.create(
    model="gemini-1.5-flash",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Describe this image"},
                image_part,
            ],
        }
    ],
)
print(response.choices[0].message.content)
import openai  # module import required: openai.APITimeoutError is referenced
               # below, but only `from openai import OpenAI` was done earlier,
               # so the bare `openai` name would otherwise raise NameError.

# Abort requests that take longer than `timeout` seconds.
try:
    response = client.chat.completions.create(
        model="gemini-1.5-flash",
        # Concrete message replaces the `[...]` Ellipsis placeholder, which
        # is valid Python but would be rejected by the API.
        messages=[{"role": "user", "content": "Explain gradient descent in simple terms"}],
        timeout=10  # Seconds
    )
except openai.APITimeoutError:
    print("Request timed out - implement retry logic")
from time import sleep
def safe_request(prompt, retries=3):
    """Send *prompt* to Gemini, retrying on rate limits with exponential backoff.

    Raises RuntimeError (an Exception subclass, so existing callers still
    catch it) after `retries` consecutive rate-limited attempts.
    """
    import openai  # for openai.RateLimitError; the file only imported OpenAI,
                   # so the bare `openai` name was previously a NameError.

    for attempt in range(retries):
        try:
            # Original passed a bare `...` placeholder and ignored `prompt`;
            # actually use the caller's prompt here.
            return client.chat.completions.create(
                model="gemini-1.5-flash",
                messages=[{"role": "user", "content": prompt}],
            )
        except openai.RateLimitError:
            sleep(2 ** attempt)  # Exponential backoff: 1s, 2s, 4s, ...
    raise RuntimeError("Max retries exceeded")
Tip: use a low temperature (around 0.2–0.5) for technical content, where factual precision matters more than creative variation.

The Gemini-OpenAI integration simplifies AI development while maintaining enterprise-grade capabilities. Key benefits include:
Explore advanced features in the Gemini Cookbook and the official documentation.
Category: Gemini