# Generation settings shared by every turn of the conversation
# (previously duplicated verbatim in both calls).
GENERATION_CONFIG = {
    "temperature": 0.8,
    "system_instruction": "You are a helpful Python programming assistant",
}

# First message in the conversation
first_question = "Hello, can you help me with Python programming?"
response1 = client.models.generate_content(
    model="gemini-2.0-flash",
    contents=first_question,
    config=GENERATION_CONFIG,
    trace_id=trace_id,
)
response_text1 = response1.text
print("Assistant:", response_text1)

# Continue the conversation with follow-up question.
# generate_content is stateless: to actually "continue" the conversation the
# prior turns must be sent explicitly as history, otherwise the model has no
# memory of the first exchange. (Alternatively, client.chats.create() manages
# this history automatically — not used here to keep the trace_id plumbing.)
follow_up = "Can you write a simple Python function to calculate fibonacci numbers?"
response2 = client.models.generate_content(
    model="gemini-2.0-flash",
    contents=[
        {"role": "user", "parts": [{"text": first_question}]},
        {"role": "model", "parts": [{"text": response_text1}]},
        {"role": "user", "parts": [{"text": follow_up}]},
    ],
    config=GENERATION_CONFIG,
    trace_id=trace_id,
)
response_text2 = response2.text
print("Assistant:", response_text2)

# End the trace when conversation is complete
trace.end()