This tutorial demonstrates how to build a ReAct-style LangGraph agent that uses the Tavily Search API to fetch information and then processes it with either OpenAI or Anthropic models—while instrumenting the entire workflow with Maxim for tracing, spans, and performance insights.

1. Prerequisites

  • Python 3.8+
  • API keys for:
    • OpenAI (OPENAI_API_KEY)
    • Anthropic (ANTHROPIC_API_KEY)
    • Tavily (TAVILY_API_KEY)
    • Maxim (MAXIM_API_KEY)
    • Maxim Log Repository ID (MAXIM_LOG_REPO_ID)
  • Install packages:
    langchain
    langchain-anthropic
    langchain-community
    langchain-openai
    langgraph
    maxim-py
    python-dotenv
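
    All of these install with one pip command:

    pip install langchain langchain-anthropic langchain-community langchain-openai langgraph maxim-py python-dotenv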
    

2. Imports & Environment

import os
from functools import lru_cache
from typing import Annotated, Sequence, TypedDict, Literal

from dotenv import load_dotenv

# LangChain & community tools
from langchain_core.messages import BaseMessage
from langchain_openai import ChatOpenAI
from langchain_anthropic import ChatAnthropic
from langchain_community.tools.tavily_search import TavilySearchResults

# LangGraph workflow
from langgraph.graph import START, END, StateGraph, add_messages
from langgraph.prebuilt import ToolNode

# Maxim tracing
from maxim import Maxim
from maxim.decorators import trace, span, current_trace
from maxim.decorators.langchain import langchain_callback, langgraph_agent
Load your API keys (python-dotenv reads them from a local .env file):

load_dotenv()

openAIKey       = os.getenv("OPENAI_API_KEY")
anthropicApiKey = os.getenv("ANTHROPIC_API_KEY")
tavilyApiKey    = os.getenv("TAVILY_API_KEY")
# MAXIM_API_KEY and MAXIM_LOG_REPO_ID stay in the environment;
# the Maxim client reads them itself in the next step.
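
For reference, a matching .env file looks like this (values are placeholders):

OPENAI_API_KEY=sk-...
ANTHROPIC_API_KEY=sk-ant-...
TAVILY_API_KEY=tvly-...
MAXIM_API_KEY=...
MAXIM_LOG_REPO_ID=...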

3. Initialize Maxim Logger

# Maxim() reads MAXIM_API_KEY and MAXIM_LOG_REPO_ID from the environment
maxim_client = Maxim()
logger = maxim_client.logger()

4. Define Agent State & Tools

class AgentState(TypedDict):
    messages: Annotated[Sequence[BaseMessage], add_messages]


tools = [TavilySearchResults(max_results=1, tavily_api_key=tavilyApiKey)]
# ToolNode executes whichever tool the model requests
tool_node = ToolNode(tools)
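
You can sanity-check the search tool on its own before wiring it into the graph (the query string is just an example):

results = tools[0].invoke({"query": "latest Iron Man movie news"})
print(results)  # at most one result, since max_results=1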

5. Model Selection Helper

@lru_cache(maxsize=4)
def _get_model(model_name: str):
    if model_name == "openai":
        model = ChatOpenAI(temperature=0, model_name="gpt-4o", api_key=openAIKey)
    elif model_name == "anthropic":
        model = ChatAnthropic(
            temperature=0,
            model_name="claude-3-sonnet-20240229",
            api_key=anthropicApiKey,
        )
    else:
        raise ValueError(f"Unsupported model type: {model_name}")

    model = model.bind_tools(tools)
    return model
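
Because of lru_cache, each provider's client (with tools bound) is constructed once and reused on subsequent turns:

model = _get_model("openai")          # first call builds the client
assert _get_model("openai") is model  # later calls return the cached instance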

6. Control Flow: Continue or End

def should_continue(state):
    messages = state["messages"]
    last_message = messages[-1]
    # If there are no tool calls, then we finish
    if not last_message.tool_calls:
        return "end"
    # Otherwise the model requested a tool, so we continue
    else:
        return "continue"

7. Define the Function That Calls the Model

system_prompt = "Be a helpful assistant."  # any system prompt works here


def call_model(state, config):
    messages = state["messages"]
    messages = [{"role": "system", "content": system_prompt}] + messages
    model_name = config.get("configurable", {}).get("model_name", "anthropic")
    model = _get_model(model_name)
    response = model.invoke(messages)
    # Return a list, because add_messages appends it to the existing history
    return {"messages": [response]}

8. Build the LangGraph Workflow

class GraphConfig(TypedDict):
    model_name: Literal["anthropic", "openai"]


# Define a new graph
workflow = StateGraph(AgentState, config_schema=GraphConfig)

# Define the two nodes we will cycle between
workflow.add_node("agent", call_model)
workflow.add_node("action", tool_node)

# Set the entrypoint as `agent`
# This means that this node is the first one called
workflow.set_entry_point("agent")

# We now add a conditional edge
workflow.add_conditional_edges(
    # First, we define the start node. We use `agent`.
    # This means these are the edges taken after the `agent` node is called.
    "agent",
    # Next, we pass in the function that will determine which node is called next.
    should_continue,
    # Finally we pass in a mapping.
    # The keys are strings, and the values are other nodes.
    # END is a special node marking that the graph should finish.
    # What will happen is we will call `should_continue`, and then the output of that
    # will be matched against the keys in this mapping.
    # Based on which one it matches, that node will then be called.
    {
        # If `continue`, we call the tool node.
        "continue": "action",
        # Otherwise we finish.
        "end": END,
    },
)

# We now add a normal edge from `action` to `agent`.
# This means that after `action` is called, the `agent` node is called next.
workflow.add_edge("action", "agent")
app = workflow.compile()
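
The compiled app is itself a runnable, so you can smoke-test it before adding tracing (the question is just an example):

result = app.invoke(
    {"messages": [("user", "Who directed the first Iron Man movie?")]},
    config={"configurable": {"model_name": "openai"}},
)
print(result["messages"][-1].content)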

9. Trace the Entire LangGraph Agent with Two Simple Decorators

@span(name="another-method-span")
def another_method(query: str) -> str:
    return query


@langgraph_agent(name="movie-agent-v1")
async def ask_agent(query: str) -> str:
    config = {"recursion_limit": 50, "callbacks": [langchain_callback()]}
    response = ""
    async for event in app.astream(input={"messages": [query]}, config=config):
        for k, v in event.items():
            if k == "agent":
                response = str(v["messages"][0].content)
    return response


@trace(logger=logger, name="movie-chat-v1", tags={"service": "movie-chat-v1-server-1"})
async def handle(query: str):
    resp = await ask_agent(query)
    current_trace().set_output(str(resp))
    another_method(str(resp))
    current_trace().feedback({"score": 1})
    return resp

10. Get the Response from the Agent

resp = await handle("is there any new iron man movies coming this year?")
print(resp)
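
The top-level await above works in a notebook; in a plain script, drive it with asyncio.run:

import asyncio

if __name__ == "__main__":
    print(asyncio.run(handle("is there any new iron man movies coming this year?")))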