This doc demonstrates how to use the Tavily search API with LangChain and LangGraph to create an agent that can search for information on the web. The agent uses either OpenAI or Anthropic models to process the search results and generate responses.

Add the Required Packages
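
First, install the dependencies. This package list is an assumption based on the imports used below (maxim-py is Maxim's Python SDK); adjust versions as needed:

pip install -U langgraph langchain-openai langchain-anthropic langchain-community maxim-py

Then import what the agent needs: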

import os
from functools import lru_cache
from typing import Annotated, Literal, Sequence, TypedDict

from langchain_anthropic import ChatAnthropic
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import BaseMessage
from langchain_openai import ChatOpenAI
from langgraph.graph import END, StateGraph, add_messages
from langgraph.prebuilt import ToolNode

Create the LangGraph Agent

Load the API keys from the environment:

openai_api_key = os.environ.get("OPENAI_API_KEY")
anthropic_api_key = os.environ.get("ANTHROPIC_API_KEY")
tavily_api_key = os.environ.get("TAVILY_API_KEY")

Define the Agent State & Tools

class AgentState(TypedDict):
    messages: Annotated[Sequence[BaseMessage], add_messages]


tools = [TavilySearchResults(max_results=1, tavily_api_key=tavily_api_key)]
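
If you want to sanity-check the Tavily tool before wiring it into the graph, you can invoke it directly. A minimal sketch (the query string is just an example):

# Standalone check: returns a list of search result dicts from Tavily
print(tools[0].invoke({"query": "latest AI news"}))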

Model Selection Helper

@lru_cache(maxsize=4)
def _get_model(model_name: str):
    if model_name == "openai":
        model = ChatOpenAI(temperature=0, model_name="gpt-4o", api_key=openai_api_key)
    elif model_name == "anthropic":
        model = ChatAnthropic(
            temperature=0,
            model_name="claude-3-sonnet-20240229",
            api_key=anthropic_api_key,
        )
    else:
        raise ValueError(f"Unsupported model type: {model_name}")

    model = model.bind_tools(tools)
    return model
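
To confirm the helper binds the search tool correctly, invoke a bound model with a question that needs fresh information; the reply should contain a tool call rather than a final answer. A quick sketch, assuming ANTHROPIC_API_KEY is set:

model = _get_model("anthropic")
reply = model.invoke([("user", "What is the weather in Paris right now?")])
print(reply.tool_calls)  # should include a tavily_search_results_json call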

Define the Function That Determines Whether to Continue

def should_continue(state: AgentState) -> Literal["continue", "end"]:
    messages = state["messages"]
    last_message = messages[-1]
    # If there are no tool calls, then we finish
    if not last_message.tool_calls:
        return "end"
    # Otherwise, if there are, we continue
    else:
        return "continue"


system_prompt = """Be a helpful assistant"""

Define the Function That Calls the Model

def call_model(state: AgentState, config):
    messages = state["messages"]
    messages = [{"role": "system", "content": system_prompt}] + messages
    model_name = config.get("configurable", {}).get("model_name", "anthropic")
    model = _get_model(model_name)
    response = model.invoke(messages)
    # We return a list, because this will get added to the existing list
    return {"messages": [response]}

Define the Tool Execution Node

tool_node = ToolNode(tools)

Define the Config

class GraphConfig(TypedDict):
    model_name: Literal["anthropic", "openai"]

Define a New Graph

workflow = StateGraph(AgentState, config_schema=GraphConfig)

# Define the two nodes we will cycle between
workflow.add_node("agent", call_model)
workflow.add_node("action", tool_node)

# Set the entrypoint as `agent`
# This means that this node is the first one called
workflow.set_entry_point("agent")

# We now add a conditional edge
workflow.add_conditional_edges(
    # First, we define the start node. We use `agent`.
    # This means these are the edges taken after the `agent` node is called.
    "agent",
    # Next, we pass in the function that will determine which node is called next.
    should_continue,
    # Finally we pass in a mapping.
    # The keys are strings, and the values are other nodes.
    # END is a special node marking that the graph should finish.
    # What will happen is we will call `should_continue`, and then the output of that
    # will be matched against the keys in this mapping.
    # Based on which one it matches, that node will then be called.
    {
        # If `continue`, then we call the tool node.
        "continue": "action",
        # Otherwise we finish.
        "end": END,
    },
)

# We now add a normal edge from `action` to `agent`.
# This means that after `action` is called, the `agent` node is called next.
workflow.add_edge("action", "agent")
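
At this point the loop is complete: `agent` runs the model, `should_continue` routes to `action` when the model requests a tool, and `action` feeds results back to `agent`. To inspect the wiring, you can render the compiled graph as a Mermaid diagram, a quick sketch:

# Render the agent/action loop as a Mermaid diagram
print(workflow.compile().get_graph().draw_mermaid())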

Maxim Logger Integration

from maxim import Maxim
from maxim.logger.langchain import MaximLangchainTracer

logger = Maxim({}).logger()

Trace the entire LangGraph agent by compiling the graph and attaching the MaximLangchainTracer callback:

app = workflow.compile()
maxim_langchain_tracer = MaximLangchainTracer(logger)
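
Because the graph was built with config_schema=GraphConfig, the model backend can be chosen per call through the configurable key. A minimal sketch:

result = app.invoke(
    {"messages": [("user", "hello")]},
    config={"configurable": {"model_name": "openai"}},
)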


def another_method(query: str) -> str:
    # Placeholder for any downstream processing of the agent's response
    return query

async def ask_agent(query: str) -> str:
    config = {"recursion_limit": 50, "callbacks": [maxim_langchain_tracer]}
    response = ""
    async for event in app.astream(input={"messages": [query]}, config=config):
        for k, v in event.items():
            if k == "agent":
                # Keep the most recent response from the agent node
                response = str(v["messages"][0].content)
    return response

async def handle(query: str):
    resp = await ask_agent(query)
    another_method(str(resp))
    return resp

Get the response from the agent (top-level await works in a notebook):

resp = await handle("tell me the latest football news")
print(resp)
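
If you are running this from a script rather than a notebook, wrap the call with asyncio.run instead, a minimal sketch:

import asyncio

if __name__ == "__main__":
    print(asyncio.run(handle("tell me the latest football news")))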