class MaximLangchainTracer(BaseCallbackHandler)
A callback handler that logs LangChain outputs to the Maxim logger.
Arguments:

| Name | Description |
| --- | --- |
| logger | [Logger](/sdk/python/references/logger/logger): Maxim Logger instance to log outputs |
__init__
def __init__(logger: Logger,
metadata: Optional[Dict[str, Any]] = None,
eval_config: Optional[Dict[str, List[str]]] = None) -> None
Initializes the LangChain tracer.
Arguments:

| Name | Description |
| --- | --- |
| logger | [Logger](/sdk/python/references/logger/logger): Maxim Logger instance to log outputs |
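A minimal usage sketch follows. It assumes the SDK's environment-based setup (`MAXIM_API_KEY` / `MAXIM_LOG_REPO_ID`) and the `maxim.logger.langchain` import path; both may differ across `maxim-py` versions, and the OpenAI model name is illustrative:

```python
from maxim import Maxim
from maxim.logger.langchain import MaximLangchainTracer  # import path may vary by version
from langchain_openai import ChatOpenAI

# Create a Maxim logger (reads MAXIM_API_KEY / MAXIM_LOG_REPO_ID from the
# environment in recent SDK versions; adjust for your setup).
logger = Maxim().logger()

# Instantiate the tracer; the metadata and eval_config arguments are optional.
tracer = MaximLangchainTracer(logger)

# Pass the tracer as a LangChain callback so the run is logged to Maxim.
llm = ChatOpenAI(model="gpt-4o-mini")
response = llm.invoke(
    "What is the capital of France?",
    config={"callbacks": [tracer]},
)
print(response.content)
```

The optional `metadata` and `eval_config` constructor arguments can be passed alongside `logger`, per the signature above.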
on_llm_start
def on_llm_start(serialized: dict[str, Any],
prompts: list[str],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[list[str]] = None,
metadata: Optional[dict[str, Any]] = None,
**kwargs: Any) -> Any
Runs when the LLM starts.
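This hook fires for string-prompt completion models; chat models route through `on_chat_model_start` instead. A sketch using LangChain's `OpenAI` completion wrapper (model name illustrative), reusing the `tracer` from the first sketch:

```python
from langchain_openai import OpenAI

completion_llm = OpenAI(model="gpt-3.5-turbo-instruct")

# A plain string prompt routes through on_llm_start.
completion_llm.invoke("Say hello", config={"callbacks": [tracer]})
```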
on_chat_model_start
def on_chat_model_start(serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any) -> Any
Runs when a chat model call starts.
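Invoking a chat model with the tracer attached triggers this hook with the batched messages, rather than `on_llm_start`. A sketch, reusing the `llm` and `tracer` from the first example:

```python
from langchain_core.messages import HumanMessage, SystemMessage

# A chat model invocation routes through on_chat_model_start; the handler
# receives the message batches shown here.
messages = [SystemMessage(content="You are terse."), HumanMessage(content="Hi")]
llm.invoke(messages, config={"callbacks": [tracer]})
```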
on_llm_new_token
def on_llm_new_token(token: str, **kwargs: Any) -> Any
Runs on each new LLM token. Only available when streaming is enabled.
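As a sketch of when this fires: streaming a response with the tracer attached causes LangChain to invoke `on_llm_new_token` once per chunk (model name and prompt are illustrative):

```python
streaming_llm = ChatOpenAI(model="gpt-4o-mini", streaming=True)

# With streaming enabled, LangChain calls on_llm_new_token for each token,
# followed by on_llm_end when the response completes.
for chunk in streaming_llm.stream("Tell me a joke", config={"callbacks": [tracer]}):
    print(chunk.content, end="", flush=True)
```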
on_llm_end
def on_llm_end(response: LLMResult, **kwargs: Any) -> Any
Runs when the LLM finishes running.
on_llm_error
def on_llm_error(error: Union[Exception, BaseException, KeyboardInterrupt],
**kwargs: Any) -> Any
Runs when the LLM errors.
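LangChain invokes `on_llm_error` before re-raising the exception, so the failed run is still logged. A sketch (the invalid model name is just a way to force an error):

```python
bad_llm = ChatOpenAI(model="nonexistent-model")

try:
    bad_llm.invoke("Hello", config={"callbacks": [tracer]})
except Exception as exc:
    # The tracer's on_llm_error has already recorded the failure in Maxim
    # by the time the exception reaches this handler.
    print(f"LLM call failed: {exc}")
```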
on_retriever_start
def on_retriever_start(serialized: dict[str, Any],
query: str,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[list[str]] = None,
metadata: Optional[dict[str, Any]] = None,
**kwargs: Any) -> Any
Runs when the retriever starts running.
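Retriever runs are traced the same way: attach the tracer via `callbacks` when invoking the retriever. A sketch assuming a FAISS vector store built with OpenAI embeddings (both illustrative choices):

```python
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings

vectorstore = FAISS.from_texts(
    ["Paris is the capital of France.", "Berlin is the capital of Germany."],
    embedding=OpenAIEmbeddings(),
)
retriever = vectorstore.as_retriever()

# on_retriever_start fires when the query begins; the retrieved documents
# are logged to Maxim when the run completes.
docs = retriever.invoke("capital of France", config={"callbacks": [tracer]})
```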