```python
class ConfigDict(TypedDict)
```
A class representing the configuration for the Maxim SDK.
Attributes:
| Name | Type | Description |
|---|---|---|
| api_key | Optional[str], optional | The API key for the Maxim instance. Defaults to None. |
| base_url | Optional[str], optional | The base URL for the Maxim instance. Defaults to None. |
| cache | Optional[MaximCache], optional | The cache to use for the Maxim instance. Defaults to None. |
| debug | Optional[bool], optional | Whether to enable debug logging. Defaults to False. |
| raise_exceptions | Optional[bool], optional | Whether to raise exceptions during logging operations. Defaults to False. |
| prompt_management | Optional[bool], optional | Whether to enable prompt management. Defaults to False. |
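Because a ConfigDict is a plain dict at runtime, a dict literal can be passed straight to the Maxim constructor. A minimal sketch (the api_key value is a placeholder):

```python
from maxim import Maxim

# ConfigDict is a TypedDict, so a plain dict literal satisfies it at runtime.
maxim = Maxim({
    "api_key": "your-api-key",   # placeholder credential
    "prompt_management": True,   # opt in to the prompt APIs
    "debug": True,               # verbose SDK logging while developing
})
```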
```python
@deprecated(
    "This class will be removed in a future version. Use ConfigDict, which is a TypedDict."
)
@dataclass
class Config()
```
A class representing the configuration for the Maxim SDK.
Attributes:
| Name | Type | Description |
|---|---|---|
| api_key | str | The API key for the Maxim instance. |
| base_url | Optional[str], optional | The base URL for the Maxim instance. Defaults to "https://app.getmaxim.ai". |
| cache | Optional[MaximCache], optional | The cache to use for the Maxim instance. Defaults to None. |
| debug | Optional[bool], optional | Whether to enable debug logging. Defaults to False. |
| raise_exceptions | Optional[bool], optional | Whether to raise exceptions during logging operations. Defaults to False. |
| prompt_management | Optional[bool], optional | Whether to enable prompt management. Defaults to False. |
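Since Config is slated for removal, a short sketch of the older dataclass style next to its ConfigDict replacement; the `from maxim import Maxim, Config` import follows the SDK README:

```python
from maxim import Maxim, Config

# Deprecated dataclass style (still works, but will be removed):
maxim = Maxim(Config(api_key="your-api-key", prompt_management=True))

# Preferred replacement: the equivalent ConfigDict-shaped dict.
maxim = Maxim({"api_key": "your-api-key", "prompt_management": True})
```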
get_config_dict
```python
def get_config_dict(config: Union[Config, ConfigDict]) -> dict[str, Any]
```
Converts a Config or ConfigDict to a dictionary with default values.
Arguments:
| Name | Type | Description |
|---|---|---|
| config | Union[Config, ConfigDict] | The configuration object to convert. |
Returns:
dict[str, Any]: A dictionary containing the configuration parameters with defaults applied.
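A short sketch of the helper; the import path for get_config_dict is an assumption, so adjust it to wherever the function lives in your SDK version:

```python
from maxim import Config
from maxim.maxim import get_config_dict  # import path is an assumption

cfg = get_config_dict(Config(api_key="your-api-key"))
print(cfg["base_url"])  # defaults applied: "https://app.getmaxim.ai"
```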
__init__
```python
def __init__(config: Union[Config, ConfigDict, None] = None)
```
Initializes a new instance of the Maxim class.
Arguments:
| Name | Type | Description |
|---|---|---|
| config | Union[Config, ConfigDict, None], optional | The configuration for the Maxim instance. Defaults to None. |
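A minimal sketch of instantiation that avoids hard-coding the key by reading it from an environment variable of your choosing:

```python
import os
from maxim import Maxim

# Pull the key from the environment so it never lands in source control.
maxim = Maxim({"api_key": os.environ["MAXIM_API_KEY"]})
```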
enable_prompt_management
```python
def enable_prompt_management(cache: Optional[MaximCache] = None) -> "Maxim"
```
Enables prompt management functionality with optional cache configuration.
Arguments:
| Name | Type | Description |
|---|---|---|
| cache | Optional[MaximCache], optional | Custom cache implementation to use. Defaults to None (uses the existing cache). |
Returns:
| Name | Description |
|---|---|
| [Maxim](/sdk/python/references/maxim) | The current Maxim instance for method chaining. |
enable_exceptions
```python
def enable_exceptions(val: bool) -> "Maxim"
```
Enables or disables exception raising during logging operations.
Arguments:
| Name | Type | Description |
|---|---|---|
| val | bool | True to enable exception raising, False to disable. |
Returns:
| Name | Description |
|---|---|
| [Maxim](/sdk/python/references/maxim) | The current Maxim instance for method chaining. |
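Both setters return the instance, so they can be chained after construction; a sketch assuming prompt management was not already enabled via the config:

```python
from maxim import Maxim

maxim = (
    Maxim({"api_key": "your-api-key"})
    .enable_prompt_management()  # None -> keep the existing/default cache
    .enable_exceptions(True)     # raise logging errors instead of swallowing them
)
```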
get_prompt
```python
def get_prompt(id: str, rule: QueryRule) -> Optional[RunnablePrompt]
```
Retrieves a prompt based on the provided id and rule.
Arguments:
| Name | Type | Description |
|---|---|---|
| id | str | The id of the prompt. |
| rule | QueryRule | The rule to match the prompt against. |
Returns:
| Name | Description |
|---|---|
| Optional[[RunnablePrompt](/sdk/python/references/models/prompt)] | The prompt object if found, otherwise None. |
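A sketch of fetching a deployed prompt. The QueryRule import path and its constructor arguments are assumptions here; check the QueryRule reference for the exact fields:

```python
from maxim import Maxim
from maxim.models import QueryRule  # import path is an assumption

maxim = Maxim({"api_key": "your-api-key", "prompt_management": True})

# Hypothetical rule: select the version deployed for env=prod.
rule = QueryRule(query="env=prod")

prompt = maxim.get_prompt("prompt-id", rule)
if prompt is not None:
    print(prompt)  # a RunnablePrompt matching the rule
```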
get_prompts
```python
def get_prompts(rule: QueryRule) -> List[RunnablePrompt]
```
Retrieves all prompts that match the given rule.
Arguments:
| Name | Type | Description |
|---|---|---|
| rule | QueryRule | The rule to match the prompts against. |
Returns:
| Name | Description |
|---|---|
| List[[RunnablePrompt](/sdk/python/references/models/prompt)] | A list of prompts that match the given rule. |
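The plural variant follows the same pattern, returning every match instead of one; reusing the assumed QueryRule shape from the sketch above:

```python
prompts = maxim.get_prompts(QueryRule(query="env=prod"))
print(f"{len(prompts)} prompts match the rule")
```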
get_prompt_chain
```python
def get_prompt_chain(id: str, rule: QueryRule) -> Optional[RunnablePromptChain]
```
Retrieves a prompt chain based on the provided id and rule.
Arguments:
| Name | Type | Description |
|---|---|---|
| id | str | The id of the prompt chain. |
| rule | QueryRule | The rule to match the prompt chain against. |
Returns:
| Name | Description |
|---|---|
| Optional[[RunnablePromptChain](/sdk/python/references/models/prompt_chain)] | The prompt chain object if found, otherwise None. |
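Prompt chains are fetched the same way as single prompts; a sketch with the same assumed QueryRule shape:

```python
chain = maxim.get_prompt_chain("prompt-chain-id", QueryRule(query="env=prod"))
if chain is None:
    raise RuntimeError("no prompt chain deployed for this rule")
```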
get_folder_by_id
```python
def get_folder_by_id(id: str) -> Optional[Folder]
```
Retrieves a folder based on the provided id.
Arguments:
| Name | Type | Description |
|---|---|---|
| id | str | The id of the folder. |
Returns:
| Name | Description |
|---|---|
| Optional[[Folder](/sdk/python/references/models/folder)] | The folder object if found, otherwise None. |
get_folders
```python
def get_folders(rule: QueryRule) -> List[Folder]
```
Retrieves all folders that match the given rule.
Arguments:
| Name | Type | Description |
|---|---|---|
| rule | QueryRule | The rule to match the folders against. |
Returns:
| Name | Description |
|---|---|
| List[[Folder](/sdk/python/references/models/folder)] | A list of folders that match the given rule. |
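Folder lookups mirror the prompt getters: fetch one by id, or query many by rule. A sketch (ids come from the Maxim dashboard):

```python
folder = maxim.get_folder_by_id("folder-id")              # None if the id is unknown
folders = maxim.get_folders(QueryRule(query="env=prod"))  # same assumed QueryRule shape
```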
logger
```python
def logger(config: Optional[Union[LoggerConfig, LoggerConfigDict]] = None) -> Logger
```
Creates a logger based on the provided configuration.
Arguments:
| Name | Type | Description |
|---|---|---|
| config | Optional[Union[LoggerConfig, LoggerConfigDict]], optional | The configuration for the logger. Defaults to None. |
Returns:
| Name | Description |
|---|---|
| [Logger](/sdk/python/references/logger/logger) | The logger object. |
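A sketch of creating a logger; the dict shape assumes LoggerConfigDict takes the log repository id under an `id` key, which should be verified against the LoggerConfig reference:

```python
# Bind a logger to a log repository created in the Maxim dashboard.
logger = maxim.logger({"id": "log-repository-id"})  # assumed LoggerConfigDict shape
```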
create_test_run
```python
def create_test_run(name: str, in_workspace_id: str) -> TestRunBuilder
```
Creates a test run builder based on the provided name and workspace id.
Arguments:
| Name | Type | Description |
|---|---|---|
| name | str | The name of the test run. |
| in_workspace_id | str | The workspace id to create the test run in. |
Returns:
| Name | Description |
|---|---|
| [TestRunBuilder](/sdk/python/references/test_runs/test_run_builder) | The test run builder object. |
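A sketch of the builder flow; every builder method shown below is an assumption about the TestRunBuilder API, so confirm the names against its reference page:

```python
result = (
    maxim.create_test_run("nightly-eval", "workspace-id")
    .with_data("dataset-id")     # hypothetical: attach a dataset by id
    .with_evaluators("Bias")     # hypothetical: attach platform evaluators
    .run()
)
```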
chat_completion
```python
def chat_completion(model: str,
                    messages: List[ChatCompletionMessage],
                    tools: Optional[List[Tool]] = None,
                    **kwargs) -> Optional[PromptResponse]
```
Performs a chat completion request using the specified model and messages.
Arguments:
| Name | Type | Description |
|---|---|---|
| model | str | The model name to use for completion, in "provider/model_name" format, e.g. "openai/gpt-3.5-turbo". |
| messages | List[ChatCompletionMessage] | List of chat messages in the conversation. |
| tools | Optional[List[Tool]], optional | List of tools available to the model. Defaults to None. |
| **kwargs | | Additional model parameters to pass to the completion request. |
Returns:
| Name | Description |
|---|---|
| Optional[[PromptResponse](/sdk/python/references/models/prompt)] | The completion response if successful, otherwise None. |
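A sketch of a direct completion call; the role/content dict shape for messages is an assumption based on the OpenAI-style convention:

```python
response = maxim.chat_completion(
    model="openai/gpt-3.5-turbo",  # "provider/model_name" format
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a haiku about tracing."},
    ],
    temperature=0.2,  # extra model parameters travel through **kwargs
)
if response is None:
    print("completion failed")
```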
cleanup
```python
def cleanup()
```
Cleans up the Maxim sync thread.
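cleanup is typically invoked once at process shutdown so the background sync thread stops cleanly; a sketch:

```python
import atexit
from maxim import Maxim

maxim = Maxim({"api_key": "your-api-key"})
atexit.register(maxim.cleanup)  # stop the sync thread when the process exits
```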