View module source on GitHub

ConnectionPool

class ConnectionPool()

Manages HTTP session pooling for efficient network requests.

This class provides a reusable session with retry logic for handling transient network errors.

__init__

def __init__()

Initialize a new session with retry configuration.

get_session

@contextlib.contextmanager
def get_session()

Context manager that yields the session and ensures it’s closed after use.

Yields:

  • requests.Session - The HTTP session object
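
A minimal usage sketch, assuming a `ConnectionPool` instance and a placeholder URL:

```python
# Minimal sketch: borrow the pooled session for a request and let the
# context manager close it afterwards.
pool = ConnectionPool()

with pool.get_session() as session:
    # `session` is a requests.Session configured with retry logic
    response = session.get("https://api.example.com/health")  # placeholder URL
    response.raise_for_status()
```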

MaximAPI

class MaximAPI()

Client for interacting with the Maxim API.

This class provides methods for all available Maxim API endpoints, handling authentication, request formatting, and error handling.

__init__

def __init__(base_url: str, api_key: str)

Initialize a new Maxim API client.

Arguments:

| Name | Description |
| --- | --- |
| `base_url` | The base URL for the Maxim API |
| `api_key` | The API key for authentication |
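
A minimal construction sketch; the import path and base URL below are assumptions to adapt to your installation:

```python
import os

# Assumed import path; adjust to match the installed package layout.
from maxim.apis import MaximAPI

api = MaximAPI(
    base_url="https://app.getmaxim.ai",   # assumed base URL; use your deployment's URL
    api_key=os.environ["MAXIM_API_KEY"],  # API key read from the environment
)
```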

get_prompt

def get_prompt(id: str) -> VersionAndRulesWithPromptId

Get a prompt by ID.

Arguments:

| Name | Description |
| --- | --- |
| `id` | The prompt ID |

Returns:

| Name | Description |
| --- | --- |
| [VersionAndRulesWithPromptId](/sdk/python/references/models/prompt) | The prompt details |

Raises:

  • Exception - If the request fails

get_prompts

def get_prompts() -> List[VersionAndRulesWithPromptId]

Get all prompts.

Returns:

| Name | Description |
| --- | --- |
| List[[VersionAndRulesWithPromptId](/sdk/python/references/models/prompt)] | List of all prompts |

Raises:

  • Exception - If the request fails
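
For example, the two prompt getters might be combined like this, using the `api` client constructed above (the prompt ID is a placeholder):

```python
# Sketch: enumerate all prompts, then fetch one by ID.
prompts = api.get_prompts()
print(f"{len(prompts)} prompts available")

# The ID here is a placeholder; use a real prompt ID from your workspace.
prompt = api.get_prompt("your-prompt-id")
```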

getPromptChain

def getPromptChain(id: str) -> VersionAndRulesWithPromptChainId

Get a prompt chain by ID.

Arguments:

| Name | Description |
| --- | --- |
| `id` | The prompt chain ID |

Returns:

| Name | Description |
| --- | --- |
| [VersionAndRulesWithPromptChainId](/sdk/python/references/models/prompt_chain) | The prompt chain details |

Raises:

  • Exception - If the request fails

get_prompt_chains

def get_prompt_chains() -> List[VersionAndRulesWithPromptChainId]

Get all prompt chains.

Returns:

| Name | Description |
| --- | --- |
| List[[VersionAndRulesWithPromptChainId](/sdk/python/references/models/prompt_chain)] | List of all prompt chains |

Raises:

  • Exception - If the request fails

run_prompt

def run_prompt(model: str,
               messages: List[ChatCompletionMessage],
               tools: Optional[List[Tool]] = None,
               **kwargs)

Run a custom prompt with the specified model and messages.

Arguments:

| Name | Description |
| --- | --- |
| `model` | The model to use |
| `messages` | List of chat messages |
| `tools` | Optional list of tools to use |
| `**kwargs` | Additional parameters to pass to the API |

Returns:

| Name | Description |
| --- | --- |
| [PromptResponse](/sdk/python/references/models/prompt) | The response from the model |

Raises:

  • Exception - If the request fails
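
A hedged sketch of a direct model call; the dict-shaped messages and the extra `temperature` parameter are assumptions, since the exact `ChatCompletionMessage` shape is defined in the prompt models:

```python
# Sketch: run an ad-hoc prompt against a model. The message format is assumed
# to follow the usual role/content shape; check the ChatCompletionMessage model.
response = api.run_prompt(
    model="gpt-4o",  # placeholder model name
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Summarize connection pooling in one sentence."},
    ],
    temperature=0.2,  # example of an extra parameter forwarded via **kwargs
)
print(response)
```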

run_prompt_version

def run_prompt_version(
        prompt_version_id: str, input: str,
        image_urls: Optional[List[ImageUrls]],
        variables: Optional[dict[str, str]]) -> Optional[PromptResponse]

Run a specific prompt version with the given input.

Arguments:

| Name | Description |
| --- | --- |
| `prompt_version_id` | The ID of the prompt version to run |
| `input` | The input text for the prompt |
| `image_urls` | Optional list of image URLs to include |
| `variables` | Optional dictionary of variables to use |

Returns:

| Name | Description |
| --- | --- |
| Optional[[PromptResponse](/sdk/python/references/models/prompt)] | The response from the prompt |

Raises:

  • Exception - If the request fails
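
Running a published prompt version might look like this (the IDs and variable names are placeholders):

```python
# Sketch: run a specific prompt version with input text and template variables.
response = api.run_prompt_version(
    prompt_version_id="your-prompt-version-id",  # placeholder ID
    input="What is our refund policy?",
    image_urls=None,
    variables={"customer_tier": "premium"},      # placeholder variable
)
if response is not None:
    print(response)
```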

run_prompt_chain_version

def run_prompt_chain_version(
        prompt_chain_version_id: str, input: str,
        variables: Optional[dict[str, str]]) -> Optional[AgentResponse]

Run a specific prompt chain version with the given input.

Arguments:

| Name | Description |
| --- | --- |
| `prompt_chain_version_id` | The ID of the prompt chain version to run |
| `input` | The input text for the prompt chain |
| `variables` | Optional dictionary of variables to use |

Returns:

| Name | Description |
| --- | --- |
| Optional[[AgentResponse](/sdk/python/references/models/prompt_chain)] | The response from the prompt chain |

Raises:

  • Exception - If the request fails

get_folder

def get_folder(id: str) -> Folder

Get a folder by ID.

Arguments:

| Name | Description |
| --- | --- |
| `id` | The folder ID |

Returns:

| Name | Description |
| --- | --- |
| [Folder](/sdk/python/references/models/folder) | The folder details |

Raises:

  • Exception - If the request fails

get_folders

def get_folders() -> List[Folder]

Get all folders.

Returns:

| Name | Description |
| --- | --- |
| List[[Folder](/sdk/python/references/models/folder)] | List of all folders |

Raises:

  • Exception - If the request fails

add_dataset_entries

def add_dataset_entries(dataset_id: str,
                        dataset_entries: List[DatasetEntry]) -> dict[str, Any]

Add entries to a dataset.

Arguments:

| Name | Description |
| --- | --- |
| `dataset_id` | The ID of the dataset |
| `dataset_entries` | List of dataset entries to add |

Returns:

| Name | Description |
| --- | --- |
| dict[str, Any] | Response from the API |

Raises:

  • Exception - If the request fails

get_dataset_total_rows

def get_dataset_total_rows(dataset_id: str) -> int

Get the total number of rows in a dataset.

Arguments:

| Name | Description |
| --- | --- |
| `dataset_id` | The ID of the dataset |

Returns:

| Name | Description |
| --- | --- |
| int | The total number of rows |

Raises:

  • Exception - If the request fails

get_dataset_row

def get_dataset_row(dataset_id: str, row_index: int) -> Optional[DatasetRow]

Get a specific row from a dataset.

Arguments:

| Name | Description |
| --- | --- |
| `dataset_id` | The ID of the dataset |
| `row_index` | The index of the row to retrieve |

Returns:

| Name | Description |
| --- | --- |
| Optional[[DatasetRow](/sdk/python/references/models/dataset)] | The dataset row, or None if not found |

Raises:

  • Exception - If the request fails
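
Combined with `get_dataset_total_rows`, this supports simple row-by-row iteration (the dataset ID is a placeholder):

```python
# Sketch: iterate over a dataset one row at a time.
dataset_id = "your-dataset-id"  # placeholder
total = api.get_dataset_total_rows(dataset_id)

for index in range(total):
    row = api.get_dataset_row(dataset_id, index)
    if row is None:  # the row may be missing or deleted
        continue
    print(index, row)
```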

get_dataset_structure

def get_dataset_structure(dataset_id: str) -> Dict[str, str]

Get the structure of a dataset.

Arguments:

| Name | Description |
| --- | --- |
| `dataset_id` | The ID of the dataset |

Returns:

| Name | Description |
| --- | --- |
| Dict[str, str] | The dataset structure |

Raises:

  • Exception - If the request fails

does_log_repository_exist

def does_log_repository_exist(logger_id: str) -> bool

Check if a log repository exists.

Arguments:

| Name | Description |
| --- | --- |
| `logger_id` | The ID of the logger |

Returns:

| Name | Description |
| --- | --- |
| bool | True if the repository exists, False otherwise |

push_logs

def push_logs(repository_id: str, logs: str) -> None

Push logs to a repository.

Arguments:

| Name | Description |
| --- | --- |
| `repository_id` | The ID of the repository |
| `logs` | The logs to push |

Raises:

  • Exception - If the request fails
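
A sketch that guards the push with the existence check above; the repository ID and the serialized log string are placeholders:

```python
# Sketch: only push when the target log repository exists.
repository_id = "your-log-repository-id"  # placeholder
serialized_logs = "..."  # placeholder: pre-serialized log lines produced by the SDK's logger

if api.does_log_repository_exist(repository_id):
    api.push_logs(repository_id, serialized_logs)
```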

fetch_platform_evaluator

def fetch_platform_evaluator(name: str, in_workspace_id: str) -> Evaluator

Fetch a platform evaluator by name.

Arguments:

| Name | Description |
| --- | --- |
| `name` | The name of the evaluator |
| `in_workspace_id` | The workspace ID |

Returns:

| Name | Description |
| --- | --- |
| [Evaluator](/sdk/python/references/models/evaluator) | The evaluator details |

Raises:

  • Exception - If the request fails

create_test_run

def create_test_run(
    name: str,
    workspace_id: str,
    workflow_id: Optional[str],
    prompt_version_id: Optional[str],
    prompt_chain_version_id: Optional[str],
    run_type: RunType,
    evaluator_config: list[Evaluator],
    requires_local_run: bool,
    human_evaluation_config: Optional[HumanEvaluationConfig] = None
) -> TestRun

Create a new test run.

Arguments:

| Name | Description |
| --- | --- |
| `name` | The name of the test run |
| `workspace_id` | The workspace ID |
| `workflow_id` | Optional workflow ID |
| `prompt_version_id` | Optional prompt version ID |
| `prompt_chain_version_id` | Optional prompt chain version ID |
| `run_type` | The type of run |
| `evaluator_config` | List of evaluators to use |
| `requires_local_run` | Whether the test run requires local execution |
| `human_evaluation_config` | Optional human evaluation configuration |

Returns:

| Name | Description |
| --- | --- |
| [TestRun](/sdk/python/references/models/test_run) | The created test run |

Raises:

  • Exception - If the request fails

attach_dataset_to_test_run

def attach_dataset_to_test_run(test_run_id: str, dataset_id: str) -> None

Attach a dataset to a test run.

Arguments:

| Name | Description |
| --- | --- |
| `test_run_id` | The ID of the test run |
| `dataset_id` | The ID of the dataset |

Raises:

  • Exception - If the request fails

push_test_run_entry

def push_test_run_entry(test_run: Union[TestRun, TestRunWithDatasetEntry],
                        entry: TestRunEntry,
                        run_config: Optional[Dict[str, Any]] = None) -> None

Push an entry to a test run.

Arguments:

| Name | Description |
| --- | --- |
| `test_run` | The test run |
| `entry` | The test run entry to push |
| `run_config` | Optional run configuration |

Raises:

  • Exception - If the request fails

mark_test_run_processed

def mark_test_run_processed(test_run_id: str) -> None

Mark a test run as processed.

Arguments:

| Name | Description |
| --- | --- |
| `test_run_id` | The ID of the test run |

Raises:

  • Exception - If the request fails

mark_test_run_failed

def mark_test_run_failed(test_run_id: str) -> None

Mark a test run as failed.

Arguments:

| Name | Description |
| --- | --- |
| `test_run_id` | The ID of the test run |

Raises:

  • Exception - If the request fails

get_test_run_status

def get_test_run_status(test_run_id: str) -> TestRunStatus

Get the status of a test run.

Arguments:

| Name | Description |
| --- | --- |
| `test_run_id` | The ID of the test run |

Returns:

| Name | Description |
| --- | --- |
| [TestRunStatus](/sdk/python/references/models/test_run) | The status of the test run |

Raises:

  • Exception - If the request fails

get_test_run_final_result

def get_test_run_final_result(test_run_id: str) -> TestRunResult

Get the final result of a test run.

Arguments:

| Name | Description |
| --- | --- |
| `test_run_id` | The ID of the test run |

Returns:

| Name | Description |
| --- | --- |
| [TestRunResult](/sdk/python/references/models/test_run) | The final result of the test run |

Raises:

  • Exception - If the request fails
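
The test-run endpoints above compose into a simple lifecycle: fetch an evaluator, create the run, attach a dataset, poll the status, then read the final result. A hedged sketch follows; the IDs, the `RunType.SINGLE` member, and the `.id` attribute on the returned `TestRun` are assumptions to check against the test_run models:

```python
import time

# Sketch of the test-run lifecycle using the endpoints documented above.
evaluator = api.fetch_platform_evaluator(
    name="Bias",                          # placeholder evaluator name
    in_workspace_id="your-workspace-id",  # placeholder workspace ID
)

test_run = api.create_test_run(
    name="nightly-regression",
    workspace_id="your-workspace-id",
    workflow_id=None,
    prompt_version_id="your-prompt-version-id",
    prompt_chain_version_id=None,
    run_type=RunType.SINGLE,              # assumed enum member; see the test_run models
    evaluator_config=[evaluator],
    requires_local_run=False,
)

api.attach_dataset_to_test_run(test_run.id, "your-dataset-id")

# Poll the status until your completion condition is met, then fetch the result.
for _ in range(60):
    status = api.get_test_run_status(test_run.id)
    time.sleep(10)
    # break here once `status` indicates the run has finished

result = api.get_test_run_final_result(test_run.id)
```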

get_upload_url

def get_upload_url(key: str, mime_type: str, size: int) -> SignedURLResponse

Get a signed URL for uploading a file.

Arguments:

| Name | Description |
| --- | --- |
| `key` | The key (filename) for the upload |
| `mime_type` | The MIME type of the file |
| `size` | The size of the file in bytes |

Returns:

| Name | Description |
| --- | --- |
| [SignedURLResponse](/sdk/python/references/models/attachment) | A dictionary containing the signed URL for upload |

Raises:

  • Exception - If the request fails

upload_to_signed_url

def upload_to_signed_url(url: str, data: bytes, mime_type: str) -> bool

Upload data to a signed URL using multipart form data with retry logic.

Arguments:

| Name | Description |
| --- | --- |
| `url` | The signed URL to upload to |
| `data` | The binary data to upload |
| `mime_type` | The MIME type of the data |

Returns:

| Name | Description |
| --- | --- |
| bool | True if upload was successful, False otherwise |
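
The two upload helpers are typically used together: request a signed URL, then push the bytes to it. A sketch, assuming the signed URL lives under a `"url"` key of the response (the filename is a placeholder):

```python
# Sketch: upload a local file via a signed URL.
with open("report.pdf", "rb") as fh:  # placeholder file
    data = fh.read()

signed = api.get_upload_url(key="report.pdf", mime_type="application/pdf", size=len(data))

# The exact field holding the URL depends on SignedURLResponse; "url" is assumed here.
ok = api.upload_to_signed_url(signed["url"], data, "application/pdf")
print("upload succeeded" if ok else "upload failed")
```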