Skip to content

auto_generate

Class info

🛈 DocStrings

Auto-generate LLM responses using function signatures.

auto_callable

auto_callable(
    model: str | Model | KnownModelName,
    *,
    system_prompt: str | None = None,
    retries: int = 3,
) -> Callable[
    [Callable[P, Coroutine[Any, Any, R]]], Callable[P, Coroutine[Any, Any, R]]
]
auto_callable(
    model: str | Model | KnownModelName,
    *,
    system_prompt: str | None = None,
    retries: int = 3,
) -> Callable[[Callable[P, R]], Callable[P, Coroutine[Any, Any, R]]]
auto_callable(
    model: str | Model | KnownModelName,
    *,
    system_prompt: str | None = None,
    retries: int = 3,
) -> AnyCallable

Use function signature as schema for LLM responses.

This decorator uses the function's type hints, docstring, and parameter names and defaults as a schema for getting structured responses from the LLM.

Parameters:

Name Type Description Default
model str | Model | KnownModelName

Model to use for responses

required
system_prompt str | None

Optional system instructions

None
retries int

Max retries for failed responses

3
Example

@auto_callable("gpt-5") async def analyze_sentiment(text: str) -> dict[str, float]: '''Analyze sentiment scores (positive/negative) for text.'''

Source code in src/llmling_agent/functional/auto_generate.py
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
def auto_callable[R, **P](
    model: str | models.Model | models.KnownModelName,
    *,
    system_prompt: str | None = None,
    retries: int = 3,
) -> AnyCallable:
    """Use function signature as schema for LLM responses.

    This decorator uses the function's:
    - Type hints
    - Docstring
    - Parameter names and defaults
    as a schema for getting structured responses from the LLM.

    Args:
        model: Model to use for responses
        system_prompt: Optional system instructions
        retries: Max retries for failed responses

    Example:
        @auto_callable("gpt-5")
        async def analyze_sentiment(text: str) -> dict[str, float]:
            '''Analyze sentiment scores (positive/negative) for text.'''
    """

    def decorator(
        func: Callable[P, R] | Callable[P, Coroutine[Any, Any, R]],
    ) -> Callable[P, Coroutine[Any, Any, R]]:
        # Get function info once
        sig = inspect.signature(func)
        doc = inspect.getdoc(func) or ""

        @functools.wraps(func)
        async def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
            # Bind arguments to create context
            bound = sig.bind(*args, **kwargs)
            bound.apply_defaults()
            arg_values = dict(bound.arguments)

            # Create prompt from signature and args
            prompt = (
                f"Based on this function:\n\n"
                f"def {func.__name__}{sig!s}:\n"
                f'    """{doc}"""\n\n'
                f"Generate response for inputs: {arg_values}"
            )

            # Use get_structured to get typed response
            return_type = sig.return_annotation
            if return_type is inspect.Parameter.empty:
                return_type = str  # type: ignore[assignment]

            return await get_structured(
                prompt=prompt,
                response_type=return_type,  # type: ignore[arg-type]
                model=model,
                system_prompt=system_prompt,
                max_retries=retries,
            )

        return wrapper

    return decorator