Skip to content

llmfilters

Class info

🛈 DocStrings

generate_class_schemas

generate_class_schemas(cls_instance: Any) -> list[dict[str, Any]]

Generate OpenAI-tools-JSON schemas for all methods of a class instance.

Parameters:

Name Type Description Default
cls_instance Any

An instance of the class to generate schemas for.

required

Returns:

Type Description
list[dict[str, Any]]

List[Dict[str, Any]]: A list of OpenAI-tools-JSON schemas for the class methods.

Raises:

Type Description
ValueError

If the input is not a class instance.

Source code in src/jinjarope/llmfilters.py
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
def generate_class_schemas(cls_instance: Any) -> list[dict[str, Any]]:
    """Generate OpenAI-tools-JSON schemas for all methods of a class instance.

    Args:
        cls_instance: An instance of the class to generate schemas for.

    Returns:
        A list of OpenAI-tools-JSON schemas, one per public method.

    Raises:
        ValueError: If the input is a class rather than a class instance.
    """
    # The previous `isinstance(cls_instance, object)` guard was always True
    # (every Python value is an object), so validation never fired — and its
    # dead branch raised TypeError while the docstring promised ValueError.
    # Reject classes explicitly instead, matching the documented contract.
    if inspect.isclass(cls_instance):
        msg = "Input must be a class instance"
        raise ValueError(msg)

    schemas: list[dict[str, Any]] = []

    # Bound methods only; skip dunder/private names.
    for name, method in inspect.getmembers(cls_instance, predicate=inspect.ismethod):
        if name.startswith("_"):
            continue
        try:
            schemas.append(generate_openai_schema(method))
        except ValueError as e:
            # Best-effort: methods lacking a docstring or parameters are skipped.
            print(f"Skipping method '{name}': {e!s}")

    return schemas

generate_openai_schema

generate_openai_schema(func: Callable[..., Any]) -> dict[str, Any]

Generate an OpenAI-tools-JSON schema for the given function.

Parameters:

Name Type Description Default
func Callable[..., Any]

The function to generate the schema for.

required

Returns:

Type Description
dict[str, Any]

Dict[str, Any]: The OpenAI-tools-JSON schema.

Raises:

Type Description
ValueError

If the function has no docstring or parameters.

Source code in src/jinjarope/llmfilters.py
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
def generate_openai_schema(func: Callable[..., Any]) -> dict[str, Any]:
    """Generate an OpenAI-tools-JSON schema for the given function.

    Args:
        func: The function to generate the schema for.

    Returns:
        The OpenAI-tools-JSON schema: a dict with "name", "description"
        and "parameters" keys.

    Raises:
        ValueError: If the function has no docstring or no parameters.
    """
    if not func.__doc__:
        msg = "Function must have a docstring"
        raise ValueError(msg)

    signature = inspectfilters.get_signature(func)
    if not signature.parameters:
        msg = "Function must have at least one parameter"
        raise ValueError(msg)

    description = inspectfilters.get_doc(func)
    properties: dict[str, dict[str, str]] = {}
    required: list[str] = []
    for name, param in signature.parameters.items():
        # Map a handful of Python annotations to JSON-schema primitives;
        # anything unannotated or unrecognized falls back to "string".
        # NOTE(review): string annotations (PEP 563) will not match these
        # identity checks — verify callers pass resolved annotations.
        param_type = "string"
        annotation = param.annotation
        # `Parameter.empty` is a sentinel: compare with `is`, never `==`
        # (equality can be hijacked or raise for objects with custom __eq__).
        if annotation is not inspect.Parameter.empty:
            if annotation is int:
                param_type = "integer"
            elif annotation is float:
                param_type = "number"
            elif annotation is bool:
                param_type = "boolean"

        properties[name] = {"type": param_type}
        if param.default is inspect.Parameter.empty:
            required.append(name)
    return {
        "name": func.__name__,
        "description": description,
        "parameters": {
            "type": "object",
            "properties": properties,
            "required": required,
        },
    }

llm_analyze_image

llm_analyze_image(
    image_url: str,
    prompt: str | None = None,
    model: str = "gpt-4-vision-preview",
    token: str | None = None,
    base_url: str | None = None,
    encode_b64: bool = False,
    **kwargs: Any
) -> str

Analyze an image using an LLM vision model and return the analysis as a string.

Parameters:

Name Type Description Default
image_url str

The URL of the image to analyze.

required
prompt str | None

A prompt to guide the image analysis. If None, use a default prompt.

None
model str

The name of the model to use. Defaults to "gpt-4-vision-preview".

'gpt-4-vision-preview'
token str | None

The API token (key) for authentication. If None, it will use the OPENAI_API_KEY environment variable.

None
base_url str | None

The base URL for the API endpoint. If None, the default URL for the model will be used.

None
encode_b64 bool

Whether to encode the image to base64 before sending it to the API. (required for some models)

False
kwargs Any

Additional keyword arguments passed to litellm.completion.

{}

Returns:

Type Description
str

The analysis of the image as a string.

Raises:

Type Description
ValueError

If the image_url is empty or invalid.

RequestException

If there's an error downloading the image.

Exception

If there's an error in making the API call or processing the response.

Source code in src/jinjarope/llmfilters.py
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
def llm_analyze_image(
    image_url: str,
    prompt: str | None = None,
    model: str = "gpt-4-vision-preview",
    token: str | None = None,
    base_url: str | None = None,
    encode_b64: bool = False,
    **kwargs: Any,
) -> str:
    """Analyze an image using an LLM vision model and return the analysis as a string.

    Args:
        image_url: The URL of the image to analyze.
        prompt: A prompt to guide the image analysis. If None, use a default prompt.
        model: The name of the model to use. Defaults to "gpt-4-vision-preview".
        token: The API token (key) for authentication.
               If None, it will use the OPENAI_API_KEY environment variable.
        base_url: The base URL for the API endpoint.
                  If None, the default URL for the model will be used.
        encode_b64: Whether to encode the image to base64 before sending it to the API.
                    (required for some models)
        kwargs: Additional keyword arguments passed to litellm.completion.

    Returns:
        The analysis of the image as a string ("" if the model returned no content).

    Raises:
        ValueError: If the image_url is empty, or the API response is missing
                    choices or a message.
        requests.RequestException: If there's an error downloading the image.
        Exception: If there's an error in making the API call or processing the response.
    """
    if not image_url or not image_url.strip():
        msg = "Image URL cannot be empty"
        raise ValueError(msg)

    prompt = prompt or "Analyze this image and describe what you see in detail."
    image_str = htmlfilters.url_to_b64(image_url) if encode_b64 else image_url
    completion_kwargs: dict[str, Any] = {
        "model": model,
        "messages": [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": prompt},
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": image_str,
                        },
                    },
                ],
            },
        ],
        "max_tokens": 300,  # Default max tokens
    }

    if token:
        completion_kwargs["api_key"] = token
    if base_url:
        completion_kwargs["api_base"] = base_url
    response = litellm.completion(**completion_kwargs, **kwargs)
    # Guard the response shape the same way llm_complete does: previously an
    # empty choices list or a None content crashed with IndexError /
    # AttributeError instead of a clear error.
    if not response.choices or not response.choices[0].message:
        msg = "Invalid API response: missing choices or message"
        raise ValueError(msg)
    content = response.choices[0].message.content
    return content.strip() if content else ""

llm_complete

llm_complete(
    prompt: str,
    system_prompt: str | None = None,
    context: str | None = None,
    model: str | None = None,
    token: str | None = None,
    base_url: str | None = None,
    **kwargs: Any
) -> str

Complete a prompt using the LLM API.

Parameters:

Name Type Description Default
prompt str

The prompt to complete.

required
system_prompt str | None

The system prompt to set context for the model.

None
context str | None

Additional context for the prompt.

None
model str | None

The model to use.

None
token str | None

The API token.

None
base_url str | None

The base URL of the API.

None
kwargs Any

Additional keyword arguments passed to litellm.completion.

{}

Returns:

Type Description
str

The completed text from the LLM.

Raises:

Type Description
ValueError

If the API response is invalid or missing expected data.

Source code in src/jinjarope/llmfilters.py
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
def llm_complete(
    prompt: str,
    system_prompt: str | None = None,
    context: str | None = None,
    model: str | None = None,
    token: str | None = None,
    base_url: str | None = None,
    **kwargs: Any,
) -> str:
    """Complete a prompt using the LLM API.

    Args:
        prompt: The prompt to complete.
        system_prompt: The system prompt to set context for the model.
        context: Additional context for the prompt.
        model: The model to use.
        token: The API token.
        base_url: The base URL of the API.
        kwargs: Additional keyword arguments passed to litellm.completion.

    Returns:
        The completed text from the LLM ("" if the model returned no content).

    Raises:
        ValueError: If the API response is invalid or missing expected data.
    """
    # Optional leading messages first, then the user prompt (always present).
    conversation: list[dict[str, str]] = [
        {"role": role, "content": text}
        for role, text in (("system", system_prompt), ("user", context))
        if text
    ]
    conversation.append({"role": "user", "content": prompt})

    # Unset settings fall back to the conventional environment variables.
    response = litellm.completion(
        model=model or os.getenv("OPENAI_MODEL", ""),
        api_key=token or os.getenv("OPENAI_API_TOKEN"),
        api_base=base_url or os.getenv("OPENAI_API_BASE"),
        messages=conversation,
        **kwargs,
    )
    choices = response.choices
    if not choices or not choices[0].message:
        msg = "Invalid API response: missing choices or message"
        raise ValueError(msg)
    return choices[0].message.content or ""

llm_generate_image

llm_generate_image(
    prompt: str,
    model: str | None = None,
    token: str | None = None,
    base_url: str | None = None,
    size: str = "1024x1024",
    quality: str = "standard",
    as_b64_json: bool = False,
    **kwargs: Any
) -> str | None

Generate an image using the LLM API and return the URL.

Parameters:

Name Type Description Default
prompt str

The prompt to generate an image from.

required
model str | None

The model to use. Defaults to None.

None
token str | None

The API token. Defaults to None.

None
base_url str | None

The base URL of the API. Defaults to None.

None
size str

The size of the generated image. Defaults to "1024x1024".

'1024x1024'
quality str

The quality of the generated image. Defaults to "standard".

'standard'
as_b64_json bool

Return b64-encoded image instead of URL.

False
kwargs Any

Additional keyword arguments passed to litellm.image_generation.

{}

Returns:

Type Description
str | None

The generated image response.

Source code in src/jinjarope/llmfilters.py
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
def llm_generate_image(
    prompt: str,
    model: str | None = None,
    token: str | None = None,
    base_url: str | None = None,
    size: str = "1024x1024",
    quality: str = "standard",
    as_b64_json: bool = False,
    **kwargs: Any,
) -> str | None:
    """Generate an image using the LLM API and return the URL.

    Args:
        prompt: The prompt to generate an image from.
        model: The model to use. Defaults to None.
        token: The API token. Defaults to None.
        base_url: The base URL of the API. Defaults to None.
        size: The size of the generated image. Defaults to "1024x1024".
        quality: The quality of the generated image. Defaults to "standard".
        as_b64_json: Return b64-encoded image instead of URL.
        kwargs: Additional keyword arguments passed to litellm.image_generation.

    Returns:
        The image URL (or its base64 JSON payload when ``as_b64_json`` is set),
        or None if the API returned no image data.
    """
    response_format = "b64_json" if as_b64_json else "url"
    result = litellm.image_generation(
        prompt=prompt,
        model=model or os.getenv("OPENAI_IMAGE_MODEL"),
        api_key=token or os.getenv("OPENAI_API_TOKEN"),
        api_base=base_url,
        size=size,
        quality=quality,
        response_format=response_format,
        **kwargs,
    )
    # Guard: bail out early when the API produced no usable data.
    if not result or not result.data:
        return None
    # TODO: <img src="data:image/png;base64,iVBORw0KG..." />
    first = result.data[0]
    return first.b64_json if as_b64_json else first.url