Skip to content

Commit 95654f7

Browse files
committed
Respect custom output models
1 parent 5b2f338 commit 95654f7

File tree

3 files changed

+19
-14
lines changed

3 files changed

+19
-14
lines changed

docs/ref/checks/off_topic_prompts.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ Returns a `GuardrailResult` with the following `info` dictionary:
4343
}
4444
```
4545

46-
- **`flagged`**: Whether the content aligns with your business scope
46+
- **`flagged`**: Whether the content is off-topic (outside your business scope)
4747
- **`confidence`**: Confidence score (0.0 to 1.0) for the assessment
4848
- **`threshold`**: The confidence threshold that was configured
4949
- **`reason`**: Explanation of why the input was flagged (or not flagged) - *only included when `include_reasoning=true`*

src/guardrails/checks/text/llm_base.py

Lines changed: 17 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -89,8 +89,8 @@ class LLMConfig(BaseModel):
8989
confidence_threshold (float): Minimum confidence required to trigger the guardrail,
9090
as a float between 0.0 and 1.0.
9191
include_reasoning (bool): Whether to include reasoning/explanation in guardrail
92-
output. Useful for development and debugging, but can be disabled in production
93-
to save tokens. Defaults to True.
92+
output. Useful for development and debugging; disabled by default
93+
to save tokens. Defaults to False.
9494
"""
9595

9696
model: str = Field(..., description="LLM model to use for checking the text")
@@ -433,25 +433,25 @@ def create_llm_check_fn(
433433
use the configured LLM to analyze text, validate the result, and trigger if
434434
confidence exceeds the provided threshold.
435435
436-
When `include_reasoning=True` in the config, the guardrail will automatically
437-
use an extended output model with a `reason` field. When `include_reasoning=False`,
438-
it uses the base `LLMOutput` model (only `flagged` and `confidence` fields).
436+
When a custom `output_model` is provided, it will always be used regardless of
437+
`include_reasoning`. When no custom model is provided, `include_reasoning` controls
438+
whether to use `LLMReasoningOutput` (with reason field) or `LLMOutput` (base model).
439439
440440
Args:
441441
name (str): Name under which to register the guardrail.
442442
description (str): Short explanation of the guardrail's logic.
443443
system_prompt (str): Prompt passed to the LLM to control analysis.
444444
output_model (type[LLMOutput] | None): Custom schema for parsing the LLM output.
445-
If None (default), uses `LLMReasoningOutput` when reasoning is enabled.
446-
Provide a custom model only if you need additional fields beyond `reason`.
445+
If provided, this model will always be used. If None (default), the model
446+
selection is controlled by `include_reasoning` in the config.
447447
config_model (type[LLMConfig]): Configuration schema for the check_fn.
448448
449449
Returns:
450450
CheckFn[GuardrailLLMContextProto, str, TLLMCfg]: Async check function
451451
to be registered as a guardrail.
452452
"""
453-
# Default to LLMReasoningOutput if no custom model provided
454-
extended_output_model = output_model or LLMReasoningOutput
453+
# Store the custom output model if provided
454+
custom_output_model = output_model
455455

456456
async def guardrail_func(
457457
ctx: GuardrailLLMContextProto,
@@ -473,9 +473,14 @@ async def guardrail_func(
473473
else:
474474
rendered_system_prompt = system_prompt
475475

476-
# Use base LLMOutput if reasoning is disabled, otherwise use the extended model
477-
include_reasoning = getattr(config, "include_reasoning", False)
478-
selected_output_model = extended_output_model if include_reasoning else LLMOutput
476+
# Determine output model: custom model takes precedence, otherwise use include_reasoning
477+
if custom_output_model is not None:
478+
# Always use the custom model if provided
479+
selected_output_model = custom_output_model
480+
else:
481+
# No custom model: use include_reasoning to decide
482+
include_reasoning = getattr(config, "include_reasoning", False)
483+
selected_output_model = LLMReasoningOutput if include_reasoning else LLMOutput
479484

480485
analysis, token_usage = await run_llm(
481486
data,

tests/unit/checks/test_llm_base.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -255,7 +255,7 @@ async def fake_run_llm(
255255
system_prompt="Test prompt",
256256
)
257257

258-
# Test with include_reasoning=True (default)
258+
# Test with include_reasoning explicitly set to True
259259
config = LLMConfig(model="gpt-test", confidence_threshold=0.5, include_reasoning=True)
260260
context = SimpleNamespace(guardrail_llm="fake-client")
261261
result = await guardrail_fn(context, "test", config)

0 commit comments

Comments (0)