@@ -89,8 +89,8 @@ class LLMConfig(BaseModel):
         confidence_threshold (float): Minimum confidence required to trigger the guardrail,
             as a float between 0.0 and 1.0.
         include_reasoning (bool): Whether to include reasoning/explanation in guardrail
-            output. Useful for development and debugging, but can be disabled in production
-            to save tokens. Defaults to True.
+            output. Useful for development and debugging, but disabled by default in production
+            to save tokens. Defaults to False.
     """

     model: str = Field(..., description="LLM model to use for checking the text")
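For context, a minimal sketch of how the flipped default behaves, assuming only the `LLMConfig` fields visible in this hunk (the `pydantic` import style and the concrete values passed below are illustrative, not taken from the PR):

```python
from pydantic import BaseModel, Field

class LLMConfig(BaseModel):
    model: str = Field(..., description="LLM model to use for checking the text")
    confidence_threshold: float = Field(..., description="Minimum confidence to trigger")
    include_reasoning: bool = False  # per the updated docstring: reasoning is now opt-in

cfg = LLMConfig(model="gpt-4o", confidence_threshold=0.7)  # values are illustrative
assert cfg.include_reasoning is False  # reasoning output must now be requested explicitly
```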
@@ -433,25 +433,25 @@ def create_llm_check_fn(
     use the configured LLM to analyze text, validate the result, and trigger if
     confidence exceeds the provided threshold.

-    When `include_reasoning=True` in the config, the guardrail will automatically
-    use an extended output model with a `reason` field. When `include_reasoning=False`,
-    it uses the base `LLMOutput` model (only `flagged` and `confidence` fields).
+    When a custom `output_model` is provided, it will always be used regardless of
+    `include_reasoning`. When no custom model is provided, `include_reasoning` controls
+    whether to use `LLMReasoningOutput` (with a `reason` field) or `LLMOutput` (the base model).

     Args:
         name (str): Name under which to register the guardrail.
         description (str): Short explanation of the guardrail's logic.
         system_prompt (str): Prompt passed to the LLM to control analysis.
         output_model (type[LLMOutput] | None): Custom schema for parsing the LLM output.
-            If None (default), uses `LLMReasoningOutput` when reasoning is enabled.
-            Provide a custom model only if you need additional fields beyond `reason`.
+            If provided, this model will always be used. If None (default), the model
+            selection is controlled by `include_reasoning` in the config.
         config_model (type[LLMConfig]): Configuration schema for the check_fn.

     Returns:
         CheckFn[GuardrailLLMContextProto, str, TLLMCfg]: Async check function
             to be registered as a guardrail.
     """
-    # Default to LLMReasoningOutput if no custom model provided
-    extended_output_model = output_model or LLMReasoningOutput
+    # Store the custom output model if provided
+    custom_output_model = output_model

     async def guardrail_func(
         ctx: GuardrailLLMContextProto,
@@ -473,9 +473,14 @@ async def guardrail_func(
         else:
             rendered_system_prompt = system_prompt

-        # Use base LLMOutput if reasoning is disabled, otherwise use the extended model
-        include_reasoning = getattr(config, "include_reasoning", False)
-        selected_output_model = extended_output_model if include_reasoning else LLMOutput
+        # Determine output model: custom model takes precedence, otherwise use include_reasoning
+        if custom_output_model is not None:
+            # Always use the custom model if provided
+            selected_output_model = custom_output_model
+        else:
+            # No custom model: use include_reasoning to decide
+            include_reasoning = getattr(config, "include_reasoning", False)
+            selected_output_model = LLMReasoningOutput if include_reasoning else LLMOutput

         analysis, token_usage = await run_llm(
             data,
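To make the new precedence rule concrete, here is a self-contained sketch of the selection logic this hunk implements. The two model classes mirror the fields named in the docstring above (`flagged`/`confidence` on the base model, `reason` on the reasoning model); `CategoryOutput` is a hypothetical custom schema invented for the example:

```python
from pydantic import BaseModel

class LLMOutput(BaseModel):
    flagged: bool
    confidence: float

class LLMReasoningOutput(LLMOutput):
    reason: str

class CategoryOutput(LLMReasoningOutput):
    category: str  # hypothetical extra field beyond `reason`

def select_output_model(
    custom_output_model: type[LLMOutput] | None,
    include_reasoning: bool,
) -> type[LLMOutput]:
    # A provided custom model always wins; otherwise include_reasoning decides.
    if custom_output_model is not None:
        return custom_output_model
    return LLMReasoningOutput if include_reasoning else LLMOutput

assert select_output_model(None, include_reasoning=False) is LLMOutput
assert select_output_model(None, include_reasoning=True) is LLMReasoningOutput
# The custom model takes precedence even when reasoning is disabled:
assert select_output_model(CategoryOutput, include_reasoning=False) is CategoryOutput
```

Note the design choice this encodes: a caller-supplied `output_model` stays authoritative, so guardrails with custom schemas are unaffected by the new `include_reasoning` default.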