diff --git a/src/Infrastructure/BotSharp.Abstraction/MLTasks/Settings/LlmModelSetting.cs b/src/Infrastructure/BotSharp.Abstraction/MLTasks/Settings/LlmModelSetting.cs index 33e7a53f4..7078e12d4 100644 --- a/src/Infrastructure/BotSharp.Abstraction/MLTasks/Settings/LlmModelSetting.cs +++ b/src/Infrastructure/BotSharp.Abstraction/MLTasks/Settings/LlmModelSetting.cs @@ -85,8 +85,11 @@ public class EmbeddingSetting #region Reasoning model settings public class ReasoningSetting { - public float Temperature { get; set; } = 1.0f; + public float? Temperature { get; set; } + + [Obsolete("Set EffortLevel in Parameters")] public string? EffortLevel { get; set; } + public Dictionary? Parameters { get; set; } } #endregion diff --git a/src/Plugins/BotSharp.Plugin.OpenAI/Providers/Chat/ChatCompletionProvider.cs b/src/Plugins/BotSharp.Plugin.OpenAI/Providers/Chat/ChatCompletionProvider.cs index 195a1e803..256c56f67 100644 --- a/src/Plugins/BotSharp.Plugin.OpenAI/Providers/Chat/ChatCompletionProvider.cs +++ b/src/Plugins/BotSharp.Plugin.OpenAI/Providers/Chat/ChatCompletionProvider.cs @@ -11,6 +11,7 @@ public class ChatCompletionProvider : IChatCompletion protected readonly OpenAiSettings _settings; protected readonly IServiceProvider _services; protected readonly ILogger _logger; + protected readonly IConversationStateService _state; protected string _model; private List renderedInstructions = []; @@ -21,11 +22,13 @@ public class ChatCompletionProvider : IChatCompletion public ChatCompletionProvider( OpenAiSettings settings, ILogger logger, - IServiceProvider services) + IServiceProvider services, + IConversationStateService state) { _settings = settings; _logger = logger; _services = services; + _state = state; } public async Task GetChatCompletions(Agent agent, List conversations) @@ -351,7 +354,6 @@ public async Task GetChatCompletionsStreamingAsync(Agent agent, protected (string, IEnumerable, ChatCompletionOptions) PrepareOptions(Agent agent, List conversations) { var 
agentService = _services.GetRequiredService(); - var state = _services.GetRequiredService(); var settingsService = _services.GetRequiredService(); var settings = settingsService.GetSetting(Provider, _model); var allowMultiModal = settings != null && settings.MultiModal; @@ -409,7 +411,7 @@ public async Task GetChatCompletionsStreamingAsync(Agent agent, var imageDetailLevel = ChatImageDetailLevel.Auto; if (allowMultiModal) { - imageDetailLevel = ParseChatImageDetailLevel(state.GetState("chat_image_detail_level")); + imageDetailLevel = ParseChatImageDetailLevel(_state.GetState("chat_image_detail_level")); } foreach (var message in filteredMessages) @@ -549,20 +551,15 @@ private string GetPrompt(IEnumerable messages, ChatCompletionOption private ChatCompletionOptions InitChatCompletionOption(Agent agent) { - var state = _services.GetRequiredService(); var settingsService = _services.GetRequiredService(); var settings = settingsService.GetSetting(Provider, _model); - // Reasoning effort - ChatReasoningEffortLevel? reasoningEffortLevel = null; - float? temperature = float.Parse(state.GetState("temperature", "0.0")); - if (settings?.Reasoning != null) - { - temperature = settings.Reasoning.Temperature; - var level = state.GetState("reasoning_effort_level") - .IfNullOrEmptyAs(agent?.LlmConfig?.ReasoningEffortLevel) - .IfNullOrEmptyAs(settings?.Reasoning?.EffortLevel); - reasoningEffortLevel = ParseReasoningEffortLevel(level); + // Reasoning + float? temperature = float.Parse(_state.GetState("temperature", "0.0")); + var (reasoningTemp, reasoningEffortLevel) = ParseReasoning(settings?.Reasoning, agent); + if (reasoningTemp.HasValue) + { + temperature = reasoningTemp.Value; } // Web search @@ -574,7 +571,7 @@ private ChatCompletionOptions InitChatCompletionOption(Agent agent) webSearchOptions = new(); } - var maxTokens = int.TryParse(state.GetState("max_tokens"), out var tokens) + var maxTokens = int.TryParse(_state.GetState("max_tokens"), out var tokens) ? 
tokens : agent.LlmConfig?.MaxOutputTokens ?? LlmConstant.DEFAULT_MAX_OUTPUT_TOKEN; @@ -587,6 +584,47 @@ private ChatCompletionOptions InitChatCompletionOption(Agent agent) }; } + /// + /// Parse reasoning setting: returns (temperature, reasoning effort level) + /// + /// + /// + private (float?, ChatReasoningEffortLevel?) ParseReasoning( + ReasoningSetting? settings, + Agent agent) + { + float? temperature = null; + ChatReasoningEffortLevel? reasoningEffortLevel = null; + + if (settings == null) + { + return (temperature, reasoningEffortLevel); + } + + if (settings.Temperature.HasValue) + { + temperature = settings.Temperature; + } + + + var defaultLevel = settings?.EffortLevel; + + if (settings?.Parameters != null + && settings.Parameters.TryGetValue("EffortLevel", out var settingValue) + && !string.IsNullOrEmpty(settingValue?.Default)) + { + defaultLevel = settingValue.Default; + } + + var level = _state.GetState("reasoning_effort_level") + .IfNullOrEmptyAs(agent?.LlmConfig?.ReasoningEffortLevel) + .IfNullOrEmptyAs(defaultLevel); + + reasoningEffortLevel = ParseReasoningEffortLevel(level); + + return (temperature, reasoningEffortLevel); + } + private ChatReasoningEffortLevel? ParseReasoningEffortLevel(string? 
level) { if (string.IsNullOrWhiteSpace(level)) @@ -594,9 +632,13 @@ private ChatCompletionOptions InitChatCompletionOption(Agent agent) { return null; } - var effortLevel = new ChatReasoningEffortLevel("minimal"); - switch (level.ToLower()) + var effortLevel = new ChatReasoningEffortLevel("low"); + level = level.ToLower(); + switch (level) { + case "minimal": + effortLevel = ChatReasoningEffortLevel.Minimal; + break; case "low": effortLevel = ChatReasoningEffortLevel.Low; break; @@ -606,7 +648,12 @@ private ChatCompletionOptions InitChatCompletionOption(Agent agent) case "high": effortLevel = ChatReasoningEffortLevel.High; break; + case "none": + case "xhigh": + effortLevel = new ChatReasoningEffortLevel(level); + break; default: + effortLevel = new ChatReasoningEffortLevel(level); break; } diff --git a/src/WebStarter/appsettings.json b/src/WebStarter/appsettings.json index 03c42e746..e0410d530 100644 --- a/src/WebStarter/appsettings.json +++ b/src/WebStarter/appsettings.json @@ -359,7 +359,7 @@ "CachedAudioInputCost": 0, "TextOutputCost": 0.03, "AudioOutputCost": 0, - "ImageInputCost": 0.01, + "ImageInputCost": 0.01, "CachedImageInputCost": 0.0025, "ImageOutputCost": 0.04, "ImageCosts": [ @@ -477,6 +477,93 @@ "TextOutputCost": 0.02, "AudioOutputCost": 0.08 } + }, + { + "Id": "gpt-5", + "Name": "gpt-5", + "Version": "gpt-5", + "ApiKey": "", + "Type": "chat", + "MultiModal": true, + "Capabilities": [ + "Chat", + "ImageReading" + ], + "Reasoning": { + "Temperature": 1.0, + "Parameters": { + "EffortLevel": { + "Default": "minimal", + "Options": [ "minimal", "low", "medium", "high" ] + } + } + }, + "Cost": { + "TextInputCost": 0.005, + "CachedTextInputCost": 0.0025, + "AudioInputCost": 0.04, + "CachedAudioInputCost": 0.0025, + "TextOutputCost": 0.02, + "AudioOutputCost": 0.08 + } + }, + { + "Id": "gpt-5.1", + "Name": "gpt-5.1", + "Version": "gpt-5.1", + "ApiKey": "", + "Type": "chat", + "MultiModal": true, + "Capabilities": [ + "Chat", + "ImageReading" + ], + "Reasoning": 
{ + "Temperature": 1.0, + "Parameters": { + "EffortLevel": { + "Default": "low", + "Options": [ "none", "low", "medium", "high" ] + } + } + }, + "Cost": { + "TextInputCost": 0.005, + "CachedTextInputCost": 0.0025, + "AudioInputCost": 0.04, + "CachedAudioInputCost": 0.0025, + "TextOutputCost": 0.02, + "AudioOutputCost": 0.08 + } + }, + { + "Id": "gpt-5.2", + "Name": "gpt-5.2", + "Version": "gpt-5.2", + "ApiKey": "", + "Type": "chat", + "MultiModal": true, + "Capabilities": [ + "Chat", + "ImageReading" + ], + "Reasoning": { + "Temperature": 1, + "Parameters": { + "EffortLevel": { + "Default": "low", + "Options": [ "none", "low", "medium", "high", "xhigh" ] + } + } + }, + "Cost": { + "TextInputCost": 0.005, + "CachedTextInputCost": 0.0025, + "AudioInputCost": 0.04, + "CachedAudioInputCost": 0.0025, + "TextOutputCost": 0.02, + "AudioOutputCost": 0.08 + } } ] },