-					<label className="block font-medium mb-1">
-						{t("settings:providers.lmStudio.draftModelId")}
-					</label>
-					<VSCodeTextField
-						value={apiConfiguration?.lmStudioDraftModelId || ""}
-						className="w-full"
-					/>
-					<div className="text-sm text-vscode-descriptionForeground">
-						{t("settings:providers.lmStudio.draftModelDesc")}
-					</div>
-					{draftModelNotAvailable && (
-						<div className="flex items-center gap-2 text-vscode-errorForeground text-sm">
-							<span className="codicon codicon-warning" />
-							<span>
-								{t("settings:validation.modelAvailability", {
-									modelId: apiConfiguration?.lmStudioDraftModelId,
-								})}
-							</span>
-						</div>
-					)}
+					<ModelPicker
+						apiConfiguration={apiConfiguration}
+						setApiConfigurationField={setApiConfigurationField}
+						models={lmStudioModels}
+						modelIdKey="lmStudioDraftModelId"
+						serviceName="LM Studio"
+						serviceUrl="https://lmstudio.ai/docs"
+					/>
+					<div className="text-sm text-vscode-descriptionForeground">
+						{t("settings:providers.lmStudio.draftModelDesc")}
+					</div>
-					{Object.keys(lmStudioModels).length > 0 && (
-						<>
-							<div className="font-medium">
-								{t("settings:providers.lmStudio.selectDraftModel")}
-							</div>
-							<VSCodeRadioGroup
-								value={apiConfiguration?.lmStudioDraftModelId || ""}
-								onChange={handleInputChange("lmStudioDraftModelId")}>
-								{Object.keys(lmStudioModels).map((model) => (
-									<VSCodeRadio key={model} value={model}>
-										{model}
-									</VSCodeRadio>
-								))}
-							</VSCodeRadioGroup>
-							{Object.keys(lmStudioModels).length === 0 && (
-								<div className="text-sm text-vscode-descriptionForeground">
-									{t("settings:providers.lmStudio.noModelsFound")}
-								</div>
-							)}
-						</>
-					)}
 				</>
 			)}
diff --git a/webview-ui/src/components/settings/providers/Ollama.tsx b/webview-ui/src/components/settings/providers/Ollama.tsx
index 615d3be4098..fa64fd2b010 100644
--- a/webview-ui/src/components/settings/providers/Ollama.tsx
+++ b/webview-ui/src/components/settings/providers/Ollama.tsx
@@ -1,6 +1,6 @@
import { useState, useCallback, useMemo, useEffect } from "react"
import { useEvent } from "react-use"
-import { VSCodeTextField, VSCodeRadioGroup, VSCodeRadio } from "@vscode/webview-ui-toolkit/react"
+import { VSCodeTextField } from "@vscode/webview-ui-toolkit/react"
import type { ProviderSettings } from "@roo-code/types"
@@ -11,6 +11,7 @@ import { useRouterModels } from "@src/components/ui/hooks/useRouterModels"
import { vscode } from "@src/utils/vscode"
import { inputEventTransform } from "../transforms"
+import { ModelPicker } from "../ModelPicker"
import { ModelRecord } from "@roo/api"
type OllamaProps = {
@@ -57,25 +58,27 @@ export const Ollama = ({ apiConfiguration, setApiConfigurationField }: OllamaPro
}, [])
// Check if the selected model exists in the fetched models
- const modelNotAvailable = useMemo(() => {
+ const modelNotAvailableError = useMemo(() => {
const selectedModel = apiConfiguration?.ollamaModelId
- if (!selectedModel) return false
+ if (!selectedModel) return undefined
// Check if model exists in local ollama models
if (Object.keys(ollamaModels).length > 0 && selectedModel in ollamaModels) {
- return false // Model is available locally
+ return undefined // Model is available locally
}
// If we have router models data for Ollama
if (routerModels.data?.ollama) {
const availableModels = Object.keys(routerModels.data.ollama)
// Show warning if model is not in the list (regardless of how many models there are)
- return !availableModels.includes(selectedModel)
+ if (!availableModels.includes(selectedModel)) {
+ return t("settings:validation.modelAvailability", { modelId: selectedModel })
+ }
}
// If neither source has loaded yet, don't show warning
- return false
- }, [apiConfiguration?.ollamaModelId, routerModels.data, ollamaModels])
+ return undefined
+ }, [apiConfiguration?.ollamaModelId, routerModels.data, ollamaModels, t])
return (
<>
@@ -100,40 +103,21 @@ export const Ollama = ({ apiConfiguration, setApiConfigurationField }: OllamaPro
)}
-					<label className="block font-medium mb-1">
-						{t("settings:providers.ollama.modelId")}
-					</label>
-					{modelNotAvailable && (
-						<div className="flex items-center gap-2 text-vscode-errorForeground text-sm">
-							<span className="codicon codicon-warning" />
-							<span>
-								{t("settings:validation.modelAvailability", { modelId: apiConfiguration?.ollamaModelId })}
-							</span>
-						</div>
-					)}
-					{Object.keys(ollamaModels).length > 0 && (
-						<VSCodeRadioGroup
-							value={apiConfiguration?.ollamaModelId || ""}
-							onChange={handleInputChange("ollamaModelId")}>
-							{Object.keys(ollamaModels).map((model) => (
-								<VSCodeRadio key={model} value={model}>
-									{model}
-								</VSCodeRadio>
-							))}
-						</VSCodeRadioGroup>
-					)}
+					<ModelPicker
+						apiConfiguration={apiConfiguration}
+						setApiConfigurationField={setApiConfigurationField}
+						models={ollamaModels}
+						modelIdKey="ollamaModelId"
+						serviceName="Ollama"
+						serviceUrl="https://ollama.ai"
+						errorMessage={modelNotAvailableError}
+					/>
-							onInput={(e: any) => {
-								const value = e.target?.value
+							onInput={(e) => {
+								const value = (e.target as HTMLInputElement)?.value
if (value === "") {
setApiConfigurationField("ollamaNumCtx", undefined)
} else {
diff --git a/webview-ui/src/components/settings/providers/VSCodeLM.tsx b/webview-ui/src/components/settings/providers/VSCodeLM.tsx
index a2097badf61..73d2bdd22fc 100644
--- a/webview-ui/src/components/settings/providers/VSCodeLM.tsx
+++ b/webview-ui/src/components/settings/providers/VSCodeLM.tsx
@@ -1,15 +1,14 @@
-import { useState, useCallback } from "react"
+import { useState, useCallback, useMemo } from "react"
import { useEvent } from "react-use"
import { LanguageModelChatSelector } from "vscode"
-import type { ProviderSettings } from "@roo-code/types"
+import type { ProviderSettings, ModelInfo } from "@roo-code/types"
import { ExtensionMessage } from "@roo/ExtensionMessage"
import { useAppTranslation } from "@src/i18n/TranslationContext"
-import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@src/components/ui"
-import { inputEventTransform } from "../transforms"
+import { ModelPicker } from "../ModelPicker"
type VSCodeLMProps = {
apiConfiguration: ProviderSettings
@@ -21,17 +20,6 @@ export const VSCodeLM = ({ apiConfiguration, setApiConfigurationField }: VSCodeL
 	const [vsCodeLmModels, setVsCodeLmModels] = useState<LanguageModelChatSelector[]>([])
- const handleInputChange = useCallback(
-		<K extends keyof ProviderSettings, E>(
- field: K,
- transform: (event: E) => ProviderSettings[K] = inputEventTransform,
- ) =>
- (event: E | Event) => {
- setApiConfigurationField(field, transform(event as E))
- },
- [setApiConfigurationField],
- )
-
const onMessage = useCallback((event: MessageEvent) => {
const message: ExtensionMessage = event.data
@@ -47,40 +35,59 @@ export const VSCodeLM = ({ apiConfiguration, setApiConfigurationField }: VSCodeL
useEvent("message", onMessage)
+ // Convert VSCode LM models array to Record format for ModelPicker
+	const modelsRecord = useMemo((): Record<string, ModelInfo> => {
+ return vsCodeLmModels.reduce(
+ (acc, model) => {
+ const modelId = `${model.vendor}/${model.family}`
+ acc[modelId] = {
+ maxTokens: 0,
+ contextWindow: 0,
+ supportsPromptCache: false,
+ description: `${model.vendor} - ${model.family}`,
+ }
+ return acc
+ },
+			{} as Record<string, ModelInfo>,
+ )
+ }, [vsCodeLmModels])
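+	// e.g. [{ vendor: "copilot", family: "gpt-4o" }] becomes { "copilot/gpt-4o": { ... } } (model id illustrative)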
+
+ // Transform string model ID to { vendor, family } object for storage
+ const valueTransform = useCallback((modelId: string) => {
+ const [vendor, family] = modelId.split("/")
+ return { vendor, family }
+ }, [])
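+	// e.g. "copilot/gpt-4o" -> { vendor: "copilot", family: "gpt-4o" } (model id illustrative)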
+
+ // Transform stored { vendor, family } object back to display string
+ const displayTransform = useCallback((value: unknown) => {
+ if (!value) return ""
+ const selector = value as { vendor?: string; family?: string }
+ return selector.vendor && selector.family ? `${selector.vendor}/${selector.family}` : ""
+ }, [])
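+	// e.g. { vendor: "copilot", family: "gpt-4o" } -> "copilot/gpt-4o"; missing fields yield ""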
+
return (
<>
-			<div>
-				<label className="block font-medium mb-1">
-					{t("settings:providers.vscodeLmModel")}
-				</label>
-				{vsCodeLmModels.length > 0 ? (
-					<Select
-						value={
-							apiConfiguration?.vsCodeLmModelSelector
-								? `${apiConfiguration.vsCodeLmModelSelector.vendor}/${apiConfiguration.vsCodeLmModelSelector.family}`
-								: ""
-						}
-						onValueChange={handleInputChange("vsCodeLmModelSelector", (value) => {
-							const [vendor, family] = value.split("/")
-							return { vendor, family }
-						})}>
-						<SelectTrigger className="w-full">
-							<SelectValue />
-						</SelectTrigger>
-						<SelectContent>
-							{vsCodeLmModels.map((model) => (
-								<SelectItem
-									key={`${model.vendor}/${model.family}`}
-									value={`${model.vendor}/${model.family}`}>
-									{`${model.vendor} - ${model.family}`}
-								</SelectItem>
-							))}
-						</SelectContent>
-					</Select>
-				) : (
-					<div className="text-sm text-vscode-descriptionForeground">
-						{t("settings:providers.vscodeLmDescription")}
-					</div>
-				)}
-			</div>
+			{vsCodeLmModels.length > 0 ? (
+				<ModelPicker
+					apiConfiguration={apiConfiguration}
+					setApiConfigurationField={setApiConfigurationField}
+					models={modelsRecord}
+					modelIdKey="vsCodeLmModelSelector"
+					serviceName="VS Code LM"
+					serviceUrl="https://code.visualstudio.com/api/extension-guides/language-model"
+					valueTransform={valueTransform}
+					displayTransform={displayTransform}
+				/>
+			) : (
+				<div>
+					<label className="block font-medium mb-1">
+						{t("settings:providers.vscodeLmModel")}
+					</label>
+					<div className="text-sm text-vscode-descriptionForeground">
+						{t("settings:providers.vscodeLmDescription")}
+					</div>
+				</div>
+			)}
{t("settings:providers.vscodeLmWarning")}
>
)
diff --git a/webview-ui/src/components/settings/utils/__tests__/providerModelConfig.spec.ts b/webview-ui/src/components/settings/utils/__tests__/providerModelConfig.spec.ts
new file mode 100644
index 00000000000..931796d2b02
--- /dev/null
+++ b/webview-ui/src/components/settings/utils/__tests__/providerModelConfig.spec.ts
@@ -0,0 +1,155 @@
+import {
+ PROVIDER_SERVICE_CONFIG,
+ PROVIDER_DEFAULT_MODEL_IDS,
+ getProviderServiceConfig,
+ getDefaultModelIdForProvider,
+ getStaticModelsForProvider,
+ isStaticModelProvider,
+ PROVIDERS_WITH_CUSTOM_MODEL_UI,
+ shouldUseGenericModelPicker,
+} from "../providerModelConfig"
+
+describe("providerModelConfig", () => {
+ describe("PROVIDER_SERVICE_CONFIG", () => {
+ it("contains service config for anthropic", () => {
+ expect(PROVIDER_SERVICE_CONFIG.anthropic).toEqual({
+ serviceName: "Anthropic",
+ serviceUrl: "https://console.anthropic.com",
+ })
+ })
+
+ it("contains service config for bedrock", () => {
+ expect(PROVIDER_SERVICE_CONFIG.bedrock).toEqual({
+ serviceName: "Amazon Bedrock",
+ serviceUrl: "https://aws.amazon.com/bedrock",
+ })
+ })
+
+ it("contains service config for ollama", () => {
+ expect(PROVIDER_SERVICE_CONFIG.ollama).toEqual({
+ serviceName: "Ollama",
+ serviceUrl: "https://ollama.ai",
+ })
+ })
+
+ it("contains service config for lmstudio", () => {
+ expect(PROVIDER_SERVICE_CONFIG.lmstudio).toEqual({
+ serviceName: "LM Studio",
+ serviceUrl: "https://lmstudio.ai/docs",
+ })
+ })
+
+ it("contains service config for vscode-lm", () => {
+ expect(PROVIDER_SERVICE_CONFIG["vscode-lm"]).toEqual({
+ serviceName: "VS Code LM",
+ serviceUrl: "https://code.visualstudio.com/api/extension-guides/language-model",
+ })
+ })
+ })
+
+ describe("getProviderServiceConfig", () => {
+ it("returns correct config for known provider", () => {
+ const config = getProviderServiceConfig("gemini")
+ expect(config.serviceName).toBe("Google Gemini")
+ expect(config.serviceUrl).toBe("https://ai.google.dev")
+ })
+
+ it("returns fallback config for unknown provider", () => {
+ const config = getProviderServiceConfig("unknown-provider" as any)
+ expect(config.serviceName).toBe("unknown-provider")
+ expect(config.serviceUrl).toBe("")
+ })
+ })
+
+ describe("PROVIDER_DEFAULT_MODEL_IDS", () => {
+ it("contains default model IDs for static providers", () => {
+ expect(PROVIDER_DEFAULT_MODEL_IDS.anthropic).toBeDefined()
+ expect(PROVIDER_DEFAULT_MODEL_IDS.bedrock).toBeDefined()
+ expect(PROVIDER_DEFAULT_MODEL_IDS.gemini).toBeDefined()
+ expect(PROVIDER_DEFAULT_MODEL_IDS["openai-native"]).toBeDefined()
+ })
+ })
+
+ describe("getDefaultModelIdForProvider", () => {
+ it("returns default model ID for known provider", () => {
+ const defaultId = getDefaultModelIdForProvider("anthropic")
+ expect(defaultId).toBeDefined()
+ expect(typeof defaultId).toBe("string")
+ expect(defaultId.length).toBeGreaterThan(0)
+ })
+
+ it("returns empty string for unknown provider", () => {
+ const defaultId = getDefaultModelIdForProvider("unknown" as any)
+ expect(defaultId).toBe("")
+ })
+ })
+
+ describe("getStaticModelsForProvider", () => {
+ it("returns models for anthropic provider", () => {
+ const models = getStaticModelsForProvider("anthropic")
+ expect(Object.keys(models).length).toBeGreaterThan(0)
+ })
+
+ it("adds custom-arn option for bedrock provider", () => {
+ const models = getStaticModelsForProvider("bedrock", "Use Custom ARN")
+ expect(models["custom-arn"]).toBeDefined()
+ expect(models["custom-arn"].description).toBe("Use Custom ARN")
+ })
+
+ it("returns empty object for providers without static models", () => {
+ const models = getStaticModelsForProvider("openrouter")
+ expect(Object.keys(models).length).toBe(0)
+ })
+ })
+
+ describe("isStaticModelProvider", () => {
+ it("returns true for providers with static models", () => {
+ expect(isStaticModelProvider("anthropic")).toBe(true)
+ expect(isStaticModelProvider("bedrock")).toBe(true)
+ expect(isStaticModelProvider("gemini")).toBe(true)
+ expect(isStaticModelProvider("openai-native")).toBe(true)
+ })
+
+ it("returns false for providers without static models", () => {
+ expect(isStaticModelProvider("openrouter")).toBe(false)
+ expect(isStaticModelProvider("ollama")).toBe(false)
+ expect(isStaticModelProvider("lmstudio")).toBe(false)
+ })
+ })
+
+ describe("PROVIDERS_WITH_CUSTOM_MODEL_UI", () => {
+ it("includes providers that have their own model selection UI", () => {
+ expect(PROVIDERS_WITH_CUSTOM_MODEL_UI).toContain("openrouter")
+ expect(PROVIDERS_WITH_CUSTOM_MODEL_UI).toContain("ollama")
+ expect(PROVIDERS_WITH_CUSTOM_MODEL_UI).toContain("lmstudio")
+ expect(PROVIDERS_WITH_CUSTOM_MODEL_UI).toContain("vscode-lm")
+ expect(PROVIDERS_WITH_CUSTOM_MODEL_UI).toContain("claude-code")
+ })
+
+ it("does not include static providers using generic picker", () => {
+ expect(PROVIDERS_WITH_CUSTOM_MODEL_UI).not.toContain("anthropic")
+ expect(PROVIDERS_WITH_CUSTOM_MODEL_UI).not.toContain("gemini")
+ expect(PROVIDERS_WITH_CUSTOM_MODEL_UI).not.toContain("bedrock")
+ })
+ })
+
+ describe("shouldUseGenericModelPicker", () => {
+ it("returns true for static providers without custom UI", () => {
+ expect(shouldUseGenericModelPicker("anthropic")).toBe(true)
+ expect(shouldUseGenericModelPicker("bedrock")).toBe(true)
+ expect(shouldUseGenericModelPicker("gemini")).toBe(true)
+ expect(shouldUseGenericModelPicker("deepseek")).toBe(true)
+ })
+
+ it("returns false for providers with custom model UI", () => {
+ expect(shouldUseGenericModelPicker("openrouter")).toBe(false)
+ expect(shouldUseGenericModelPicker("ollama")).toBe(false)
+ expect(shouldUseGenericModelPicker("lmstudio")).toBe(false)
+ expect(shouldUseGenericModelPicker("vscode-lm")).toBe(false)
+ })
+
+ it("returns false for providers without static models", () => {
+ expect(shouldUseGenericModelPicker("openai")).toBe(false)
+ })
+ })
+})
diff --git a/webview-ui/src/components/settings/utils/providerModelConfig.ts b/webview-ui/src/components/settings/utils/providerModelConfig.ts
new file mode 100644
index 00000000000..e71081a7a1a
--- /dev/null
+++ b/webview-ui/src/components/settings/utils/providerModelConfig.ts
@@ -0,0 +1,146 @@
+import type { ProviderName, ModelInfo } from "@roo-code/types"
+import {
+ anthropicDefaultModelId,
+ bedrockDefaultModelId,
+ cerebrasDefaultModelId,
+ deepSeekDefaultModelId,
+ doubaoDefaultModelId,
+ moonshotDefaultModelId,
+ geminiDefaultModelId,
+ mistralDefaultModelId,
+ openAiNativeDefaultModelId,
+ qwenCodeDefaultModelId,
+ vertexDefaultModelId,
+ xaiDefaultModelId,
+ groqDefaultModelId,
+ sambaNovaDefaultModelId,
+ internationalZAiDefaultModelId,
+ fireworksDefaultModelId,
+ featherlessDefaultModelId,
+ minimaxDefaultModelId,
+ basetenDefaultModelId,
+} from "@roo-code/types"
+
+import { MODELS_BY_PROVIDER } from "../constants"
+
+export interface ProviderServiceConfig {
+ serviceName: string
+ serviceUrl: string
+}
+
+export const PROVIDER_SERVICE_CONFIG: Partial<Record<ProviderName, ProviderServiceConfig>> = {
+ anthropic: { serviceName: "Anthropic", serviceUrl: "https://console.anthropic.com" },
+ bedrock: { serviceName: "Amazon Bedrock", serviceUrl: "https://aws.amazon.com/bedrock" },
+ cerebras: { serviceName: "Cerebras", serviceUrl: "https://cerebras.ai" },
+ deepseek: { serviceName: "DeepSeek", serviceUrl: "https://platform.deepseek.com" },
+ doubao: { serviceName: "Doubao", serviceUrl: "https://www.volcengine.com/product/doubao" },
+ moonshot: { serviceName: "Moonshot", serviceUrl: "https://platform.moonshot.cn" },
+ gemini: { serviceName: "Google Gemini", serviceUrl: "https://ai.google.dev" },
+ mistral: { serviceName: "Mistral", serviceUrl: "https://console.mistral.ai" },
+ "openai-native": { serviceName: "OpenAI", serviceUrl: "https://platform.openai.com" },
+ "qwen-code": { serviceName: "Qwen Code", serviceUrl: "https://dashscope.console.aliyun.com" },
+ vertex: { serviceName: "GCP Vertex AI", serviceUrl: "https://console.cloud.google.com/vertex-ai" },
+ xai: { serviceName: "xAI", serviceUrl: "https://x.ai" },
+ groq: { serviceName: "Groq", serviceUrl: "https://console.groq.com" },
+ sambanova: { serviceName: "SambaNova", serviceUrl: "https://sambanova.ai" },
+ zai: { serviceName: "Z.ai", serviceUrl: "https://z.ai" },
+ fireworks: { serviceName: "Fireworks AI", serviceUrl: "https://fireworks.ai" },
+ featherless: { serviceName: "Featherless AI", serviceUrl: "https://featherless.ai" },
+ minimax: { serviceName: "MiniMax", serviceUrl: "https://minimax.chat" },
+ baseten: { serviceName: "Baseten", serviceUrl: "https://baseten.co" },
+ ollama: { serviceName: "Ollama", serviceUrl: "https://ollama.ai" },
+ lmstudio: { serviceName: "LM Studio", serviceUrl: "https://lmstudio.ai/docs" },
+ "vscode-lm": {
+ serviceName: "VS Code LM",
+ serviceUrl: "https://code.visualstudio.com/api/extension-guides/language-model",
+ },
+}
+
+export const PROVIDER_DEFAULT_MODEL_IDS: Partial<Record<ProviderName, string>> = {
+ anthropic: anthropicDefaultModelId,
+ bedrock: bedrockDefaultModelId,
+ cerebras: cerebrasDefaultModelId,
+ deepseek: deepSeekDefaultModelId,
+ doubao: doubaoDefaultModelId,
+ moonshot: moonshotDefaultModelId,
+ gemini: geminiDefaultModelId,
+ mistral: mistralDefaultModelId,
+ "openai-native": openAiNativeDefaultModelId,
+ "qwen-code": qwenCodeDefaultModelId,
+ vertex: vertexDefaultModelId,
+ xai: xaiDefaultModelId,
+ groq: groqDefaultModelId,
+ sambanova: sambaNovaDefaultModelId,
+ zai: internationalZAiDefaultModelId,
+ fireworks: fireworksDefaultModelId,
+ featherless: featherlessDefaultModelId,
+ minimax: minimaxDefaultModelId,
+ baseten: basetenDefaultModelId,
+}
+
+export const getProviderServiceConfig = (provider: ProviderName): ProviderServiceConfig => {
+ return PROVIDER_SERVICE_CONFIG[provider] ?? { serviceName: provider, serviceUrl: "" }
+}
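+// Example: getProviderServiceConfig("gemini") -> { serviceName: "Google Gemini", serviceUrl: "https://ai.google.dev" },
+// while an unknown provider falls back to { serviceName: provider, serviceUrl: "" }.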
+
+export const getDefaultModelIdForProvider = (provider: ProviderName): string => {
+ return PROVIDER_DEFAULT_MODEL_IDS[provider] ?? ""
+}
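+// Example: getDefaultModelIdForProvider("anthropic") -> anthropicDefaultModelId; unknown providers -> "".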
+
+export const getStaticModelsForProvider = (
+ provider: ProviderName,
+ customArnLabel?: string,
+): Record<string, ModelInfo> => {
+ const models = MODELS_BY_PROVIDER[provider] ?? {}
+
+ // Add custom-arn option for Bedrock
+ if (provider === "bedrock") {
+ return {
+ ...models,
+ "custom-arn": {
+ maxTokens: 0,
+ contextWindow: 0,
+ supportsPromptCache: false,
+ description: customArnLabel ?? "Use Custom ARN",
+ },
+ }
+ }
+
+ return models
+}
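+// Example: getStaticModelsForProvider("bedrock", "Use Custom ARN") returns the static Bedrock
+// models plus a synthetic "custom-arn" entry whose description is the given label.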
+
+/**
+ * Checks if a provider uses static models from MODELS_BY_PROVIDER
+ */
+export const isStaticModelProvider = (provider: ProviderName): boolean => {
+ return provider in MODELS_BY_PROVIDER
+}
+
+/**
+ * List of providers that have their own custom model selection UI
+ * and should not use the generic ModelPicker in ApiOptions
+ */
+export const PROVIDERS_WITH_CUSTOM_MODEL_UI: ProviderName[] = [
+ "openrouter",
+ "requesty",
+ "unbound",
+ "deepinfra",
+ "claude-code",
+ "openai", // OpenAI Compatible
+ "litellm",
+ "io-intelligence",
+ "vercel-ai-gateway",
+ "roo",
+ "chutes",
+ "ollama",
+ "lmstudio",
+ "vscode-lm",
+ "huggingface",
+ "human-relay",
+]
+
+/**
+ * Checks if a provider should use the generic ModelPicker
+ */
+export const shouldUseGenericModelPicker = (provider: ProviderName): boolean => {
+ return isStaticModelProvider(provider) && !PROVIDERS_WITH_CUSTOM_MODEL_UI.includes(provider)
+}
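+
+// Example: shouldUseGenericModelPicker("anthropic") -> true (static models, no custom UI),
+// whereas shouldUseGenericModelPicker("ollama") -> false (Ollama.tsx renders its own picker).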