+ )}
>
)
}
diff --git a/webview-ui/src/i18n/TranslationContext.tsx b/webview-ui/src/i18n/TranslationContext.tsx
index 17f15450654..8412b1ddb65 100644
--- a/webview-ui/src/i18n/TranslationContext.tsx
+++ b/webview-ui/src/i18n/TranslationContext.tsx
@@ -35,7 +35,28 @@ export const TranslationProvider: React.FC<{ children: ReactNode }> = ({ childre
// Memoize the translation function to prevent unnecessary re-renders
const translate = useCallback(
(key: string, options?: Record<string, any>) => {
- return i18n.t(key, options)
+ const result = i18n.t(key, options)
+ // Safeguard: ensure we always return a string, not an object
+ // This handles cases where plural objects might not be resolved correctly
+ if (typeof result === "object" && result !== null) {
+ // Type guard for plural object
+ const pluralResult = result as Record<string, unknown>
+ // If it's a plural object and we have a count, try to resolve it
+ if (options?.count !== undefined && "one" in pluralResult && "other" in pluralResult) {
+ const count = options.count
+ // Resolve the plural form manually: prefer "one" for an exact count of 1, otherwise "other"
+ if (count === 1 && typeof pluralResult.one === "string") {
+ return pluralResult.one
+ }
+ if (typeof pluralResult.other === "string") {
+ return pluralResult.other
+ }
+ }
+ // Fallback: return the key if we can't resolve it
+ console.warn(`Translation key "${key}" returned an object instead of a string:`, result)
+ return key
+ }
+ return result as string
},
[i18n],
)
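The fallback above only matters when i18n.t() hands back an unresolved plural object instead of a string, which can happen when a plural entry is stored as a nested { one, other } object (as the "models" entries in the locale files below are) rather than as i18next's suffixed _one/_other keys. A minimal standalone sketch of the same resolution order, using a hypothetical resolvePlural helper that is not part of this diff:

type PluralObject = Record<string, unknown>

// Hypothetical helper, not part of this diff: mirrors the safeguard's resolution order.
function resolvePlural(key: string, result: unknown, count?: number): string {
	if (typeof result === "string") return result
	if (result !== null && typeof result === "object") {
		const plural = result as PluralObject
		// Prefer the singular form only for an exact count of 1
		if (count === 1 && typeof plural.one === "string") return plural.one
		if (typeof plural.other === "string") return plural.other
	}
	// Last resort: surface the key rather than "[object Object]" in the UI
	return key
}

// resolvePlural("providers.ollama.models", { one: "model", other: "models" }, 3) -> "models"
// resolvePlural("providers.ollama.models", { one: "model", other: "models" }, 1) -> "model"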
diff --git a/webview-ui/src/i18n/__tests__/ollama-coverage.spec.ts b/webview-ui/src/i18n/__tests__/ollama-coverage.spec.ts
new file mode 100644
index 00000000000..b26c194ad5d
--- /dev/null
+++ b/webview-ui/src/i18n/__tests__/ollama-coverage.spec.ts
@@ -0,0 +1,190 @@
+import { describe, it, expect } from "vitest"
+import fs from "fs"
+import path from "path"
+import { fileURLToPath } from "url"
+
+// Get directory path for ES modules
+const __filename = fileURLToPath(import.meta.url)
+const __dirname = path.dirname(__filename)
+
+// Import English as the reference (source of truth)
+import enSettings from "../locales/en/settings.json"
+
+// All target locales (17 non-English locales)
+const locales = [
+ "ca", // Catalan
+ "de", // German
+ "es", // Spanish
+ "fr", // French
+ "hi", // Hindi
+ "id", // Indonesian
+ "it", // Italian
+ "ja", // Japanese
+ "ko", // Korean
+ "nl", // Dutch
+ "pl", // Polish
+ "pt-BR", // Portuguese (Brazil)
+ "ru", // Russian
+ "tr", // Turkish
+ "vi", // Vietnamese
+ "zh-CN", // Chinese (Simplified)
+ "zh-TW", // Chinese (Traditional)
+]
+
+describe("Ollama i18n Coverage", () => {
+ // Get all keys from English (source of truth)
+ const enOllamaKeys = Object.keys(enSettings.providers.ollama)
+
+ locales.forEach((locale) => {
+ it(`should have all Ollama keys for ${locale}`, () => {
+ const filePath = path.join(__dirname, `../locales/${locale}/settings.json`)
+ const fileContent = fs.readFileSync(filePath, "utf-8")
+ const localeSettings = JSON.parse(fileContent)
+
+ // Verify providers.ollama exists
+ expect(localeSettings.providers).toBeDefined()
+ expect(localeSettings.providers.ollama).toBeDefined()
+
+ const localeOllamaKeys = Object.keys(localeSettings.providers.ollama)
+
+ // Check that all English keys exist in the locale
+ enOllamaKeys.forEach((key) => {
+ expect(localeOllamaKeys, `Missing key "${key}" in ${locale}/settings.json`).toContain(key)
+ })
+ })
+
+ it(`should have valid JSON structure for ${locale}`, () => {
+ const filePath = path.join(__dirname, `../locales/${locale}/settings.json`)
+ const fileContent = fs.readFileSync(filePath, "utf-8")
+
+ // Should parse without errors
+ expect(() => JSON.parse(fileContent)).not.toThrow()
+
+ const localeSettings = JSON.parse(fileContent)
+
+ // Verify structure
+ expect(localeSettings).toBeDefined()
+ expect(localeSettings.providers).toBeDefined()
+ expect(localeSettings.providers.ollama).toBeDefined()
+ expect(typeof localeSettings.providers.ollama).toBe("object")
+ })
+
+ it(`should have "models" key as plural object for ${locale}`, () => {
+ const filePath = path.join(__dirname, `../locales/${locale}/settings.json`)
+ const fileContent = fs.readFileSync(filePath, "utf-8")
+ const localeSettings = JSON.parse(fileContent)
+
+ const modelsKey = localeSettings.providers.ollama.models
+
+ // Should be an object (plural form), not a string
+ expect(typeof modelsKey).toBe("object")
+ expect(modelsKey).not.toBeNull()
+
+ // Should have at least "one" and "other" keys
+ expect(modelsKey).toHaveProperty("one")
+ expect(modelsKey).toHaveProperty("other")
+
+ // Russian and Polish should have additional plural forms
+ if (locale === "ru" || locale === "pl") {
+ expect(modelsKey).toHaveProperty("few")
+ expect(modelsKey).toHaveProperty("many")
+ }
+
+ // All values should be strings
+ Object.values(modelsKey).forEach((value) => {
+ expect(typeof value).toBe("string")
+ expect(value).not.toBe("")
+ })
+ })
+ })
+
+ it("should have consistent key count across all locales", () => {
+ const keyCounts = locales.map((locale) => {
+ const filePath = path.join(__dirname, `../locales/${locale}/settings.json`)
+ const fileContent = fs.readFileSync(filePath, "utf-8")
+ const localeSettings = JSON.parse(fileContent)
+ return Object.keys(localeSettings.providers.ollama).length
+ })
+
+ const enKeyCount = enOllamaKeys.length
+
+ // All locales should have the same number of keys as English
+ keyCounts.forEach((count, index) => {
+ expect(count, `${locales[index]} has ${count} keys, but English has ${enKeyCount} keys`).toBe(enKeyCount)
+ })
+ })
+
+ // Test common terms translations (Provider, Base, Model)
+ describe("Common Terms Translation Coverage", () => {
+ // English words that should not appear in non-English locales
+ const englishWords = ["Provider", "Providers", "Base", "Model", "URL", "ID"]
+
+ locales.forEach((locale) => {
+ it(`should have translated common terms for ${locale}`, () => {
+ const filePath = path.join(__dirname, `../locales/${locale}/settings.json`)
+ const fileContent = fs.readFileSync(filePath, "utf-8")
+ const localeSettings = JSON.parse(fileContent)
+
+ // Check sections.providers
+ expect(localeSettings.sections).toBeDefined()
+ expect(localeSettings.sections.providers).toBeDefined()
+ expect(typeof localeSettings.sections.providers).toBe("string")
+ expect(localeSettings.sections.providers).not.toBe("")
+ expect(localeSettings.sections.providers).not.toBe("Provider")
+ expect(localeSettings.sections.providers).not.toBe("Providers")
+
+ // Check providers.ollama.baseUrl
+ expect(localeSettings.providers).toBeDefined()
+ expect(localeSettings.providers.ollama).toBeDefined()
+ expect(localeSettings.providers.ollama.baseUrl).toBeDefined()
+ expect(typeof localeSettings.providers.ollama.baseUrl).toBe("string")
+ expect(localeSettings.providers.ollama.baseUrl).not.toBe("")
+ // Should not be exactly "Base URL (optional)" or "Base URL (opsional)"
+ expect(localeSettings.providers.ollama.baseUrl).not.toMatch(/^Base URL/i)
+
+ // Check providers.ollama.modelId
+ expect(localeSettings.providers.ollama.modelId).toBeDefined()
+ expect(typeof localeSettings.providers.ollama.modelId).toBe("string")
+ expect(localeSettings.providers.ollama.modelId).not.toBe("")
+ // Should not be exactly "Model ID"
+ expect(localeSettings.providers.ollama.modelId).not.toMatch(/^Model ID$/i)
+ })
+
+ it(`should not contain English words in common terms for ${locale}`, () => {
+ const filePath = path.join(__dirname, `../locales/${locale}/settings.json`)
+ const fileContent = fs.readFileSync(filePath, "utf-8")
+ const localeSettings = JSON.parse(fileContent)
+
+ const providerValue = localeSettings.sections.providers
+ const baseUrlValue = localeSettings.providers.ollama.baseUrl
+ const modelIdValue = localeSettings.providers.ollama.modelId
+
+ // Check that common terms don't contain standalone English words
+ // (Allow "URL" and "ID" as they are technical acronyms commonly used)
+ const providerHasEnglish = englishWords
+ .filter((word) => word !== "URL" && word !== "ID")
+ .some((word) => providerValue === word || providerValue.startsWith(word + " "))
+
+ const baseUrlHasEnglish = englishWords
+ .filter((word) => word !== "URL" && word !== "ID")
+ .some((word) => baseUrlValue.includes(word))
+
+ const modelIdHasEnglish = englishWords
+ .filter((word) => word !== "URL" && word !== "ID")
+ .some((word) => modelIdValue.includes(word))
+
+ // Note: This is a soft check - some languages may legitimately use English technical terms
+ // The main check is that the values are not exactly the English defaults
+ if (providerHasEnglish) {
+ console.warn(`${locale}: "sections.providers" may contain English word: "${providerValue}"`)
+ }
+ if (baseUrlHasEnglish) {
+ console.warn(`${locale}: "providers.ollama.baseUrl" may contain English word: "${baseUrlValue}"`)
+ }
+ if (modelIdHasEnglish) {
+ console.warn(`${locale}: "providers.ollama.modelId" may contain English word: "${modelIdValue}"`)
+ }
+ })
+ })
+ })
+})
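Since the spec above reads each locale's settings.json from disk relative to __dirname and compares it against the English reference, the same pattern can be lifted into a small reporting helper. A hypothetical sketch, not part of this diff, assuming the webview-ui/src/i18n/locales/<locale>/settings.json layout the spec uses:

import fs from "fs"
import path from "path"

// Reports which providers.ollama keys each locale is still missing, keyed by locale.
export function findMissingOllamaKeys(localesDir: string, locales: string[]): Record<string, string[]> {
	const en = JSON.parse(fs.readFileSync(path.join(localesDir, "en", "settings.json"), "utf-8"))
	const referenceKeys: string[] = Object.keys(en.providers.ollama)
	const missing: Record<string, string[]> = {}
	for (const locale of locales) {
		const settings = JSON.parse(fs.readFileSync(path.join(localesDir, locale, "settings.json"), "utf-8"))
		const localeKeys = new Set(Object.keys(settings.providers?.ollama ?? {}))
		const gaps = referenceKeys.filter((key) => !localeKeys.has(key))
		if (gaps.length > 0) missing[locale] = gaps
	}
	return missing
}

// Example: findMissingOllamaKeys(path.join(__dirname, "../locales"), ["de", "fr"]) returns {} when coverage is complete.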
diff --git a/webview-ui/src/i18n/locales/ca/settings.json b/webview-ui/src/i18n/locales/ca/settings.json
index b54e44189f7..f81f6f7e8e2 100644
--- a/webview-ui/src/i18n/locales/ca/settings.json
+++ b/webview-ui/src/i18n/locales/ca/settings.json
@@ -467,7 +467,55 @@
"numCtx": "Mida de la finestra de context (num_ctx)",
"numCtxHelp": "Sobreescriu la mida de la finestra de context per defecte del model. Deixeu-ho en blanc per utilitzar la configuració del Modelfile del model. El valor mínim és 128.",
"description": "Ollama permet executar models localment al vostre ordinador. Per a instruccions sobre com començar, consulteu la Guia d'inici ràpid.",
- "warning": "Nota: Roo Code utilitza prompts complexos i funciona millor amb models Claude. Els models menys capaços poden no funcionar com s'espera."
+ "warning": "Nota: Roo Code utilitza prompts complexos i funciona millor amb models Claude. Els models menys capaços poden no funcionar com s'espera.",
+ "test": "Provar",
+ "testing": "Provant...",
+ "completedIn": "Completat en {{duration}}ms",
+ "refreshModels": "Actualitzar models",
+ "refreshing": "Actualitzant...",
+ "messages": {
+ "connectionSuccess": "S'ha connectat correctament a Ollama a {{baseUrl}}",
+ "connectionInvalidUrl": "URL no vàlida: {{baseUrl}}",
+ "connectionRefused": "No es pot connectar a Ollama a {{baseUrl}}. Assegureu-vos que Ollama està en execució.",
+ "connectionTimeout": "La connexió a Ollama ha expirat. Comproveu si la URL és correcta i Ollama és accessible.",
+ "connectionNetworkError": "Error de xarxa en connectar a Ollama. Comproveu la vostra connexió de xarxa.",
+ "connectionHttpError": "Ollama ha retornat un error: {{status}} {{statusText}}",
+ "connectionFailed": "Error en connectar: {{error}}",
+ "connectionTestError": "Error en provar la connexió: {{error}}",
+ "refreshSuccess": "S'han trobat {{count}} model(s) amb suport d'eines ({{total}} en total)",
+ "refreshNoModels": "No s'han trobat models. Assegureu-vos que Ollama està en execució i té models instal·lats.",
+ "refreshFailed": "Error en actualitzar els models: {{error}}"
+ },
+ "connectionSettings": "Configuració de connexió",
+ "toolsSupport": "Suport d'eines",
+ "noToolsSupport": "Sense suport d'eines",
+ "models": {
+ "one": "model",
+ "other": "models"
+ },
+ "noToolsSupportHelp": "Aquests models no admeten crides a eines natives i no es poden utilitzar amb Roo Code. Es mostren només com a referència.",
+ "table": {
+ "modelName": "Nom del model",
+ "context": "Context",
+ "size": "Mida",
+ "quantization": "Quantització",
+ "family": "Família",
+ "images": "Imatges",
+ "yes": "Sí",
+ "no": "No"
+ },
+ "streaming": "Transmissió",
+ "streamingHelp": "La transmissió sempre està habilitada per a les sol·licituds de l'API d'Ollama. Les respostes es transmeten en temps real mentre es generen.",
+ "requestTimeout": "Temps d'espera de sol·licitud (ms)",
+ "requestTimeoutHelp": "Temps d'espera en mil·lisegons per a sol·licituds de l'API LLM (completat de xat, treball de pensament). Per defecte: 3600000 (60 minuts). Rang: 1000-7200000 (120 minuts).",
+ "modelDiscoveryTimeout": "Temps d'espera de descobriment de models (ms)",
+ "modelDiscoveryTimeoutHelp": "Temps d'espera en mil·lisegons per a sol·licituds de descobriment de models (llistat i obtenció de detalls del model). Per defecte: 10000 (10 segons). Rang: 1000-600000 (10 minuts).",
+ "maxRetries": "Màxim de reintents",
+ "maxRetriesHelp": "Nombre màxim d'intents de reintent per a sol·licituds fallides. Per defecte: 0 (sense reintents). Rang: 0-10.",
+ "retryDelay": "Retard de reintent (ms)",
+ "retryDelayHelp": "Retard inicial entre intents de reintent en mil·lisegons. Utilitza retrocés exponencial. Per defecte: 1000 (1 segon). Rang: 100-10000.",
+ "enableLogging": "Habilitar registre de sol·licituds",
+ "enableLoggingHelp": "Habilitar registre detallat de sol·licituds, respostes i errors de l'API d'Ollama. Els registres inclouen informació de temps i detalls de connexió."
},
"unboundApiKey": "Clau API d'Unbound",
"getUnboundApiKey": "Obtenir clau API d'Unbound",
diff --git a/webview-ui/src/i18n/locales/de/settings.json b/webview-ui/src/i18n/locales/de/settings.json
index a609f5794ae..74f32f560a3 100644
--- a/webview-ui/src/i18n/locales/de/settings.json
+++ b/webview-ui/src/i18n/locales/de/settings.json
@@ -467,7 +467,55 @@
"numCtx": "Kontextfenstergröße (num_ctx)",
"numCtxHelp": "Überschreibt die Standard-Kontextfenstergröße des Modells. Lassen Sie das Feld leer, um die Modelfile-Konfiguration des Modells zu verwenden. Der Mindestwert ist 128.",
"description": "Ollama ermöglicht es dir, Modelle lokal auf deinem Computer auszuführen. Eine Anleitung zum Einstieg findest du im Schnellstart-Guide.",
- "warning": "Hinweis: Roo Code verwendet komplexe Prompts und funktioniert am besten mit Claude-Modellen. Weniger leistungsfähige Modelle funktionieren möglicherweise nicht wie erwartet."
+ "warning": "Hinweis: Roo Code verwendet komplexe Prompts und funktioniert am besten mit Claude-Modellen. Weniger leistungsfähige Modelle funktionieren möglicherweise nicht wie erwartet.",
+ "test": "Testen",
+ "testing": "Verbindung wird geprüft...",
+ "completedIn": "Abgeschlossen in {{duration}}ms",
+ "refreshModels": "Modelle aktualisieren",
+ "refreshing": "Aktualisiere...",
+ "messages": {
+ "connectionSuccess": "Erfolgreich mit Ollama unter {{baseUrl}} verbunden",
+ "connectionInvalidUrl": "Ungültige URL: {{baseUrl}}",
+ "connectionRefused": "Kann nicht mit Ollama unter {{baseUrl}} verbinden. Stellen Sie sicher, dass Ollama läuft.",
+ "connectionTimeout": "Verbindung zu Ollama ist abgelaufen. Prüfen Sie, ob die URL korrekt ist und Ollama erreichbar ist.",
+ "connectionNetworkError": "Netzwerkfehler beim Verbinden mit Ollama. Prüfen Sie Ihre Netzwerkverbindung.",
+ "connectionHttpError": "Ollama hat einen Fehler zurückgegeben: {{status}} {{statusText}}",
+ "connectionFailed": "Verbindung fehlgeschlagen: {{error}}",
+ "connectionTestError": "Fehler beim Testen der Verbindung: {{error}}",
+ "refreshSuccess": "{{count}} Modell(e) mit Tools-Unterstützung gefunden ({{total}} insgesamt)",
+ "refreshNoModels": "Keine Modelle gefunden. Stellen Sie sicher, dass Ollama läuft und Modelle installiert hat.",
+ "refreshFailed": "Aktualisieren der Modelle fehlgeschlagen: {{error}}"
+ },
+ "connectionSettings": "Verbindungseinstellungen",
+ "toolsSupport": "Tools-Unterstützung",
+ "noToolsSupport": "Keine Tools-Unterstützung",
+ "models": {
+ "one": "Modell",
+ "other": "Modelle"
+ },
+ "noToolsSupportHelp": "Diese Modelle unterstützen keine nativen Tool-Aufrufe und können nicht mit Roo Code verwendet werden. Sie werden nur zur Referenz angezeigt.",
+ "table": {
+ "modelName": "Modellname",
+ "context": "Kontext",
+ "size": "Größe",
+ "quantization": "Quantisierung",
+ "family": "Familie",
+ "images": "Bilder",
+ "yes": "Ja",
+ "no": "Nein"
+ },
+ "streaming": "Streaming",
+ "streamingHelp": "Streaming ist für Ollama API-Anfragen immer aktiviert. Antworten werden in Echtzeit gestreamt, während sie generiert werden.",
+ "requestTimeout": "Anfrage-Timeout (ms)",
+ "requestTimeoutHelp": "Timeout in Millisekunden für LLM API-Anfragen (Chat-Vervollständigungen, Denkarbeit). Standard: 3600000 (60 Minuten). Bereich: 1000-7200000 (120 Minuten).",
+ "modelDiscoveryTimeout": "Modell-Erkennungs-Timeout (ms)",
+ "modelDiscoveryTimeoutHelp": "Timeout in Millisekunden für Modell-Erkennungsanfragen (Auflisten und Abrufen von Modelldetails). Standard: 10000 (10 Sekunden). Bereich: 1000-600000 (10 Minuten).",
+ "maxRetries": "Max. Wiederholungen",
+ "maxRetriesHelp": "Maximale Anzahl von Wiederholungsversuchen bei fehlgeschlagenen Anfragen. Standard: 0 (keine Wiederholungen). Bereich: 0-10.",
+ "retryDelay": "Wiederholungsverzögerung (ms)",
+ "retryDelayHelp": "Anfängliche Verzögerung zwischen Wiederholungsversuchen in Millisekunden. Verwendet exponentielles Backoff. Standard: 1000 (1 Sekunde). Bereich: 100-10000.",
+ "enableLogging": "Anfrage-Protokollierung aktivieren",
+ "enableLoggingHelp": "Detaillierte Protokollierung von Ollama API-Anfragen, Antworten und Fehlern aktivieren. Protokolle enthalten Zeitinformationen und Verbindungsdetails."
},
"unboundApiKey": "Unbound API-Schlüssel",
"getUnboundApiKey": "Unbound API-Schlüssel erhalten",
diff --git a/webview-ui/src/i18n/locales/en/settings.json b/webview-ui/src/i18n/locales/en/settings.json
index e798fa4370b..1422bda4c52 100644
--- a/webview-ui/src/i18n/locales/en/settings.json
+++ b/webview-ui/src/i18n/locales/en/settings.json
@@ -475,8 +475,56 @@
"apiKeyHelp": "Optional API key for authenticated Ollama instances or cloud services. Leave empty for local installations.",
"numCtx": "Context Window Size (num_ctx)",
"numCtxHelp": "Override the model's default context window size. Leave empty to use the model's Modelfile configuration. Minimum value is 128.",
- "description": "Ollama allows you to run models locally on your computer. For instructions on how to get started, see their quickstart guide.",
- "warning": "Note: Roo Code uses complex prompts and works best with Claude models. Less capable models may not work as expected."
+ "description": "Ollama allows you to run models locally on your computer. For instructions on how to get started, see their quickstart guide.",
+ "warning": "Note: Roo Code uses complex prompts and works best with Claude models. Less capable models may not work as expected.",
+ "test": "Test",
+ "testing": "Testing...",
+ "completedIn": "Completed in {{duration}}ms",
+ "refreshModels": "Refresh Models",
+ "refreshing": "Refreshing...",
+ "messages": {
+ "connectionSuccess": "Successfully connected to Ollama at {{baseUrl}}",
+ "connectionInvalidUrl": "Invalid URL: {{baseUrl}}",
+ "connectionRefused": "Cannot connect to Ollama at {{baseUrl}}. Make sure Ollama is running.",
+ "connectionTimeout": "Connection to Ollama timed out. Check if the URL is correct and Ollama is accessible.",
+ "connectionNetworkError": "Network error connecting to Ollama. Check your network connection.",
+ "connectionHttpError": "Ollama returned error: {{status}} {{statusText}}",
+ "connectionFailed": "Failed to connect: {{error}}",
+ "connectionTestError": "Error testing connection: {{error}}",
+ "refreshSuccess": "Found {{count}} model(s) with tools support ({{total}} total)",
+ "refreshNoModels": "No models found. Make sure Ollama is running and has models installed.",
+ "refreshFailed": "Failed to refresh models: {{error}}"
+ },
+ "connectionSettings": "Connection Settings",
+ "toolsSupport": "Tools Support",
+ "noToolsSupport": "No Tools Support",
+ "models": {
+ "one": "model",
+ "other": "models"
+ },
+ "noToolsSupportHelp": "These models do not support native tool calling and cannot be used with Roo Code. They are shown for reference only.",
+ "table": {
+ "modelName": "Model Name",
+ "context": "Context",
+ "size": "Size",
+ "quantization": "Quantization",
+ "family": "Family",
+ "images": "Images",
+ "yes": "Yes",
+ "no": "No"
+ },
+ "streaming": "Streaming",
+ "streamingHelp": "Streaming is always enabled for Ollama API requests. Responses are streamed in real-time as they are generated.",
+ "requestTimeout": "Request Timeout (ms)",
+ "requestTimeoutHelp": "Timeout in milliseconds for LLM API requests (chat completions, thinking work). Default: 3600000 (60 minutes). Range: 1000-7200000 (120 minutes).",
+ "modelDiscoveryTimeout": "Model Discovery Timeout (ms)",
+ "modelDiscoveryTimeoutHelp": "Timeout in milliseconds for model discovery requests (listing and fetching model details). Default: 10000 (10 seconds). Range: 1000-600000 (10 minutes).",
+ "maxRetries": "Max Retries",
+ "maxRetriesHelp": "Maximum number of retry attempts for failed requests. Default: 0 (no retries). Range: 0-10.",
+ "retryDelay": "Retry Delay (ms)",
+ "retryDelayHelp": "Initial delay between retry attempts in milliseconds. Uses exponential backoff. Default: 1000 (1 second). Range: 100-10000.",
+ "enableLogging": "Enable Request Logging",
+ "enableLoggingHelp": "Enable detailed logging of Ollama API requests, responses, and errors. Logs include timing information and connection details."
},
"unboundApiKey": "Unbound API Key",
"getUnboundApiKey": "Get Unbound API Key",
@@ -780,7 +828,7 @@
}
},
"advancedSettings": {
- "title": "Advanced settings"
+ "title": "Advanced Settings"
},
"advanced": {
"diff": {
diff --git a/webview-ui/src/i18n/locales/es/settings.json b/webview-ui/src/i18n/locales/es/settings.json
index dd62b397f48..aaca5692509 100644
--- a/webview-ui/src/i18n/locales/es/settings.json
+++ b/webview-ui/src/i18n/locales/es/settings.json
@@ -467,7 +467,55 @@
"numCtx": "Tamaño de la ventana de contexto (num_ctx)",
"numCtxHelp": "Sobrescribe el tamaño de la ventana de contexto predeterminado del modelo. Déjelo vacío para usar la configuración del Modelfile del modelo. El valor mínimo es 128.",
"description": "Ollama le permite ejecutar modelos localmente en su computadora. Para obtener instrucciones sobre cómo comenzar, consulte la guía de inicio rápido.",
- "warning": "Nota: Roo Code utiliza prompts complejos y funciona mejor con modelos Claude. Los modelos menos capaces pueden no funcionar como se espera."
+ "warning": "Nota: Roo Code utiliza prompts complejos y funciona mejor con modelos Claude. Los modelos menos capaces pueden no funcionar como se espera.",
+ "test": "Probar",
+ "testing": "Probando...",
+ "completedIn": "Completado en {{duration}}ms",
+ "refreshModels": "Actualizar modelos",
+ "refreshing": "Actualizando...",
+ "messages": {
+ "connectionSuccess": "Conectado exitosamente a Ollama en {{baseUrl}}",
+ "connectionInvalidUrl": "URL no válida: {{baseUrl}}",
+ "connectionRefused": "No se puede conectar a Ollama en {{baseUrl}}. Asegúrese de que Ollama esté en ejecución.",
+ "connectionTimeout": "La conexión a Ollama ha expirado. Verifique si la URL es correcta y Ollama es accesible.",
+ "connectionNetworkError": "Error de red al conectar con Ollama. Verifique su conexión de red.",
+ "connectionHttpError": "Ollama devolvió un error: {{status}} {{statusText}}",
+ "connectionFailed": "Error al conectar: {{error}}",
+ "connectionTestError": "Error al probar la conexión: {{error}}",
+ "refreshSuccess": "Se encontraron {{count}} modelo(s) con soporte de herramientas ({{total}} en total)",
+ "refreshNoModels": "No se encontraron modelos. Asegúrese de que Ollama esté en ejecución y tenga modelos instalados.",
+ "refreshFailed": "Error al actualizar los modelos: {{error}}"
+ },
+ "connectionSettings": "Configuración de conexión",
+ "toolsSupport": "Soporte de herramientas",
+ "noToolsSupport": "Sin soporte de herramientas",
+ "models": {
+ "one": "modelo",
+ "other": "modelos"
+ },
+ "noToolsSupportHelp": "Estos modelos no admiten llamadas a herramientas nativas y no se pueden usar con Roo Code. Se muestran solo como referencia.",
+ "table": {
+ "modelName": "Nombre del modelo",
+ "context": "Contexto",
+ "size": "Tamaño",
+ "quantization": "Cuantización",
+ "family": "Familia",
+ "images": "Imágenes",
+ "yes": "Sí",
+ "no": "No"
+ },
+ "streaming": "Transmisión",
+ "streamingHelp": "La transmisión siempre está habilitada para las solicitudes de API de Ollama. Las respuestas se transmiten en tiempo real a medida que se generan.",
+ "requestTimeout": "Tiempo de espera de la solicitud (ms)",
+ "requestTimeoutHelp": "Tiempo de espera en milisegundos para solicitudes de API LLM (completado de chat, trabajo de pensamiento). Predeterminado: 3600000 (60 minutos). Rango: 1000-7200000 (120 minutos).",
+ "modelDiscoveryTimeout": "Tiempo de espera de descubrimiento de modelos (ms)",
+ "modelDiscoveryTimeoutHelp": "Tiempo de espera en milisegundos para solicitudes de descubrimiento de modelos (listado y obtención de detalles del modelo). Predeterminado: 10000 (10 segundos). Rango: 1000-600000 (10 minutos).",
+ "maxRetries": "Máximo de reintentos",
+ "maxRetriesHelp": "Número máximo de intentos de reintento para solicitudes fallidas. Predeterminado: 0 (sin reintentos). Rango: 0-10.",
+ "retryDelay": "Retraso de reintento (ms)",
+ "retryDelayHelp": "Retraso inicial entre intentos de reintento en milisegundos. Utiliza retroceso exponencial. Predeterminado: 1000 (1 segundo). Rango: 100-10000.",
+ "enableLogging": "Habilitar registro de solicitudes",
+ "enableLoggingHelp": "Habilitar registro detallado de solicitudes, respuestas y errores de la API de Ollama. Los registros incluyen información de tiempo y detalles de conexión."
},
"unboundApiKey": "Clave API de Unbound",
"getUnboundApiKey": "Obtener clave API de Unbound",
diff --git a/webview-ui/src/i18n/locales/fr/settings.json b/webview-ui/src/i18n/locales/fr/settings.json
index 596b3762284..883f9f64b6a 100644
--- a/webview-ui/src/i18n/locales/fr/settings.json
+++ b/webview-ui/src/i18n/locales/fr/settings.json
@@ -467,7 +467,55 @@
"numCtx": "Taille de la fenêtre de contexte (num_ctx)",
"numCtxHelp": "Remplace la taille de la fenêtre de contexte par défaut du modèle. Laissez vide pour utiliser la configuration du Modelfile du modèle. La valeur minimale est 128.",
"description": "Ollama vous permet d'exécuter des modèles localement sur votre ordinateur. Pour obtenir des instructions sur la mise en route, consultez le guide de démarrage rapide.",
- "warning": "Remarque : Roo Code utilise des prompts complexes et fonctionne mieux avec les modèles Claude. Les modèles moins performants peuvent ne pas fonctionner comme prévu."
+ "warning": "Remarque : Roo Code utilise des prompts complexes et fonctionne mieux avec les modèles Claude. Les modèles moins performants peuvent ne pas fonctionner comme prévu.",
+ "test": "Tester",
+ "testing": "Test en cours...",
+ "completedIn": "Terminé en {{duration}}ms",
+ "refreshModels": "Actualiser les modèles",
+ "refreshing": "Actualisation...",
+ "messages": {
+ "connectionSuccess": "Connexion réussie à Ollama sur {{baseUrl}}",
+ "connectionInvalidUrl": "URL invalide : {{baseUrl}}",
+ "connectionRefused": "Impossible de se connecter à Ollama sur {{baseUrl}}. Assurez-vous qu'Ollama est en cours d'exécution.",
+ "connectionTimeout": "La connexion à Ollama a expiré. Vérifiez si l'URL est correcte et qu'Ollama est accessible.",
+ "connectionNetworkError": "Erreur réseau lors de la connexion à Ollama. Vérifiez votre connexion réseau.",
+ "connectionHttpError": "Ollama a renvoyé une erreur : {{status}} {{statusText}}",
+ "connectionFailed": "Échec de la connexion : {{error}}",
+ "connectionTestError": "Erreur lors du test de la connexion : {{error}}",
+ "refreshSuccess": "{{count}} modèle(s) trouvé(s) avec support des outils ({{total}} au total)",
+ "refreshNoModels": "Aucun modèle trouvé. Assurez-vous qu'Ollama est en cours d'exécution et a des modèles installés.",
+ "refreshFailed": "Échec de l'actualisation des modèles : {{error}}"
+ },
+ "connectionSettings": "Paramètres de connexion",
+ "toolsSupport": "Support des outils",
+ "noToolsSupport": "Pas de support des outils",
+ "models": {
+ "one": "modèle",
+ "other": "modèles"
+ },
+ "noToolsSupportHelp": "Ces modèles ne prennent pas en charge les appels d'outils natifs et ne peuvent pas être utilisés avec Roo Code. Ils sont affichés uniquement à titre de référence.",
+ "table": {
+ "modelName": "Nom du modèle",
+ "context": "Contexte",
+ "size": "Taille",
+ "quantization": "Quantification",
+ "family": "Famille",
+ "images": "Images",
+ "yes": "Oui",
+ "no": "Non"
+ },
+ "streaming": "Diffusion en continu",
+ "streamingHelp": "La diffusion en continu est toujours activée pour les requêtes API Ollama. Les réponses sont diffusées en temps réel au fur et à mesure de leur génération.",
+ "requestTimeout": "Délai d'expiration de la requête (ms)",
+ "requestTimeoutHelp": "Délai d'expiration en millisecondes pour les requêtes API LLM (complétions de chat, travail de réflexion). Par défaut : 3600000 (60 minutes). Plage : 1000-7200000 (120 minutes).",
+ "modelDiscoveryTimeout": "Délai d'expiration de la découverte de modèles (ms)",
+ "modelDiscoveryTimeoutHelp": "Délai d'expiration en millisecondes pour les requêtes de découverte de modèles (liste et récupération des détails des modèles). Par défaut : 10000 (10 secondes). Plage : 1000-600000 (10 minutes).",
+ "maxRetries": "Nombre maximum de tentatives",
+ "maxRetriesHelp": "Nombre maximum de tentatives de nouvelle tentative pour les requêtes échouées. Par défaut : 0 (aucune nouvelle tentative). Plage : 0-10.",
+ "retryDelay": "Délai entre les tentatives (ms)",
+ "retryDelayHelp": "Délai initial entre les tentatives de nouvelle tentative en millisecondes. Utilise un backoff exponentiel. Par défaut : 1000 (1 seconde). Plage : 100-10000.",
+ "enableLogging": "Activer l'enregistrement des requêtes",
+ "enableLoggingHelp": "Activer l'enregistrement détaillé des requêtes, réponses et erreurs de l'API Ollama. Les journaux incluent les informations de timing et les détails de connexion."
},
"unboundApiKey": "Clé API Unbound",
"getUnboundApiKey": "Obtenir la clé API Unbound",
diff --git a/webview-ui/src/i18n/locales/hi/settings.json b/webview-ui/src/i18n/locales/hi/settings.json
index 3835667d821..5eb964b1027 100644
--- a/webview-ui/src/i18n/locales/hi/settings.json
+++ b/webview-ui/src/i18n/locales/hi/settings.json
@@ -450,7 +450,7 @@
"googleCloudProjectId": "Google Cloud प्रोजेक्ट ID",
"googleCloudRegion": "Google Cloud क्षेत्र",
"lmStudio": {
- "baseUrl": "बेस URL (वैकल्पिक)",
+ "baseUrl": "आधार URL (वैकल्पिक)",
"modelId": "मॉडल ID",
"speculativeDecoding": "स्पेक्युलेटिव डिकोडिंग सक्षम करें",
"draftModelId": "ड्राफ्ट मॉडल ID",
@@ -460,14 +460,62 @@
"description": "LM Studio आपको अपने कंप्यूटर पर स्थानीय रूप से मॉडल चलाने की अनुमति देता है। आरंभ करने के निर्देशों के लिए, उनकी क्विकस्टार्ट गाइड देखें। आपको इस एक्सटेंशन के साथ उपयोग करने के लिए LM Studio की स्थानीय सर्वर सुविधा भी शुरू करनी होगी। नोट: Roo Code जटिल प्रॉम्प्ट्स का उपयोग करता है और Claude मॉडल के साथ सबसे अच्छा काम करता है। कम क्षमता वाले मॉडल अपेक्षित रूप से काम नहीं कर सकते हैं।"
},
"ollama": {
- "baseUrl": "बेस URL (वैकल्पिक)",
+ "baseUrl": "आधार URL (वैकल्पिक)",
"modelId": "मॉडल ID",
"apiKey": "Ollama API Key",
"apiKeyHelp": "प्रमाणित Ollama इंस्टेंसेस या क्लाउड सेवाओं के लिए वैकल्पिक API key। स्थानीय इंस्टॉलेशन के लिए खाली छोड़ें।",
"numCtx": "संदर्भ विंडो आकार (num_ctx)",
"numCtxHelp": "मॉडल के डिफ़ॉल्ट संदर्भ विंडो आकार को ओवरराइड करें। मॉडल की मॉडलफ़ाइल कॉन्फ़िगरेशन का उपयोग करने के लिए खाली छोड़ दें। न्यूनतम मान 128 है।",
"description": "Ollama आपको अपने कंप्यूटर पर स्थानीय रूप से मॉडल चलाने की अनुमति देता है। आरंभ करने के निर्देशों के लिए, उनकी क्विकस्टार्ट गाइड देखें।",
- "warning": "नोट: Roo Code जटिल प्रॉम्प्ट्स का उपयोग करता है और Claude मॉडल के साथ सबसे अच्छा काम करता है। कम क्षमता वाले मॉडल अपेक्षित रूप से काम नहीं कर सकते हैं।"
+ "warning": "नोट: Roo Code जटिल प्रॉम्प्ट्स का उपयोग करता है और Claude मॉडल के साथ सबसे अच्छा काम करता है। कम क्षमता वाले मॉडल अपेक्षित रूप से काम नहीं कर सकते हैं।",
+ "test": "परीक्षण",
+ "testing": "परीक्षण कर रहे हैं...",
+ "completedIn": "{{duration}}ms में पूर्ण",
+ "refreshModels": "मॉडल ताज़ा करें",
+ "refreshing": "ताज़ा कर रहे हैं...",
+ "messages": {
+ "connectionSuccess": "{{baseUrl}} पर Ollama से सफलतापूर्वक कनेक्ट हुआ",
+ "connectionInvalidUrl": "अमान्य URL: {{baseUrl}}",
+ "connectionRefused": "{{baseUrl}} पर Ollama से कनेक्ट नहीं हो सका। सुनिश्चित करें कि Ollama चल रहा है।",
+ "connectionTimeout": "Ollama से कनेक्शन समय समाप्त हो गया। जांचें कि URL सही है और Ollama सुलभ है।",
+ "connectionNetworkError": "Ollama से कनेक्ट करने में नेटवर्क त्रुटि। अपना नेटवर्क कनेक्शन जांचें।",
+ "connectionHttpError": "Ollama ने त्रुटि लौटाई: {{status}} {{statusText}}",
+ "connectionFailed": "कनेक्ट करने में विफल: {{error}}",
+ "connectionTestError": "कनेक्शन परीक्षण में त्रुटि: {{error}}",
+ "refreshSuccess": "{{count}} मॉडल(मॉडल) टूल्स सपोर्ट के साथ मिले (कुल {{total}})",
+ "refreshNoModels": "कोई मॉडल नहीं मिला। सुनिश्चित करें कि Ollama चल रहा है और मॉडल इंस्टॉल हैं।",
+ "refreshFailed": "मॉडल ताज़ा करने में विफल: {{error}}"
+ },
+ "connectionSettings": "कनेक्शन सेटिंग्स",
+ "toolsSupport": "टूल्स सपोर्ट",
+ "noToolsSupport": "कोई टूल्स सपोर्ट नहीं",
+ "models": {
+ "one": "मॉडल",
+ "other": "मॉडल"
+ },
+ "noToolsSupportHelp": "ये मॉडल नेटिव टूल कॉलिंग का समर्थन नहीं करते हैं और Roo Code के साथ उपयोग नहीं किए जा सकते हैं। वे केवल संदर्भ के लिए दिखाए गए हैं।",
+ "table": {
+ "modelName": "मॉडल नाम",
+ "context": "संदर्भ",
+ "size": "आकार",
+ "quantization": "क्वांटाइज़ेशन",
+ "family": "परिवार",
+ "images": "छवियां",
+ "yes": "हाँ",
+ "no": "नहीं"
+ },
+ "streaming": "स्ट्रीमिंग",
+ "streamingHelp": "Ollama API अनुरोधों के लिए स्ट्रीमिंग हमेशा सक्षम होती है। प्रतिक्रियाएं वास्तविक समय में स्ट्रीम की जाती हैं जैसे ही वे उत्पन्न होती हैं।",
+ "requestTimeout": "अनुरोध समय सीमा (ms)",
+ "requestTimeoutHelp": "LLM API अनुरोधों (चैट पूर्णता, सोच कार्य) के लिए मिलीसेकंड में समय सीमा। डिफ़ॉल्ट: 3600000 (60 मिनट)। सीमा: 1000-7200000 (120 मिनट)।",
+ "modelDiscoveryTimeout": "मॉडल खोज समय सीमा (ms)",
+ "modelDiscoveryTimeoutHelp": "मॉडल खोज अनुरोधों (सूची और मॉडल विवरण प्राप्त करना) के लिए मिलीसेकंड में समय सीमा। डिफ़ॉल्ट: 10000 (10 सेकंड)। सीमा: 1000-600000 (10 मिनट)।",
+ "maxRetries": "अधिकतम पुनः प्रयास",
+ "maxRetriesHelp": "विफल अनुरोधों के लिए पुनः प्रयास प्रयासों की अधिकतम संख्या। डिफ़ॉल्ट: 0 (कोई पुनः प्रयास नहीं)। सीमा: 0-10।",
+ "retryDelay": "पुनः प्रयास देरी (ms)",
+ "retryDelayHelp": "मिलीसेकंड में पुनः प्रयास प्रयासों के बीच प्रारंभिक देरी। घातीय बैकऑफ़ का उपयोग करता है। डिफ़ॉल्ट: 1000 (1 सेकंड)। सीमा: 100-10000।",
+ "enableLogging": "अनुरोध लॉगिंग सक्षम करें",
+ "enableLoggingHelp": "Ollama API अनुरोधों, प्रतिक्रियाओं और त्रुटियों का विस्तृत लॉगिंग सक्षम करें। लॉग में समय जानकारी और कनेक्शन विवरण शामिल हैं।"
},
"unboundApiKey": "Unbound API कुंजी",
"getUnboundApiKey": "Unbound API कुंजी प्राप्त करें",
diff --git a/webview-ui/src/i18n/locales/id/settings.json b/webview-ui/src/i18n/locales/id/settings.json
index 5942f8f8a93..86c69bace00 100644
--- a/webview-ui/src/i18n/locales/id/settings.json
+++ b/webview-ui/src/i18n/locales/id/settings.json
@@ -26,7 +26,7 @@
"discardButton": "Buang perubahan"
},
"sections": {
- "providers": "Provider",
+ "providers": "Penyedia",
"modes": "Mode",
"mcp": "Server MCP",
"worktrees": "Worktrees",
@@ -263,7 +263,7 @@
"providerDocumentation": "Dokumentasi {{provider}}",
"configProfile": "Profil Konfigurasi",
"description": "Simpan konfigurasi API yang berbeda untuk beralih dengan cepat antara provider dan pengaturan.",
- "apiProvider": "Provider API",
+ "apiProvider": "Penyedia API",
"apiProviderDocs": "Dokumentasi Penyedia",
"model": "Model",
"nameEmpty": "Nama tidak boleh kosong",
@@ -454,8 +454,8 @@
"googleCloudProjectId": "Google Cloud Project ID",
"googleCloudRegion": "Google Cloud Region",
"lmStudio": {
- "baseUrl": "Base URL (opsional)",
- "modelId": "Model ID",
+ "baseUrl": "URL Dasar (opsional)",
+ "modelId": "ID Model",
"speculativeDecoding": "Aktifkan Speculative Decoding",
"draftModelId": "Draft Model ID",
"draftModelDesc": "Draft model harus dari keluarga model yang sama agar speculative decoding bekerja dengan benar.",
@@ -464,14 +464,62 @@
"description": "LM Studio memungkinkan kamu menjalankan model secara lokal di komputer. Untuk instruksi cara memulai, lihat panduan quickstart mereka. Kamu juga perlu memulai fitur local server LM Studio untuk menggunakannya dengan ekstensi ini. Catatan: Roo Code menggunakan prompt kompleks dan bekerja terbaik dengan model Claude. Model yang kurang mampu mungkin tidak bekerja seperti yang diharapkan."
},
"ollama": {
- "baseUrl": "Base URL (opsional)",
- "modelId": "Model ID",
+ "baseUrl": "URL Dasar (opsional)",
+ "modelId": "ID Model",
"apiKey": "Ollama API Key",
"apiKeyHelp": "API key opsional untuk instance Ollama yang terautentikasi atau layanan cloud. Biarkan kosong untuk instalasi lokal.",
"numCtx": "Ukuran Jendela Konteks (num_ctx)",
"numCtxHelp": "Ganti ukuran jendela konteks default model. Biarkan kosong untuk menggunakan konfigurasi Modelfile model. Nilai minimum adalah 128.",
"description": "Ollama memungkinkan kamu menjalankan model secara lokal di komputer. Untuk instruksi cara memulai, lihat panduan quickstart mereka.",
- "warning": "Catatan: Roo Code menggunakan prompt kompleks dan bekerja terbaik dengan model Claude. Model yang kurang mampu mungkin tidak bekerja seperti yang diharapkan."
+ "warning": "Catatan: Roo Code menggunakan prompt kompleks dan bekerja terbaik dengan model Claude. Model yang kurang mampu mungkin tidak bekerja seperti yang diharapkan.",
+ "test": "Uji",
+ "testing": "Menguji...",
+ "completedIn": "Selesai dalam {{duration}}ms",
+ "refreshModels": "Muat ulang model",
+ "refreshing": "Memuat ulang...",
+ "messages": {
+ "connectionSuccess": "Berhasil terhubung ke Ollama di {{baseUrl}}",
+ "connectionInvalidUrl": "URL tidak valid: {{baseUrl}}",
+ "connectionRefused": "Tidak dapat terhubung ke Ollama di {{baseUrl}}. Pastikan Ollama sedang berjalan.",
+ "connectionTimeout": "Koneksi ke Ollama habis waktu. Periksa apakah URL benar dan Ollama dapat diakses.",
+ "connectionNetworkError": "Kesalahan jaringan saat menghubungkan ke Ollama. Periksa koneksi jaringan Anda.",
+ "connectionHttpError": "Ollama mengembalikan kesalahan: {{status}} {{statusText}}",
+ "connectionFailed": "Gagal terhubung: {{error}}",
+ "connectionTestError": "Kesalahan saat menguji koneksi: {{error}}",
+ "refreshSuccess": "Ditemukan {{count}} model dengan dukungan tools ({{total}} total)",
+ "refreshNoModels": "Tidak ada model yang ditemukan. Pastikan Ollama sedang berjalan dan memiliki model yang terpasang.",
+ "refreshFailed": "Gagal memuat ulang model: {{error}}"
+ },
+ "connectionSettings": "Pengaturan koneksi",
+ "toolsSupport": "Dukungan alat",
+ "noToolsSupport": "Tidak ada dukungan alat",
+ "models": {
+ "one": "model",
+ "other": "model"
+ },
+ "noToolsSupportHelp": "Model-model ini tidak mendukung pemanggilan alat native dan tidak dapat digunakan dengan Roo Code. Mereka ditampilkan hanya sebagai referensi.",
+ "table": {
+ "modelName": "Nama model",
+ "context": "Konteks",
+ "size": "Ukuran",
+ "quantization": "Kuantisasi",
+ "family": "Keluarga",
+ "images": "Gambar",
+ "yes": "Ya",
+ "no": "Tidak"
+ },
+ "streaming": "Streaming",
+ "streamingHelp": "Streaming selalu diaktifkan untuk permintaan API Ollama. Respons di-stream secara real-time saat dibuat.",
+ "requestTimeout": "Batas waktu permintaan (ms)",
+ "requestTimeoutHelp": "Batas waktu dalam milidetik untuk permintaan API LLM (penyelesaian chat, pekerjaan berpikir). Default: 3600000 (60 menit). Rentang: 1000-7200000 (120 menit).",
+ "modelDiscoveryTimeout": "Batas waktu penemuan model (ms)",
+ "modelDiscoveryTimeoutHelp": "Batas waktu dalam milidetik untuk permintaan penemuan model (daftar dan mengambil detail model). Default: 10000 (10 detik). Rentang: 1000-600000 (10 menit).",
+ "maxRetries": "Maks. percobaan ulang",
+ "maxRetriesHelp": "Jumlah maksimum upaya percobaan ulang untuk permintaan yang gagal. Default: 0 (tidak ada percobaan ulang). Rentang: 0-10.",
+ "retryDelay": "Penundaan percobaan ulang (ms)",
+ "retryDelayHelp": "Penundaan awal antara upaya percobaan ulang dalam milidetik. Menggunakan exponential backoff. Default: 1000 (1 detik). Rentang: 100-10000.",
+ "enableLogging": "Aktifkan pencatatan permintaan",
+ "enableLoggingHelp": "Aktifkan pencatatan detail permintaan, respons, dan kesalahan API Ollama. Log mencakup informasi waktu dan detail koneksi."
},
"unboundApiKey": "Unbound API Key",
"getUnboundApiKey": "Dapatkan Unbound API Key",
@@ -531,7 +579,7 @@
"resetDefaults": "Reset ke Default"
},
"rateLimitSeconds": {
- "label": "Rate limit",
+ "label": "Pembatasan laju",
"description": "Waktu minimum antara permintaan API."
},
"consecutiveMistakeLimit": {
diff --git a/webview-ui/src/i18n/locales/it/settings.json b/webview-ui/src/i18n/locales/it/settings.json
index 823339007f0..e5fa5e6934f 100644
--- a/webview-ui/src/i18n/locales/it/settings.json
+++ b/webview-ui/src/i18n/locales/it/settings.json
@@ -467,7 +467,55 @@
"numCtx": "Dimensione della finestra di contesto (num_ctx)",
"numCtxHelp": "Sovrascrive la dimensione predefinita della finestra di contesto del modello. Lasciare vuoto per utilizzare la configurazione del Modelfile del modello. Il valore minimo è 128.",
"description": "Ollama ti permette di eseguire modelli localmente sul tuo computer. Per iniziare, consulta la guida rapida.",
- "warning": "Nota: Roo Code utiliza prompt complessi e funziona meglio con i modelli Claude. I modelli con capacità inferiori potrebbero non funzionare come previsto."
+ "warning": "Nota: Roo Code utiliza prompt complessi e funziona meglio con i modelli Claude. I modelli con capacità inferiori potrebbero non funzionare come previsto.",
+ "test": "Testa",
+ "testing": "Test in corso...",
+ "completedIn": "Completato in {{duration}}ms",
+ "refreshModels": "Aggiorna modelli",
+ "refreshing": "Aggiornamento...",
+ "messages": {
+ "connectionSuccess": "Connesso con successo a Ollama su {{baseUrl}}",
+ "connectionInvalidUrl": "URL non valida: {{baseUrl}}",
+ "connectionRefused": "Impossibile connettersi a Ollama su {{baseUrl}}. Assicurati che Ollama sia in esecuzione.",
+ "connectionTimeout": "Connessione a Ollama scaduta. Verifica che l'URL sia corretto e che Ollama sia accessibile.",
+ "connectionNetworkError": "Errore di rete durante la connessione a Ollama. Verifica la tua connessione di rete.",
+ "connectionHttpError": "Ollama ha restituito un errore: {{status}} {{statusText}}",
+ "connectionFailed": "Connessione fallita: {{error}}",
+ "connectionTestError": "Errore durante il test della connessione: {{error}}",
+ "refreshSuccess": "Trovati {{count}} modello(i) con supporto strumenti ({{total}} totali)",
+ "refreshNoModels": "Nessun modello trovato. Assicurati che Ollama sia in esecuzione e abbia modelli installati.",
+ "refreshFailed": "Aggiornamento modelli fallito: {{error}}"
+ },
+ "connectionSettings": "Impostazioni di connessione",
+ "toolsSupport": "Supporto strumenti",
+ "noToolsSupport": "Nessun supporto strumenti",
+ "models": {
+ "one": "modello",
+ "other": "modelli"
+ },
+ "noToolsSupportHelp": "Questi modelli non supportano le chiamate a strumenti native e non possono essere utilizzati con Roo Code. Sono mostrati solo come riferimento.",
+ "table": {
+ "modelName": "Nome modello",
+ "context": "Contesto",
+ "size": "Dimensione",
+ "quantization": "Quantizzazione",
+ "family": "Famiglia",
+ "images": "Immagini",
+ "yes": "Sì",
+ "no": "No"
+ },
+ "streaming": "Streaming",
+ "streamingHelp": "Lo streaming è sempre abilitato per le richieste API Ollama. Le risposte vengono trasmesse in tempo reale mentre vengono generate.",
+ "requestTimeout": "Timeout richiesta (ms)",
+ "requestTimeoutHelp": "Timeout in millisecondi per le richieste API LLM (completamenti chat, lavoro di pensiero). Predefinito: 3600000 (60 minuti). Intervallo: 1000-7200000 (120 minuti).",
+ "modelDiscoveryTimeout": "Timeout scoperta modelli (ms)",
+ "modelDiscoveryTimeoutHelp": "Timeout in millisecondi per le richieste di scoperta modelli (elenco e recupero dettagli modello). Predefinito: 10000 (10 secondi). Intervallo: 1000-600000 (10 minuti).",
+ "maxRetries": "Tentativi massimi",
+ "maxRetriesHelp": "Numero massimo di tentativi di ripetizione per richieste fallite. Predefinito: 0 (nessun tentativo). Intervallo: 0-10.",
+ "retryDelay": "Ritardo tentativo (ms)",
+ "retryDelayHelp": "Ritardo iniziale tra tentativi di ripetizione in millisecondi. Utilizza backoff esponenziale. Predefinito: 1000 (1 secondo). Intervallo: 100-10000.",
+ "enableLogging": "Abilita registrazione richieste",
+ "enableLoggingHelp": "Abilita registrazione dettagliata di richieste, risposte ed errori API Ollama. I log includono informazioni temporali e dettagli di connessione."
},
"unboundApiKey": "Chiave API Unbound",
"getUnboundApiKey": "Ottieni chiave API Unbound",
diff --git a/webview-ui/src/i18n/locales/ja/settings.json b/webview-ui/src/i18n/locales/ja/settings.json
index 1cccb364a46..1e5294bfe1e 100644
--- a/webview-ui/src/i18n/locales/ja/settings.json
+++ b/webview-ui/src/i18n/locales/ja/settings.json
@@ -467,7 +467,55 @@
"numCtx": "コンテキストウィンドウサイズ (num_ctx)",
"numCtxHelp": "モデルのデフォルトのコンテキストウィンドウサイズを上書きします。モデルのModelfile構成を使用するには、空のままにします。最小値は128です。",
"description": "Ollamaを使用すると、ローカルコンピューターでモデルを実行できます。始め方については、クイックスタートガイドをご覧ください。",
- "warning": "注意:Roo Codeは複雑なプロンプトを使用し、Claudeモデルで最適に動作します。能力の低いモデルは期待通りに動作しない場合があります。"
+ "warning": "注意:Roo Codeは複雑なプロンプトを使用し、Claudeモデルで最適に動作します。能力の低いモデルは期待通りに動作しない場合があります。",
+ "test": "テスト",
+ "testing": "テスト中...",
+ "completedIn": "{{duration}}msで完了",
+ "refreshModels": "モデルを更新",
+ "refreshing": "更新中...",
+ "messages": {
+ "connectionSuccess": "{{baseUrl}}でOllamaに正常に接続しました",
+ "connectionInvalidUrl": "無効なURL: {{baseUrl}}",
+ "connectionRefused": "{{baseUrl}}でOllamaに接続できません。Ollamaが実行中であることを確認してください。",
+ "connectionTimeout": "Ollamaへの接続がタイムアウトしました。URLが正しく、Ollamaにアクセスできることを確認してください。",
+ "connectionNetworkError": "Ollamaへの接続中にネットワークエラーが発生しました。ネットワーク接続を確認してください。",
+ "connectionHttpError": "Ollamaがエラーを返しました: {{status}} {{statusText}}",
+ "connectionFailed": "接続に失敗しました: {{error}}",
+ "connectionTestError": "接続テスト中にエラーが発生しました: {{error}}",
+ "refreshSuccess": "ツールサポート付きの{{count}}個のモデルが見つかりました(合計{{total}}個)",
+ "refreshNoModels": "モデルが見つかりませんでした。Ollamaが実行中で、モデルがインストールされていることを確認してください。",
+ "refreshFailed": "モデルの更新に失敗しました: {{error}}"
+ },
+ "connectionSettings": "接続設定",
+ "toolsSupport": "ツールサポート",
+ "noToolsSupport": "ツールサポートなし",
+ "models": {
+ "one": "モデル",
+ "other": "モデル"
+ },
+ "noToolsSupportHelp": "これらのモデルはネイティブツール呼び出しをサポートしておらず、Roo Codeでは使用できません。参考としてのみ表示されます。",
+ "table": {
+ "modelName": "モデル名",
+ "context": "コンテキスト",
+ "size": "サイズ",
+ "quantization": "量子化",
+ "family": "ファミリー",
+ "images": "画像",
+ "yes": "はい",
+ "no": "いいえ"
+ },
+ "streaming": "ストリーミング",
+ "streamingHelp": "Ollama APIリクエストではストリーミングが常に有効です。応答は生成されると同時にリアルタイムでストリーミングされます。",
+ "requestTimeout": "リクエストタイムアウト (ms)",
+ "requestTimeoutHelp": "LLM APIリクエスト(チャット完了、思考作業)のタイムアウト(ミリ秒)。デフォルト: 3600000 (60分)。範囲: 1000-7200000 (120分)。",
+ "modelDiscoveryTimeout": "モデル検出タイムアウト (ms)",
+ "modelDiscoveryTimeoutHelp": "モデル検出リクエスト(モデル一覧と詳細の取得)のタイムアウト(ミリ秒)。デフォルト: 10000 (10秒)。範囲: 1000-600000 (10分)。",
+ "maxRetries": "最大再試行回数",
+ "maxRetriesHelp": "失敗したリクエストの最大再試行回数。デフォルト: 0 (再試行なし)。範囲: 0-10。",
+ "retryDelay": "再試行遅延 (ms)",
+ "retryDelayHelp": "再試行の間の初期遅延(ミリ秒)。指数バックオフを使用します。デフォルト: 1000 (1秒)。範囲: 100-10000。",
+ "enableLogging": "リクエストログを有効化",
+ "enableLoggingHelp": "Ollama APIリクエスト、応答、エラーの詳細ログを有効化します。ログには時間情報と接続詳細が含まれます。"
},
"unboundApiKey": "Unbound APIキー",
"getUnboundApiKey": "Unbound APIキーを取得",
diff --git a/webview-ui/src/i18n/locales/ko/settings.json b/webview-ui/src/i18n/locales/ko/settings.json
index 9600d9872b8..8cd6758463d 100644
--- a/webview-ui/src/i18n/locales/ko/settings.json
+++ b/webview-ui/src/i18n/locales/ko/settings.json
@@ -467,7 +467,55 @@
"numCtx": "컨텍스트 창 크기(num_ctx)",
"numCtxHelp": "모델의 기본 컨텍스트 창 크기를 재정의합니다. 모델의 Modelfile 구성을 사용하려면 비워 둡니다. 최소값은 128입니다.",
"description": "Ollama를 사용하면 컴퓨터에서 로컬로 모델을 실행할 수 있습니다. 시작하는 방법은 빠른 시작 가이드를 참조하세요.",
- "warning": "참고: Roo Code는 복잡한 프롬프트를 사용하며 Claude 모델에서 가장 잘 작동합니다. 덜 강력한 모델은 예상대로 작동하지 않을 수 있습니다."
+ "warning": "참고: Roo Code는 복잡한 프롬프트를 사용하며 Claude 모델에서 가장 잘 작동합니다. 덜 강력한 모델은 예상대로 작동하지 않을 수 있습니다.",
+ "test": "테스트",
+ "testing": "테스트 중...",
+ "completedIn": "{{duration}}ms에 완료",
+ "refreshModels": "모델 새로고침",
+ "refreshing": "새로고침 중...",
+ "messages": {
+ "connectionSuccess": "{{baseUrl}}에서 Ollama에 성공적으로 연결되었습니다",
+ "connectionInvalidUrl": "잘못된 URL: {{baseUrl}}",
+ "connectionRefused": "{{baseUrl}}에서 Ollama에 연결할 수 없습니다. Ollama가 실행 중인지 확인하세요.",
+ "connectionTimeout": "Ollama에 대한 연결이 시간 초과되었습니다. URL이 올바른지, Ollama에 접근할 수 있는지 확인하세요.",
+ "connectionNetworkError": "Ollama에 연결하는 중 네트워크 오류가 발생했습니다. 네트워크 연결을 확인하세요.",
+ "connectionHttpError": "Ollama가 오류를 반환했습니다: {{status}} {{statusText}}",
+ "connectionFailed": "연결 실패: {{error}}",
+ "connectionTestError": "연결 테스트 중 오류 발생: {{error}}",
+ "refreshSuccess": "도구 지원이 있는 {{count}}개의 모델을 찾았습니다 (총 {{total}}개)",
+ "refreshNoModels": "모델을 찾을 수 없습니다. Ollama가 실행 중이고 모델이 설치되어 있는지 확인하세요.",
+ "refreshFailed": "모델 새로고침 실패: {{error}}"
+ },
+ "connectionSettings": "연결 설정",
+ "toolsSupport": "도구 지원",
+ "noToolsSupport": "도구 지원 없음",
+ "models": {
+ "one": "모델",
+ "other": "모델"
+ },
+ "noToolsSupportHelp": "이 모델들은 네이티브 도구 호출을 지원하지 않으며 Roo Code와 함께 사용할 수 없습니다. 참고용으로만 표시됩니다.",
+ "table": {
+ "modelName": "모델 이름",
+ "context": "컨텍스트",
+ "size": "크기",
+ "quantization": "양자화",
+ "family": "패밀리",
+ "images": "이미지",
+ "yes": "예",
+ "no": "아니오"
+ },
+ "streaming": "스트리밍",
+ "streamingHelp": "Ollama API 요청에 대해 스트리밍이 항상 활성화되어 있습니다. 응답은 생성되는 대로 실시간으로 스트리밍됩니다.",
+ "requestTimeout": "요청 시간 제한 (ms)",
+ "requestTimeoutHelp": "LLM API 요청(채팅 완성, 사고 작업)의 시간 제한(밀리초). 기본값: 3600000 (60분). 범위: 1000-7200000 (120분).",
+ "modelDiscoveryTimeout": "모델 검색 시간 제한 (ms)",
+ "modelDiscoveryTimeoutHelp": "모델 검색 요청(모델 목록 및 세부 정보 가져오기)의 시간 제한(밀리초). 기본값: 10000 (10초). 범위: 1000-600000 (10분).",
+ "maxRetries": "최대 재시도",
+ "maxRetriesHelp": "실패한 요청에 대한 최대 재시도 시도 횟수. 기본값: 0 (재시도 없음). 범위: 0-10.",
+ "retryDelay": "재시도 지연 (ms)",
+ "retryDelayHelp": "재시도 시도 간 초기 지연(밀리초). 지수 백오프를 사용합니다. 기본값: 1000 (1초). 범위: 100-10000.",
+ "enableLogging": "요청 로깅 활성화",
+ "enableLoggingHelp": "Ollama API 요청, 응답 및 오류의 상세 로깅을 활성화합니다. 로그에는 시간 정보 및 연결 세부 정보가 포함됩니다."
},
"unboundApiKey": "Unbound API 키",
"getUnboundApiKey": "Unbound API 키 받기",
diff --git a/webview-ui/src/i18n/locales/nl/settings.json b/webview-ui/src/i18n/locales/nl/settings.json
index 3722aa2a0d9..20ae00f3ae2 100644
--- a/webview-ui/src/i18n/locales/nl/settings.json
+++ b/webview-ui/src/i18n/locales/nl/settings.json
@@ -26,7 +26,7 @@
"discardButton": "Wijzigingen negeren"
},
"sections": {
- "providers": "Providers",
+ "providers": "Aanbieders",
"modes": "Modi",
"mcp": "MCP-servers",
"worktrees": "Worktrees",
@@ -467,7 +467,55 @@
"numCtx": "Contextvenstergrootte (num_ctx)",
"numCtxHelp": "Overschrijft de standaard contextvenstergrootte van het model. Laat leeg om de Modelfile-configuratie van het model te gebruiken. De minimumwaarde is 128.",
"description": "Ollama laat je modellen lokaal op je computer draaien. Zie hun quickstart-gids voor instructies.",
- "warning": "Let op: Roo Code gebruikt complexe prompts en werkt het beste met Claude-modellen. Minder krachtige modellen werken mogelijk niet zoals verwacht."
+ "warning": "Let op: Roo Code gebruikt complexe prompts en werkt het beste met Claude-modellen. Minder krachtige modellen werken mogelijk niet zoals verwacht.",
+ "test": "Testen",
+ "testing": "Testen...",
+ "completedIn": "Voltooid in {{duration}}ms",
+ "refreshModels": "Modellen vernieuwen",
+ "refreshing": "Vernieuwen...",
+ "messages": {
+ "connectionSuccess": "Succesvol verbonden met Ollama op {{baseUrl}}",
+ "connectionInvalidUrl": "Ongeldige URL: {{baseUrl}}",
+ "connectionRefused": "Kan niet verbinden met Ollama op {{baseUrl}}. Zorg ervoor dat Ollama draait.",
+ "connectionTimeout": "Verbinding met Ollama is verlopen. Controleer of de URL correct is en Ollama toegankelijk is.",
+ "connectionNetworkError": "Netwerkfout bij verbinden met Ollama. Controleer uw netwerkverbinding.",
+ "connectionHttpError": "Ollama gaf een fout terug: {{status}} {{statusText}}",
+ "connectionFailed": "Verbinding mislukt: {{error}}",
+ "connectionTestError": "Fout bij testen van verbinding: {{error}}",
+ "refreshSuccess": "{{count}} model(len) gevonden met tools-ondersteuning ({{total}} totaal)",
+ "refreshNoModels": "Geen modellen gevonden. Zorg ervoor dat Ollama draait en modellen heeft geïnstalleerd.",
+ "refreshFailed": "Vernieuwen van modellen mislukt: {{error}}"
+ },
+ "connectionSettings": "Verbindingsinstellingen",
+ "toolsSupport": "Toolondersteuning",
+ "noToolsSupport": "Geen toolondersteuning",
+ "models": {
+ "one": "model",
+ "other": "modellen"
+ },
+ "noToolsSupportHelp": "Deze modellen ondersteunen geen native tool-aanroepen en kunnen niet worden gebruikt met Roo Code. Ze worden alleen ter referentie weergegeven.",
+ "table": {
+ "modelName": "Modelnaam",
+ "context": "Context",
+ "size": "Grootte",
+ "quantization": "Kwantisatie",
+ "family": "Familie",
+ "images": "Afbeeldingen",
+ "yes": "Ja",
+ "no": "Nee"
+ },
+ "streaming": "Streaming",
+ "streamingHelp": "Streaming is altijd ingeschakeld voor Ollama API-verzoeken. Reacties worden in real-time gestreamd terwijl ze worden gegenereerd.",
+ "requestTimeout": "Verzoek time-out (ms)",
+ "requestTimeoutHelp": "Time-out in milliseconden voor LLM API-verzoeken (chatvoltooiingen, denkwerk). Standaard: 3600000 (60 minuten). Bereik: 1000-7200000 (120 minuten).",
+ "modelDiscoveryTimeout": "Modeldetectie time-out (ms)",
+ "modelDiscoveryTimeoutHelp": "Time-out in milliseconden voor modeldetectie-verzoeken (lijst en ophalen van modeldetails). Standaard: 10000 (10 seconden). Bereik: 1000-600000 (10 minuten).",
+ "maxRetries": "Max. nieuwe pogingen",
+ "maxRetriesHelp": "Maximum aantal nieuwe pogingen voor mislukte verzoeken. Standaard: 0 (geen nieuwe pogingen). Bereik: 0-10.",
+ "retryDelay": "Vertraging nieuwe poging (ms)",
+ "retryDelayHelp": "Initiële vertraging tussen nieuwe pogingen in milliseconden. Gebruikt exponentiële backoff. Standaard: 1000 (1 seconde). Bereik: 100-10000.",
+ "enableLogging": "Verzoeklogboek inschakelen",
+ "enableLoggingHelp": "Gedetailleerde logboekregistratie van Ollama API-verzoeken, reacties en fouten inschakelen. Logboeken bevatten tijdinformatie en verbindingsdetails."
},
"unboundApiKey": "Unbound API-sleutel",
"getUnboundApiKey": "Unbound API-sleutel ophalen",
diff --git a/webview-ui/src/i18n/locales/pl/settings.json b/webview-ui/src/i18n/locales/pl/settings.json
index ceab6602981..caaa80a1519 100644
--- a/webview-ui/src/i18n/locales/pl/settings.json
+++ b/webview-ui/src/i18n/locales/pl/settings.json
@@ -467,7 +467,57 @@
"numCtx": "Rozmiar okna kontekstu (num_ctx)",
"numCtxHelp": "Zastępuje domyślny rozmiar okna kontekstu modelu. Pozostaw puste, aby użyć konfiguracji Modelfile modelu. Minimalna wartość to 128.",
"description": "Ollama pozwala na lokalne uruchamianie modeli na twoim komputerze. Aby rozpocząć, zapoznaj się z przewodnikiem szybkiego startu.",
- "warning": "Uwaga: Roo Code używa złożonych podpowiedzi i działa najlepiej z modelami Claude. Modele o niższych możliwościach mogą nie działać zgodnie z oczekiwaniami."
+ "warning": "Uwaga: Roo Code używa złożonych podpowiedzi i działa najlepiej z modelami Claude. Modele o niższych możliwościach mogą nie działać zgodnie z oczekiwaniami.",
+ "test": "Testuj",
+ "testing": "Testowanie...",
+ "completedIn": "Ukończono w {{duration}}ms",
+ "refreshModels": "Odśwież modele",
+ "refreshing": "Odświeżanie...",
+ "messages": {
+ "connectionSuccess": "Pomyślnie połączono z Ollama pod adresem {{baseUrl}}",
+ "connectionInvalidUrl": "Nieprawidłowy URL: {{baseUrl}}",
+ "connectionRefused": "Nie można połączyć się z Ollama pod adresem {{baseUrl}}. Upewnij się, że Ollama działa.",
+ "connectionTimeout": "Połączenie z Ollama przekroczyło limit czasu. Sprawdź, czy adres URL jest poprawny i Ollama jest dostępna.",
+ "connectionNetworkError": "Błąd sieci podczas łączenia z Ollama. Sprawdź połączenie sieciowe.",
+ "connectionHttpError": "Ollama zwróciła błąd: {{status}} {{statusText}}",
+ "connectionFailed": "Połączenie nie powiodło się: {{error}}",
+ "connectionTestError": "Błąd podczas testowania połączenia: {{error}}",
+ "refreshSuccess": "Znaleziono {{count}} model(i) z obsługą narzędzi ({{total}} łącznie)",
+ "refreshNoModels": "Nie znaleziono modeli. Upewnij się, że Ollama działa i ma zainstalowane modele.",
+ "refreshFailed": "Odświeżanie modeli nie powiodło się: {{error}}"
+ },
+ "connectionSettings": "Ustawienia połączenia",
+ "toolsSupport": "Obsługa narzędzi",
+ "noToolsSupport": "Brak obsługi narzędzi",
+ "models": {
+ "one": "model",
+ "few": "modele",
+ "many": "modeli",
+ "other": "modeli"
+ },
+ "noToolsSupportHelp": "Te modele nie obsługują natywnych wywołań narzędzi i nie mogą być używane z Roo Code. Są wyświetlane tylko w celach informacyjnych.",
+ "table": {
+ "modelName": "Nazwa modelu",
+ "context": "Kontekst",
+ "size": "Rozmiar",
+ "quantization": "Kwantyzacja",
+ "family": "Rodzina",
+ "images": "Obrazy",
+ "yes": "Tak",
+ "no": "Nie"
+ },
+ "streaming": "Przesyłanie strumieniowe",
+ "streamingHelp": "Przesyłanie strumieniowe jest zawsze włączone dla żądań API Ollama. Odpowiedzi są przesyłane strumieniowo w czasie rzeczywistym podczas generowania.",
+ "requestTimeout": "Limit czasu żądania (ms)",
+ "requestTimeoutHelp": "Limit czasu w milisekundach dla żądań API LLM (uzupełnienia czatu, praca myślowa). Domyślnie: 3600000 (60 minut). Zakres: 1000-7200000 (120 minut).",
+ "modelDiscoveryTimeout": "Limit czasu wykrywania modelu (ms)",
+ "modelDiscoveryTimeoutHelp": "Limit czasu w milisekundach dla żądań wykrywania modelu (lista i pobieranie szczegółów modelu). Domyślnie: 10000 (10 sekund). Zakres: 1000-600000 (10 minut).",
+ "maxRetries": "Maks. ponownych prób",
+ "maxRetriesHelp": "Maksymalna liczba prób ponowienia dla nieudanych żądań. Domyślnie: 0 (brak ponownych prób). Zakres: 0-10.",
+ "retryDelay": "Opóźnienie ponownej próby (ms)",
+ "retryDelayHelp": "Początkowe opóźnienie między próbami ponowienia w milisekundach. Używa wykładniczego wycofania. Domyślnie: 1000 (1 sekunda). Zakres: 100-10000.",
+ "enableLogging": "Włącz rejestrowanie żądań",
+ "enableLoggingHelp": "Włącz szczegółowe rejestrowanie żądań, odpowiedzi i błędów API Ollama. Dzienniki zawierają informacje o czasie i szczegóły połączenia."
},
"unboundApiKey": "Klucz API Unbound",
"getUnboundApiKey": "Uzyskaj klucz API Unbound",
diff --git a/webview-ui/src/i18n/locales/pt-BR/settings.json b/webview-ui/src/i18n/locales/pt-BR/settings.json
index 43361acc39c..99350075a44 100644
--- a/webview-ui/src/i18n/locales/pt-BR/settings.json
+++ b/webview-ui/src/i18n/locales/pt-BR/settings.json
@@ -467,7 +467,55 @@
"numCtx": "Tamanho da janela de contexto (num_ctx)",
"numCtxHelp": "Substitui o tamanho da janela de contexto padrão do modelo. Deixe em branco para usar a configuração do Modelfile do modelo. O valor mínimo é 128.",
"description": "O Ollama permite que você execute modelos localmente em seu computador. Para instruções sobre como começar, veja o guia de início rápido deles.",
- "warning": "Nota: O Roo Code usa prompts complexos e funciona melhor com modelos Claude. Modelos menos capazes podem não funcionar como esperado."
+ "warning": "Nota: O Roo Code usa prompts complexos e funciona melhor com modelos Claude. Modelos menos capazes podem não funcionar como esperado.",
+ "test": "Testar",
+ "testing": "Testando...",
+ "completedIn": "Concluído em {{duration}}ms",
+ "refreshModels": "Atualizar modelos",
+ "refreshing": "Atualizando...",
+ "messages": {
+ "connectionSuccess": "Conectado com sucesso ao Ollama em {{baseUrl}}",
+ "connectionInvalidUrl": "URL inválida: {{baseUrl}}",
+ "connectionRefused": "Não é possível conectar ao Ollama em {{baseUrl}}. Certifique-se de que o Ollama está em execução.",
+ "connectionTimeout": "A conexão com o Ollama expirou. Verifique se a URL está correta e o Ollama está acessível.",
+ "connectionNetworkError": "Erro de rede ao conectar ao Ollama. Verifique sua conexão de rede.",
+ "connectionHttpError": "Ollama retornou um erro: {{status}} {{statusText}}",
+ "connectionFailed": "Falha ao conectar: {{error}}",
+ "connectionTestError": "Erro ao testar a conexão: {{error}}",
+ "refreshSuccess": "Encontrados {{count}} modelo(s) com suporte a ferramentas ({{total}} no total)",
+ "refreshNoModels": "Nenhum modelo encontrado. Certifique-se de que o Ollama está em execução e tem modelos instalados.",
+ "refreshFailed": "Falha ao atualizar os modelos: {{error}}"
+ },
+ "connectionSettings": "Configurações de conexão",
+ "toolsSupport": "Suporte a ferramentas",
+ "noToolsSupport": "Sem suporte a ferramentas",
+ "models": {
+ "one": "modelo",
+ "other": "modelos"
+ },
+ "noToolsSupportHelp": "Esses modelos não suportam chamadas de ferramentas nativas e não podem ser usados com Roo Code. Eles são mostrados apenas como referência.",
+ "table": {
+ "modelName": "Nome do modelo",
+ "context": "Contexto",
+ "size": "Tamanho",
+ "quantization": "Quantização",
+ "family": "Família",
+ "images": "Imagens",
+ "yes": "Sim",
+ "no": "Não"
+ },
+ "streaming": "Transmissão",
+ "streamingHelp": "A transmissão está sempre habilitada para solicitações da API Ollama. As respostas são transmitidas em tempo real conforme são geradas.",
+ "requestTimeout": "Tempo limite da solicitação (ms)",
+ "requestTimeoutHelp": "Tempo limite em milissegundos para solicitações da API LLM (conclusões de chat, trabalho de pensamento). Padrão: 3600000 (60 minutos). Intervalo: 1000-7200000 (120 minutos).",
+ "modelDiscoveryTimeout": "Tempo limite de descoberta de modelos (ms)",
+ "modelDiscoveryTimeoutHelp": "Tempo limite em milissegundos para solicitações de descoberta de modelos (listagem e obtenção de detalhes do modelo). Padrão: 10000 (10 segundos). Intervalo: 1000-600000 (10 minutos).",
+ "maxRetries": "Máximo de tentativas",
+ "maxRetriesHelp": "Número máximo de tentativas de repetição para solicitações falhadas. Padrão: 0 (sem tentativas). Intervalo: 0-10.",
+ "retryDelay": "Atraso de tentativa (ms)",
+ "retryDelayHelp": "Atraso inicial entre tentativas de repetição em milissegundos. Usa backoff exponencial. Padrão: 1000 (1 segundo). Intervalo: 100-10000.",
+ "enableLogging": "Habilitar registro de solicitações",
+ "enableLoggingHelp": "Habilitar registro detalhado de solicitações, respostas e erros da API Ollama. Os registros incluem informações de tempo e detalhes de conexão."
},
"unboundApiKey": "Chave de API Unbound",
"getUnboundApiKey": "Obter chave de API Unbound",
diff --git a/webview-ui/src/i18n/locales/ru/settings.json b/webview-ui/src/i18n/locales/ru/settings.json
index 9245cb323bb..1e8f78ecf2a 100644
--- a/webview-ui/src/i18n/locales/ru/settings.json
+++ b/webview-ui/src/i18n/locales/ru/settings.json
@@ -467,7 +467,57 @@
"numCtx": "Размер контекстного окна (num_ctx)",
"numCtxHelp": "Переопределяет размер контекстного окна модели по умолчанию. Оставьте пустым, чтобы использовать конфигурацию Modelfile модели. Минимальное значение — 128.",
"description": "Ollama позволяет запускать модели локально на вашем компьютере. Для начала ознакомьтесь с кратким руководством.",
- "warning": "Примечание: Roo Code использует сложные подсказки и лучше всего работает с моделями Claude. Менее мощные модели могут работать некорректно."
+ "warning": "Примечание: Roo Code использует сложные подсказки и лучше всего работает с моделями Claude. Менее мощные модели могут работать некорректно.",
+ "test": "Тест",
+ "testing": "Тестирование...",
+ "completedIn": "Завершено за {{duration}}ms",
+ "refreshModels": "Обновить модели",
+ "refreshing": "Обновляем...",
+ "messages": {
+ "connectionSuccess": "Успешно подключено к Ollama по адресу {{baseUrl}}",
+ "connectionInvalidUrl": "Неверный URL: {{baseUrl}}",
+ "connectionRefused": "Не удается подключиться к Ollama по адресу {{baseUrl}}. Убедитесь, что Ollama запущена.",
+ "connectionTimeout": "Истекло время ожидания подключения к Ollama. Проверьте, правильный ли URL и доступна ли Ollama.",
+ "connectionNetworkError": "Ошибка сети при подключении к Ollama. Проверьте сетевое подключение.",
+ "connectionHttpError": "Ollama вернула ошибку: {{status}} {{statusText}}",
+ "connectionFailed": "Не удалось подключиться: {{error}}",
+ "connectionTestError": "Ошибка при проверке подключения: {{error}}",
+ "refreshSuccess": "Найдено {{count}} модель(ей) с поддержкой инструментов ({{total}} всего)",
+ "refreshNoModels": "Модели не найдены. Убедитесь, что Ollama запущена и установлены модели.",
+ "refreshFailed": "Не удалось обновить модели: {{error}}"
+ },
+ "connectionSettings": "Настройки подключения",
+ "toolsSupport": "Поддержка инструментов",
+ "noToolsSupport": "Без поддержки инструментов",
+ "models": {
+ "one": "модель",
+ "few": "модели",
+ "many": "моделей",
+ "other": "моделей"
+ },
+ "noToolsSupportHelp": "Эти модели не поддерживают нативные вызовы инструментов и не могут использоваться с Roo Code. Они отображаются только для справки.",
+ "table": {
+ "modelName": "Название модели",
+ "context": "Контекст",
+ "size": "Размер",
+ "quantization": "Квантование",
+ "family": "Семейство",
+ "images": "Изображения",
+ "yes": "Да",
+ "no": "Нет"
+ },
+ "streaming": "Потоковая передача",
+ "streamingHelp": "Потоковая передача всегда включена для запросов API Ollama. Ответы передаются в реальном времени по мере их генерации.",
+ "requestTimeout": "Таймаут запроса (мс)",
+ "requestTimeoutHelp": "Таймаут в миллисекундах для запросов API LLM (завершения чата, мыслительная работа). По умолчанию: 3600000 (60 минут). Диапазон: 1000-7200000 (120 минут).",
+ "modelDiscoveryTimeout": "Таймаут обнаружения моделей (мс)",
+ "modelDiscoveryTimeoutHelp": "Таймаут в миллисекундах для запросов обнаружения моделей (список и получение деталей модели). По умолчанию: 10000 (10 секунд). Диапазон: 1000-600000 (10 минут).",
+ "maxRetries": "Макс. повторов",
+ "maxRetriesHelp": "Максимальное количество попыток повтора для неудачных запросов. По умолчанию: 0 (без повторов). Диапазон: 0-10.",
+ "retryDelay": "Задержка повтора (мс)",
+ "retryDelayHelp": "Начальная задержка между попытками повтора в миллисекундах. Использует экспоненциальный откат. По умолчанию: 1000 (1 секунда). Диапазон: 100-10000.",
+ "enableLogging": "Включить логирование запросов",
+ "enableLoggingHelp": "Включить подробное логирование запросов, ответов и ошибок API Ollama. Логи включают информацию о времени и детали подключения."
},
"unboundApiKey": "Unbound API-ключ",
"getUnboundApiKey": "Получить Unbound API-ключ",
diff --git a/webview-ui/src/i18n/locales/tr/settings.json b/webview-ui/src/i18n/locales/tr/settings.json
index 74768674924..9d2f99974d4 100644
--- a/webview-ui/src/i18n/locales/tr/settings.json
+++ b/webview-ui/src/i18n/locales/tr/settings.json
@@ -467,7 +467,55 @@
"numCtx": "Bağlam Penceresi Boyutu (num_ctx)",
"numCtxHelp": "Modelin varsayılan bağlam penceresi boyutunu geçersiz kılar. Modelin Modelfile yapılandırmasını kullanmak için boş bırakın. Minimum değer 128'dir.",
"description": "Ollama, modelleri bilgisayarınızda yerel olarak çalıştırmanıza olanak tanır. Başlamak için hızlı başlangıç kılavuzlarına bakın.",
- "warning": "Not: Roo Code karmaşık istemler kullanır ve Claude modelleriyle en iyi şekilde çalışır. Daha az yetenekli modeller beklendiği gibi çalışmayabilir."
+ "warning": "Not: Roo Code karmaşık istemler kullanır ve Claude modelleriyle en iyi şekilde çalışır. Daha az yetenekli modeller beklendiği gibi çalışmayabilir.",
+ "test": "Test",
+ "testing": "Test ediliyor...",
+ "completedIn": "{{duration}}ms içinde tamamlandı",
+ "refreshModels": "Modelleri yenile",
+ "refreshing": "Yenileniyor...",
+ "messages": {
+ "connectionSuccess": "{{baseUrl}} adresindeki Ollama'ya başarıyla bağlanıldı",
+ "connectionInvalidUrl": "Geçersiz URL: {{baseUrl}}",
+ "connectionRefused": "{{baseUrl}} adresindeki Ollama'ya bağlanılamıyor. Ollama'nın çalıştığından emin olun.",
+ "connectionTimeout": "Ollama'ya bağlantı zaman aşımına uğradı. URL'nin doğru olduğunu ve Ollama'ya erişilebildiğini kontrol edin.",
+ "connectionNetworkError": "Ollama'ya bağlanırken ağ hatası oluştu. Ağ bağlantınızı kontrol edin.",
+ "connectionHttpError": "Ollama hata döndürdü: {{status}} {{statusText}}",
+ "connectionFailed": "Bağlantı başarısız: {{error}}",
+ "connectionTestError": "Bağlantı test edilirken hata oluştu: {{error}}",
+ "refreshSuccess": "Araç desteği olan {{count}} model bulundu (toplam {{total}})",
+ "refreshNoModels": "Model bulunamadı. Ollama'nın çalıştığından ve modellerin yüklü olduğundan emin olun.",
+ "refreshFailed": "Modeller yenilenirken hata oluştu: {{error}}"
+ },
+ "connectionSettings": "Bağlantı ayarları",
+ "toolsSupport": "Araç desteği",
+ "noToolsSupport": "Araç desteği yok",
+ "models": {
+ "one": "model",
+ "other": "modeller"
+ },
+ "noToolsSupportHelp": "Bu modeller yerel araç çağrılarını desteklemez ve Roo Code ile kullanılamaz. Yalnızca referans olarak gösterilirler.",
+ "table": {
+ "modelName": "Model adı",
+ "context": "Bağlam",
+ "size": "Boyut",
+ "quantization": "Kuantizasyon",
+ "family": "Aile",
+ "images": "Görüntüler",
+ "yes": "Evet",
+ "no": "Hayır"
+ },
+ "streaming": "Akış",
+ "streamingHelp": "Ollama API istekleri için akış her zaman etkindir. Yanıtlar oluşturuldukça gerçek zamanlı olarak akışa alınır.",
+ "requestTimeout": "İstek zaman aşımı (ms)",
+ "requestTimeoutHelp": "LLM API istekleri (sohbet tamamlama, düşünme işi) için milisaniye cinsinden zaman aşımı. Varsayılan: 3600000 (60 dakika). Aralık: 1000-7200000 (120 dakika).",
+ "modelDiscoveryTimeout": "Model keşfi zaman aşımı (ms)",
+ "modelDiscoveryTimeoutHelp": "Model keşfi istekleri (liste ve model ayrıntılarını alma) için milisaniye cinsinden zaman aşımı. Varsayılan: 10000 (10 saniye). Aralık: 1000-600000 (10 dakika).",
+ "maxRetries": "Maks. yeniden deneme",
+ "maxRetriesHelp": "Başarısız istekler için maksimum yeniden deneme denemesi sayısı. Varsayılan: 0 (yeniden deneme yok). Aralık: 0-10.",
+ "retryDelay": "Yeniden deneme gecikmesi (ms)",
+ "retryDelayHelp": "Milisaniye cinsinden yeniden deneme denemeleri arasındaki başlangıç gecikmesi. Üstel geri çekilme kullanır. Varsayılan: 1000 (1 saniye). Aralık: 100-10000.",
+ "enableLogging": "İstek günlüğünü etkinleştir",
+ "enableLoggingHelp": "Ollama API istekleri, yanıtları ve hatalarının ayrıntılı günlüğünü etkinleştir. Günlükler zaman bilgisi ve bağlantı ayrıntılarını içerir."
},
"unboundApiKey": "Unbound API Anahtarı",
"getUnboundApiKey": "Unbound API Anahtarı Al",
diff --git a/webview-ui/src/i18n/locales/vi/settings.json b/webview-ui/src/i18n/locales/vi/settings.json
index ba4579a0f9d..3ce0c95dd94 100644
--- a/webview-ui/src/i18n/locales/vi/settings.json
+++ b/webview-ui/src/i18n/locales/vi/settings.json
@@ -467,7 +467,55 @@
"numCtx": "Kích thước cửa sổ ngữ cảnh (num_ctx)",
"numCtxHelp": "Ghi đè kích thước cửa sổ ngữ cảnh mặc định của mô hình. Để trống để sử dụng cấu hình Modelfile của mô hình. Giá trị tối thiểu là 128.",
"description": "Ollama cho phép bạn chạy các mô hình cục bộ trên máy tính của bạn. Để biết hướng dẫn về cách bắt đầu, xem hướng dẫn nhanh của họ.",
- "warning": "Lưu ý: Roo Code sử dụng các lời nhắc phức tạp và hoạt động tốt nhất với các mô hình Claude. Các mô hình kém mạnh hơn có thể không hoạt động như mong đợi."
+ "warning": "Lưu ý: Roo Code sử dụng các lời nhắc phức tạp và hoạt động tốt nhất với các mô hình Claude. Các mô hình kém mạnh hơn có thể không hoạt động như mong đợi.",
+ "test": "Kiểm tra",
+ "testing": "Đang kiểm tra...",
+ "completedIn": "Hoàn thành trong {{duration}}ms",
+ "refreshModels": "Làm mới mô hình",
+ "refreshing": "Đang làm mới...",
+ "messages": {
+ "connectionSuccess": "Đã kết nối thành công với Ollama tại {{baseUrl}}",
+ "connectionInvalidUrl": "URL không hợp lệ: {{baseUrl}}",
+ "connectionRefused": "Không thể kết nối với Ollama tại {{baseUrl}}. Đảm bảo Ollama đang chạy.",
+ "connectionTimeout": "Kết nối với Ollama đã hết thời gian chờ. Kiểm tra xem URL có đúng không và Ollama có thể truy cập được không.",
+ "connectionNetworkError": "Lỗi mạng khi kết nối với Ollama. Kiểm tra kết nối mạng của bạn.",
+ "connectionHttpError": "Ollama trả về lỗi: {{status}} {{statusText}}",
+ "connectionFailed": "Kết nối thất bại: {{error}}",
+ "connectionTestError": "Lỗi khi kiểm tra kết nối: {{error}}",
+ "refreshSuccess": "Tìm thấy {{count}} mô hình có hỗ trợ công cụ ({{total}} tổng cộng)",
+ "refreshNoModels": "Không tìm thấy mô hình nào. Đảm bảo Ollama đang chạy và đã cài đặt mô hình.",
+ "refreshFailed": "Làm mới mô hình thất bại: {{error}}"
+ },
+ "connectionSettings": "Cài đặt kết nối",
+ "toolsSupport": "Hỗ trợ công cụ",
+ "noToolsSupport": "Không hỗ trợ công cụ",
+ "models": {
+ "one": "mô hình",
+ "other": "mô hình"
+ },
+ "noToolsSupportHelp": "Các mô hình này không hỗ trợ gọi công cụ gốc và không thể sử dụng với Roo Code. Chúng chỉ được hiển thị để tham khảo.",
+ "table": {
+ "modelName": "Tên mô hình",
+ "context": "Ngữ cảnh",
+ "size": "Kích thước",
+ "quantization": "Lượng tử hóa",
+ "family": "Họ",
+ "images": "Hình ảnh",
+ "yes": "Có",
+ "no": "Không"
+ },
+ "streaming": "Phát trực tuyến",
+ "streamingHelp": "Phát trực tuyến luôn được bật cho các yêu cầu API Ollama. Phản hồi được phát trực tuyến theo thời gian thực khi chúng được tạo.",
+ "requestTimeout": "Hết thời gian yêu cầu (ms)",
+ "requestTimeoutHelp": "Hết thời gian tính bằng mili giây cho các yêu cầu API LLM (hoàn thành trò chuyện, công việc suy nghĩ). Mặc định: 3600000 (60 phút). Phạm vi: 1000-7200000 (120 phút).",
+ "modelDiscoveryTimeout": "Hết thời gian khám phá mô hình (ms)",
+ "modelDiscoveryTimeoutHelp": "Hết thời gian tính bằng mili giây cho các yêu cầu khám phá mô hình (danh sách và lấy chi tiết mô hình). Mặc định: 10000 (10 giây). Phạm vi: 1000-600000 (10 phút).",
+ "maxRetries": "Số lần thử lại tối đa",
+ "maxRetriesHelp": "Số lần thử lại tối đa cho các yêu cầu thất bại. Mặc định: 0 (không thử lại). Phạm vi: 0-10.",
+ "retryDelay": "Độ trễ thử lại (ms)",
+ "retryDelayHelp": "Độ trễ ban đầu giữa các lần thử lại tính bằng mili giây. Sử dụng backoff theo cấp số nhân. Mặc định: 1000 (1 giây). Phạm vi: 100-10000.",
+ "enableLogging": "Bật ghi nhật ký yêu cầu",
+ "enableLoggingHelp": "Bật ghi nhật ký chi tiết về yêu cầu, phản hồi và lỗi API Ollama. Nhật ký bao gồm thông tin thời gian và chi tiết kết nối."
},
"unboundApiKey": "Khóa API Unbound",
"getUnboundApiKey": "Lấy khóa API Unbound",
diff --git a/webview-ui/src/i18n/locales/zh-CN/settings.json b/webview-ui/src/i18n/locales/zh-CN/settings.json
index 868acf4487f..77389de2ff7 100644
--- a/webview-ui/src/i18n/locales/zh-CN/settings.json
+++ b/webview-ui/src/i18n/locales/zh-CN/settings.json
@@ -467,7 +467,55 @@
"numCtx": "上下文窗口大小 (num_ctx)",
"numCtxHelp": "覆盖模型的默认上下文窗口大小。留空以使用模型的 Modelfile 配置。最小值为 128。",
"description": "Ollama 允许您在本地计算机上运行模型。有关如何开始使用的说明,请参阅其快速入门指南。",
- "warning": "注意:Roo Code 使用复杂的提示,与 Claude 模型配合最佳。功能较弱的模型可能无法按预期工作。"
+ "warning": "注意:Roo Code 使用复杂的提示,与 Claude 模型配合最佳。功能较弱的模型可能无法按预期工作。",
+ "test": "测试",
+ "testing": "测试中...",
+ "completedIn": "在{{duration}}ms内完成",
+ "refreshModels": "刷新模型",
+ "refreshing": "刷新中...",
+ "messages": {
+ "connectionSuccess": "成功连接到 {{baseUrl}} 的 Ollama",
+ "connectionInvalidUrl": "无效的 URL: {{baseUrl}}",
+ "connectionRefused": "无法连接到 {{baseUrl}} 的 Ollama。请确保 Ollama 正在运行。",
+ "connectionTimeout": "连接到 Ollama 超时。请检查 URL 是否正确以及 Ollama 是否可访问。",
+ "connectionNetworkError": "连接到 Ollama 时发生网络错误。请检查您的网络连接。",
+ "connectionHttpError": "Ollama 返回错误: {{status}} {{statusText}}",
+ "connectionFailed": "连接失败: {{error}}",
+ "connectionTestError": "测试连接时出错: {{error}}",
+ "refreshSuccess": "找到 {{count}} 个支持工具的模型(共 {{total}} 个)",
+ "refreshNoModels": "未找到模型。请确保 Ollama 正在运行并已安装模型。",
+ "refreshFailed": "刷新模型失败: {{error}}"
+ },
+ "connectionSettings": "连接设置",
+ "toolsSupport": "工具支持",
+ "noToolsSupport": "无工具支持",
+ "models": {
+ "one": "模型",
+ "other": "模型"
+ },
+ "noToolsSupportHelp": "这些模型不支持原生工具调用,无法与 Roo Code 一起使用。它们仅作为参考显示。",
+ "table": {
+ "modelName": "模型名称",
+ "context": "上下文",
+ "size": "大小",
+ "quantization": "量化",
+ "family": "系列",
+ "images": "图像",
+ "yes": "是",
+ "no": "否"
+ },
+ "streaming": "流式传输",
+ "streamingHelp": "Ollama API 请求始终启用流式传输。响应在生成时实时流式传输。",
+ "requestTimeout": "请求超时 (ms)",
+ "requestTimeoutHelp": "LLM API 请求(聊天完成、思考工作)的超时时间(毫秒)。默认值: 3600000 (60 分钟)。范围: 1000-7200000 (120 分钟)。",
+ "modelDiscoveryTimeout": "模型发现超时 (ms)",
+ "modelDiscoveryTimeoutHelp": "模型发现请求(列出和获取模型详细信息)的超时时间(毫秒)。默认值: 10000 (10 秒)。范围: 1000-600000 (10 分钟)。",
+ "maxRetries": "最大重试次数",
+ "maxRetriesHelp": "失败请求的最大重试尝试次数。默认值: 0 (不重试)。范围: 0-10。",
+ "retryDelay": "重试延迟 (ms)",
+ "retryDelayHelp": "重试尝试之间的初始延迟(毫秒)。使用指数退避。默认值: 1000 (1 秒)。范围: 100-10000。",
+ "enableLogging": "启用请求日志",
+ "enableLoggingHelp": "启用 Ollama API 请求、响应和错误的详细日志记录。日志包括时间信息和连接详细信息。"
},
"unboundApiKey": "Unbound API 密钥",
"getUnboundApiKey": "获取 Unbound API 密钥",
diff --git a/webview-ui/src/i18n/locales/zh-TW/settings.json b/webview-ui/src/i18n/locales/zh-TW/settings.json
index e240f205303..8213eedb7b7 100644
--- a/webview-ui/src/i18n/locales/zh-TW/settings.json
+++ b/webview-ui/src/i18n/locales/zh-TW/settings.json
@@ -476,7 +476,55 @@
"numCtx": "上下文視窗大小(num_ctx)",
"numCtxHelp": "覆寫模型的預設上下文視窗大小。留空以使用模型的 Modelfile 設定。最小值為 128。",
"description": "Ollama 允許您在本機電腦執行模型。請參閱快速入門指南。",
- "warning": "注意:Roo Code 使用複雜提示詞,與 Claude 模型搭配最佳。功能較弱的模型可能無法正常運作。"
+ "warning": "注意:Roo Code 使用複雜提示詞,與 Claude 模型搭配最佳。功能較弱的模型可能無法正常運作。",
+ "test": "測試",
+ "testing": "測試中...",
+ "completedIn": "在{{duration}}ms內完成",
+ "refreshModels": "重新整理模型",
+ "refreshing": "重新整理中...",
+ "messages": {
+ "connectionSuccess": "成功連接到 {{baseUrl}} 的 Ollama",
+ "connectionInvalidUrl": "無效的 URL: {{baseUrl}}",
+ "connectionRefused": "無法連接到 {{baseUrl}} 的 Ollama。請確保 Ollama 正在執行。",
+ "connectionTimeout": "連接到 Ollama 逾時。請檢查 URL 是否正確以及 Ollama 是否可存取。",
+ "connectionNetworkError": "連接到 Ollama 時發生網路錯誤。請檢查您的網路連線。",
+ "connectionHttpError": "Ollama 傳回錯誤: {{status}} {{statusText}}",
+ "connectionFailed": "連接失敗: {{error}}",
+ "connectionTestError": "測試連接時發生錯誤: {{error}}",
+ "refreshSuccess": "找到 {{count}} 個支援工具的模型(共 {{total}} 個)",
+ "refreshNoModels": "找不到模型。請確保 Ollama 正在執行並已安裝模型。",
+ "refreshFailed": "重新整理模型失敗: {{error}}"
+ },
+ "connectionSettings": "連線設定",
+ "toolsSupport": "工具支援",
+ "noToolsSupport": "無工具支援",
+ "models": {
+ "one": "模型",
+ "other": "模型"
+ },
+ "noToolsSupportHelp": "這些模型不支援原生工具呼叫,無法與 Roo Code 一起使用。它們僅作為參考顯示。",
+ "table": {
+ "modelName": "模型名稱",
+ "context": "上下文",
+ "size": "大小",
+ "quantization": "量化",
+ "family": "系列",
+ "images": "影像",
+ "yes": "是",
+ "no": "否"
+ },
+ "streaming": "串流",
+ "streamingHelp": "Ollama API 請求始終啟用串流。回應在生成時即時串流。",
+ "requestTimeout": "請求逾時 (ms)",
+ "requestTimeoutHelp": "LLM API 請求(聊天完成、思考工作)的逾時時間(毫秒)。預設值: 3600000 (60 分鐘)。範圍: 1000-7200000 (120 分鐘)。",
+ "modelDiscoveryTimeout": "模型探索逾時 (ms)",
+ "modelDiscoveryTimeoutHelp": "模型探索請求(列出和取得模型詳細資訊)的逾時時間(毫秒)。預設值: 10000 (10 秒)。範圍: 1000-600000 (10 分鐘)。",
+ "maxRetries": "最大重試次數",
+ "maxRetriesHelp": "失敗請求的最大重試嘗試次數。預設值: 0 (不重試)。範圍: 0-10。",
+ "retryDelay": "重試延遲 (ms)",
+ "retryDelayHelp": "重試嘗試之間的初始延遲(毫秒)。使用指數退避。預設值: 1000 (1 秒)。範圍: 100-10000。",
+ "enableLogging": "啟用請求記錄",
+ "enableLoggingHelp": "啟用 Ollama API 請求、回應和錯誤的詳細記錄。記錄包括時間資訊和連線詳細資訊。"
},
"unboundApiKey": "Unbound API 金鑰",
"getUnboundApiKey": "取得 Unbound API 金鑰",
diff --git a/webview-ui/src/i18n/setup.ts b/webview-ui/src/i18n/setup.ts
index 678cdc1d49c..411389ff2fc 100644
--- a/webview-ui/src/i18n/setup.ts
+++ b/webview-ui/src/i18n/setup.ts
@@ -37,6 +37,8 @@ i18next.use(initReactI18next).init({
interpolation: {
escapeValue: false, // React already escapes by default
},
+ // Note: returnObjects is left unset (false) because i18next should automatically
+ // resolve plural objects to strings when the count option is provided
})
export function loadTranslations() {
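
For reference (not part of the diff above): the setup.ts note relies on i18next selecting a plural form itself whenever `count` is passed. The sketch below is a minimal, illustrative example of that count-based selection using i18next's v21+ suffix-key convention (`key_one`, `key_few`, `key_many`, `key_other`, chosen via Intl.PluralRules); the key name and strings are hypothetical and are not taken from the locale files in this PR.

```ts
// Illustrative sketch only — assumes i18next v21+ (Intl.PluralRules-based plural resolution).
import i18next from "i18next"

await i18next.init({
	lng: "pl",
	resources: {
		pl: {
			translation: {
				// Suffix-based plural keys; the suffix is picked via Intl.PluralRules("pl").select(count)
				models_one: "model",
				models_few: "modele",
				models_many: "modeli",
				models_other: "modeli",
			},
		},
	},
})

console.log(i18next.t("models", { count: 1 })) // "model"  (plural category: one)
console.log(i18next.t("models", { count: 3 })) // "modele" (plural category: few)
console.log(i18next.t("models", { count: 5 })) // "modeli" (plural category: many)
```

Under this convention `t()` always returns a string for a pluralized key, which is the behavior the comment in setup.ts assumes; the safeguard added to the translate wrapper covers the case where a key resolves to an object instead.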