From 6b07a613f18b0fea4bb89c084e861fd3b7bd5ae4 Mon Sep 17 00:00:00 2001
From: CyberMind-FR
Date: Thu, 22 Jan 2026 05:09:14 +0100
Subject: [PATCH] fix(luci-app-localai): Fix chat timeout and port issues

- Change default API port from 8080 to 8081
- Increase chat API timeout to 120 seconds (LLMs can be slow on ARM)
- Use custom fetch-based chat call with AbortController for timeout control
- Fix wget/curl timeout for RPCD backend

Resolves "XHR request timed out" errors when using chat with TinyLlama.

Co-Authored-By: Claude Opus 4.5
---
 .../resources/view/localai/chat.js            | 56 ++++++++++++++++---
 .../root/usr/libexec/rpcd/luci.localai        |  7 ++-
 2 files changed, 52 insertions(+), 11 deletions(-)

diff --git a/package/secubox/luci-app-localai/htdocs/luci-static/resources/view/localai/chat.js b/package/secubox/luci-app-localai/htdocs/luci-static/resources/view/localai/chat.js
index 39c4347f..0f335619 100644
--- a/package/secubox/luci-app-localai/htdocs/luci-static/resources/view/localai/chat.js
+++ b/package/secubox/luci-app-localai/htdocs/luci-static/resources/view/localai/chat.js
@@ -9,12 +9,52 @@ var callModels = rpc.declare({
 	expect: { models: [] }
 });
 
-var callChat = rpc.declare({
-	object: 'luci.localai',
-	method: 'chat',
-	params: ['model', 'messages'],
-	expect: { response: '' }
-});
+// Custom chat function with longer timeout (LLMs can be slow)
+function callChatWithTimeout(model, messages, timeoutMs) {
+	return new Promise(function(resolve, reject) {
+		var timeout = timeoutMs || 120000; // 2 minutes default
+		var controller = new AbortController();
+		var timeoutId = setTimeout(function() {
+			controller.abort();
+			reject(new Error('Request timed out - model may need more time'));
+		}, timeout);
+
+		// Use ubus directly via L.Request or fetch
+		var payload = JSON.stringify({
+			jsonrpc: '2.0',
+			id: 1,
+			method: 'call',
+			params: [
+				L.env.sessionid || '00000000000000000000000000000000',
+				'luci.localai',
+				'chat',
+				{ model: model, messages: JSON.parse(messages) }
+			]
+		});
+
+		fetch(L.env.requestpath + 'admin/ubus', {
+			method: 'POST',
+			headers: { 'Content-Type': 'application/json' },
+			body: payload,
+			signal: controller.signal
+		})
+		.then(function(response) { return response.json(); })
+		.then(function(data) {
+			clearTimeout(timeoutId);
+			if (data.result && data.result[1]) {
+				resolve(data.result[1]);
+			} else if (data.error) {
+				reject(new Error(data.error.message || 'RPC error'));
+			} else {
+				resolve({ response: '', error: 'Unexpected response format' });
+			}
+		})
+		.catch(function(err) {
+			clearTimeout(timeoutId);
+			reject(err);
+		});
+	});
+}
 
 return view.extend({
 	title: _('LocalAI Chat'),
@@ -137,8 +177,8 @@ return view.extend({
 		// Build messages array
 		this.messages.push({ role: 'user', content: message });
 
-		// Send to API
-		callChat(this.selectedModel, JSON.stringify(this.messages))
+		// Send to API (120s timeout for slow models)
+		callChatWithTimeout(this.selectedModel, JSON.stringify(this.messages), 120000)
 			.then(function(result) {
 				var loading = document.getElementById('loading-msg');
 				if (loading) loading.remove();
diff --git a/package/secubox/luci-app-localai/root/usr/libexec/rpcd/luci.localai b/package/secubox/luci-app-localai/root/usr/libexec/rpcd/luci.localai
index ac86a934..6cbb461e 100644
--- a/package/secubox/luci-app-localai/root/usr/libexec/rpcd/luci.localai
+++ b/package/secubox/luci-app-localai/root/usr/libexec/rpcd/luci.localai
@@ -10,7 +10,7 @@ LOCALAI_CTL="/usr/sbin/localaictl"
 # Load UCI config
 load_config() {
 	config_load "$CONFIG"
-	config_get API_PORT main api_port "8080"
api_port "8080" + config_get API_PORT main api_port "8081" config_get DATA_PATH main data_path "/srv/localai" config_get MODELS_PATH main models_path "/srv/localai/models" config_get MEMORY_LIMIT main memory_limit "2G" @@ -388,13 +388,14 @@ do_chat() { local tmpfile="/tmp/localai_chat_$$" local tmpfile_err="/tmp/localai_chat_err_$$" + # Use longer timeout for LLM responses (120 seconds) if command -v curl >/dev/null 2>&1; then - curl -s -X POST "http://127.0.0.1:$API_PORT/v1/chat/completions" \ + curl -s -m 120 -X POST "http://127.0.0.1:$API_PORT/v1/chat/completions" \ -H "Content-Type: application/json" \ -d "$request_body" \ -o "$tmpfile" 2>"$tmpfile_err" else - wget -q -O "$tmpfile" --post-data "$request_body" \ + wget -q -T 120 -O "$tmpfile" --post-data "$request_body" \ --header="Content-Type: application/json" \ "http://127.0.0.1:$API_PORT/v1/chat/completions" 2>"$tmpfile_err" fi