feat(localai): Add LocalAI LuCI app with chat, models management and portal integration

- Add secubox-app-localai package with LXC container support for LocalAI service
- Add luci-app-localai with dashboard, chat, models and settings views
- Implement RPCD backend for LocalAI API integration via /v1/models and /v1/chat/completions
- Use direct RPC declarations in LuCI views for reliable frontend communication
- Add LocalAI and Glances to secubox-portal services page
- Move Glances from services to monitoring section

Packages:
- secubox-app-localai: 0.1.0-r1
- luci-app-localai: 0.1.0-r8
- luci-app-secubox-portal: 0.6.0-r5

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
CyberMind-FR 2026-01-21 16:54:13 +01:00
parent 5e29599682
commit 6b28c4260b
16 changed files with 2772 additions and 1 deletions

View File

@ -0,0 +1,33 @@
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2025 CyberMind.fr - Gandalf
#
# LuCI LocalAI - Self-hosted LLM Management Interface
#
# OpenWrt/LuCI package recipe: pulls in the buildroot rules, declares the
# package metadata consumed by luci.mk, and marks /etc/config/localai as a
# conffile so user settings survive package upgrades.
include $(TOPDIR)/rules.mk
PKG_NAME:=luci-app-localai
PKG_VERSION:=0.1.0
PKG_RELEASE:=8
PKG_ARCH:=all
PKG_LICENSE:=Apache-2.0
PKG_MAINTAINER:=CyberMind <contact@cybermind.fr>
LUCI_TITLE:=LuCI LocalAI Dashboard
LUCI_DESCRIPTION:=Modern dashboard for LocalAI LLM management on OpenWrt
# Requires the SecuBox app plus rpcd (the backend script is an rpcd plugin)
# and the secubox-app-localai package that ships the LocalAI service itself.
LUCI_DEPENDS:=+luci-base +luci-app-secubox +luci-lib-jsonc +rpcd +rpcd-mod-luci +secubox-app-localai
LUCI_PKGARCH:=all
# File permissions
# The rpcd backend must be executable by rpcd (runs as root).
PKG_FILE_MODES:=/usr/libexec/rpcd/luci.localai:root:root:755
include $(TOPDIR)/feeds/luci/luci.mk
define Package/$(PKG_NAME)/conffiles
/etc/config/localai
endef
# call BuildPackage - OpenWrt buildroot

View File

@ -0,0 +1,140 @@
'use strict';
'require baseclass';
'require rpc';
/**
 * LocalAI Dashboard API
 * Package: luci-app-localai
 * RPCD object: luci.localai
 */
// Version: 0.1.0
// Thin rpc.declare() wrappers around the luci.localai ubus object.
// The `expect` clause names the reply field to unwrap and supplies the
// fallback value used when the backend omits it, so callers always
// receive a value of the expected shape.

// --- Read-only queries ---
var callStatus = rpc.declare({
	object: 'luci.localai',
	method: 'status',
	expect: { }
});
var callModels = rpc.declare({
	object: 'luci.localai',
	method: 'models',
	expect: { models: [] }
});
var callConfig = rpc.declare({
	object: 'luci.localai',
	method: 'config',
	expect: { }
});
var callHealth = rpc.declare({
	object: 'luci.localai',
	method: 'health',
	expect: { healthy: false }
});
var callMetrics = rpc.declare({
	object: 'luci.localai',
	method: 'metrics',
	expect: { }
});

// --- Service lifecycle control ---
var callStart = rpc.declare({
	object: 'luci.localai',
	method: 'start',
	expect: { success: false }
});
var callStop = rpc.declare({
	object: 'luci.localai',
	method: 'stop',
	expect: { success: false }
});
var callRestart = rpc.declare({
	object: 'luci.localai',
	method: 'restart',
	expect: { success: false }
});

// --- Model management ---
var callModelInstall = rpc.declare({
	object: 'luci.localai',
	method: 'model_install',
	params: ['name'],
	expect: { success: false }
});
var callModelRemove = rpc.declare({
	object: 'luci.localai',
	method: 'model_remove',
	params: ['name'],
	expect: { success: false }
});

// --- Inference ---
var callChat = rpc.declare({
	object: 'luci.localai',
	method: 'chat',
	params: ['model', 'messages'],
	expect: { response: '' }
});
var callComplete = rpc.declare({
	object: 'luci.localai',
	method: 'complete',
	params: ['model', 'prompt'],
	expect: { text: '' }
});
/**
 * Format a byte count as a human-readable string (e.g. 1536 -> "1.5 KB").
 *
 * Bug fix: the previous guard only handled bytes === 0, so null/undefined/NaN
 * input fell through to Math.log() and produced "NaN undefined". Guard all
 * falsy and non-numeric input instead, matching the copies of this helper
 * used by the dashboard and models views.
 *
 * @param {number} bytes - raw byte count
 * @returns {string} formatted size with unit suffix ('B'..'TB')
 */
function formatBytes(bytes) {
	if (!bytes || isNaN(bytes)) return '0 B';
	var k = 1024;
	var sizes = ['B', 'KB', 'MB', 'GB', 'TB'];
	var i = Math.floor(Math.log(bytes) / Math.log(k));
	return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i];
}
/**
 * Render an uptime in seconds as a short human string:
 * "Nd Nh" past a day, "Nh Nm" past an hour, otherwise "Nm".
 * Falsy input (0, null, undefined) renders as 'N/A'.
 *
 * @param {number} seconds - uptime in whole seconds
 * @returns {string} compact uptime label
 */
function formatUptime(seconds) {
	if (!seconds) {
		return 'N/A';
	}
	var d = Math.floor(seconds / 86400);
	var h = Math.floor((seconds % 86400) / 3600);
	var m = Math.floor((seconds % 3600) / 60);
	if (d > 0) {
		return d + 'd ' + h + 'h';
	}
	return (h > 0) ? (h + 'h ' + m + 'm') : (m + 'm');
}
// Public module surface: the RPC wrappers above plus the shared formatting
// helpers, exported as a LuCI baseclass so views can `require` this module.
return baseclass.extend({
	getStatus: callStatus,
	getModels: callModels,
	getConfig: callConfig,
	getHealth: callHealth,
	getMetrics: callMetrics,
	start: callStart,
	stop: callStop,
	restart: callRestart,
	modelInstall: callModelInstall,
	modelRemove: callModelRemove,
	chat: callChat,
	complete: callComplete,
	formatBytes: formatBytes,
	formatUptime: formatUptime,
	// Aggregate function for dashboard
	getDashboardData: function() {
		// Fire the four dashboard RPCs in parallel and normalise any
		// missing result to a safe default so callers never see undefined.
		return Promise.all([
			callStatus(),
			callModels(),
			callHealth(),
			callMetrics()
		]).then(function(results) {
			return {
				status: results[0] || {},
				models: results[1] || { models: [] },
				health: results[2] || { healthy: false },
				metrics: results[3] || {}
			};
		});
	}
});

View File

@ -0,0 +1,334 @@
'use strict';
'require view';
'require ui';
'require rpc';
// Direct RPC declarations for the chat view (luci.localai ubus object);
// `expect` unwraps the named reply field and supplies its fallback value.
var callModels = rpc.declare({
	object: 'luci.localai',
	method: 'models',
	expect: { models: [] }
});
// chat(model, messages): messages is sent as a JSON-encoded array of
// {role, content} objects (see sendMessage below).
var callChat = rpc.declare({
	object: 'luci.localai',
	method: 'chat',
	params: ['model', 'messages'],
	expect: { response: '' }
});
/**
 * LocalAI Chat view: a model picker, a scrolling message log and an input
 * box. Each send appends the user turn to `messages`, shows a loading
 * bubble, and forwards the whole conversation to the backend via callChat.
 */
return view.extend({
	title: _('LocalAI Chat'),
	messages: [],          // conversation history sent to the backend ({role, content} objects)
	selectedModel: null,   // model id currently selected in the dropdown

	// Fetch the installed model list before first render.
	load: function() {
		return callModels();
	},

	// Build the static chat UI; `data` is the callModels() result.
	render: function(data) {
		var self = this;
		var models = data.models || [];
		var container = E('div', { 'class': 'localai-chat' }, [
			E('style', {}, this.getCSS()),
			// Header
			E('div', { 'class': 'chat-header' }, [
				E('div', { 'class': 'chat-title' }, [
					E('span', { 'class': 'chat-icon' }, '💬'),
					_('LocalAI Chat')
				]),
				E('div', { 'class': 'chat-model-select' }, [
					E('label', {}, _('Model:')),
					E('select', {
						'id': 'model-select',
						'change': function(e) { self.selectedModel = e.target.value; }
					}, models.length > 0 ?
						models.map(function(m) {
							// A model may expose `id` (OpenAI-style) or `name`.
							var modelId = m.id || m.name;
							var displayName = m.loaded ? modelId + ' ✓' : modelId;
							return E('option', { 'value': modelId }, displayName);
						}) :
						[E('option', { 'value': '' }, _('No models available'))]
					)
				])
			]),
			// Chat Messages
			E('div', { 'class': 'chat-messages', 'id': 'chat-messages' }, [
				E('div', { 'class': 'chat-welcome' }, [
					E('div', { 'class': 'welcome-icon' }, '🤖'),
					E('div', { 'class': 'welcome-title' }, _('Welcome to LocalAI Chat')),
					E('div', { 'class': 'welcome-text' }, _('Start a conversation with your local AI model.'))
				])
			]),
			// Input Area
			E('div', { 'class': 'chat-input-area' }, [
				E('textarea', {
					'id': 'chat-input',
					'class': 'chat-input',
					'placeholder': _('Type your message...'),
					'rows': 2,
					// Enter sends; Shift+Enter inserts a newline.
					'keydown': function(e) {
						if (e.key === 'Enter' && !e.shiftKey) {
							e.preventDefault();
							self.sendMessage();
						}
					}
				}),
				E('button', {
					'class': 'chat-send-btn',
					'click': function() { self.sendMessage(); }
				}, [E('span', {}, '➤'), _('Send')])
			])
		]);
		// Set initial model: prefer a loaded model, else the first one.
		if (models.length > 0) {
			var loadedModel = models.find(function(m) { return m.loaded; });
			this.selectedModel = loadedModel ? (loadedModel.id || loadedModel.name) : (models[0].id || models[0].name);
		}
		return container;
	},

	// Validate input, echo the user's turn into the log, show a typing
	// indicator, then call the backend and append its reply (or an error).
	sendMessage: function() {
		var self = this;
		var input = document.getElementById('chat-input');
		var messagesContainer = document.getElementById('chat-messages');
		var message = input.value.trim();
		if (!message) return;
		if (!this.selectedModel) {
			ui.addNotification(null, E('p', _('Please select a model first')), 'error');
			return;
		}
		// Clear welcome if present
		var welcome = messagesContainer.querySelector('.chat-welcome');
		if (welcome) welcome.remove();
		// Add user message
		messagesContainer.appendChild(E('div', { 'class': 'chat-message user' }, [
			E('div', { 'class': 'message-avatar' }, '👤'),
			E('div', { 'class': 'message-content' }, [
				E('div', { 'class': 'message-text' }, message)
			])
		]));
		// Add loading indicator (three animated dots; see .message-loading CSS)
		var loadingMsg = E('div', { 'class': 'chat-message assistant loading', 'id': 'loading-msg' }, [
			E('div', { 'class': 'message-avatar' }, '🤖'),
			E('div', { 'class': 'message-content' }, [
				E('div', { 'class': 'message-loading' }, [
					E('span', {}), E('span', {}), E('span', {})
				])
			])
		]);
		messagesContainer.appendChild(loadingMsg);
		messagesContainer.scrollTop = messagesContainer.scrollHeight;
		// Clear input
		input.value = '';
		// Build messages array
		this.messages.push({ role: 'user', content: message });
		// Send to API. The history is JSON-encoded because the rpcd method
		// takes `messages` as a single string parameter.
		callChat(this.selectedModel, JSON.stringify(this.messages))
			.then(function(result) {
				var loading = document.getElementById('loading-msg');
				if (loading) loading.remove();
				var response = result.response || result.error || _('No response');
				self.messages.push({ role: 'assistant', content: response });
				messagesContainer.appendChild(E('div', { 'class': 'chat-message assistant' }, [
					E('div', { 'class': 'message-avatar' }, '🤖'),
					E('div', { 'class': 'message-content' }, [
						E('div', { 'class': 'message-text' }, response)
					])
				]));
				messagesContainer.scrollTop = messagesContainer.scrollHeight;
			})
			.catch(function(err) {
				// RPC transport failure: drop the indicator and show an
				// inline error bubble (the failed turn stays in history).
				var loading = document.getElementById('loading-msg');
				if (loading) loading.remove();
				messagesContainer.appendChild(E('div', { 'class': 'chat-message assistant error' }, [
					E('div', { 'class': 'message-avatar' }, '⚠️'),
					E('div', { 'class': 'message-content' }, [
						E('div', { 'class': 'message-text' }, _('Error: ') + err.message)
					])
				]));
				messagesContainer.scrollTop = messagesContainer.scrollHeight;
			});
	},

	// Scoped stylesheet injected by render(); dark theme for the chat UI.
	getCSS: function() {
		return `
		.localai-chat {
			font-family: 'Inter', -apple-system, sans-serif;
			background: #030712;
			color: #f8fafc;
			min-height: calc(100vh - 100px);
			display: flex;
			flex-direction: column;
			padding: 16px;
		}
		.chat-header {
			display: flex;
			align-items: center;
			justify-content: space-between;
			padding: 16px 20px;
			background: #0f172a;
			border: 1px solid #334155;
			border-radius: 12px;
			margin-bottom: 16px;
		}
		.chat-title {
			display: flex;
			align-items: center;
			gap: 12px;
			font-size: 18px;
			font-weight: 600;
		}
		.chat-icon { font-size: 24px; }
		.chat-model-select {
			display: flex;
			align-items: center;
			gap: 10px;
		}
		.chat-model-select select {
			padding: 8px 12px;
			background: #1e293b;
			border: 1px solid #334155;
			border-radius: 8px;
			color: #f8fafc;
			font-size: 13px;
		}
		.chat-messages {
			flex: 1;
			overflow-y: auto;
			padding: 20px;
			background: #0f172a;
			border: 1px solid #334155;
			border-radius: 12px;
			margin-bottom: 16px;
			display: flex;
			flex-direction: column;
			gap: 16px;
			min-height: 400px;
		}
		.chat-welcome {
			text-align: center;
			padding: 60px 20px;
			color: #64748b;
		}
		.welcome-icon { font-size: 64px; margin-bottom: 20px; }
		.welcome-title { font-size: 20px; font-weight: 600; margin-bottom: 10px; color: #94a3b8; }
		.welcome-text { font-size: 14px; }
		.chat-message {
			display: flex;
			gap: 12px;
			max-width: 80%;
		}
		.chat-message.user {
			align-self: flex-end;
			flex-direction: row-reverse;
		}
		.message-avatar {
			width: 36px;
			height: 36px;
			border-radius: 10px;
			background: #1e293b;
			display: flex;
			align-items: center;
			justify-content: center;
			font-size: 18px;
			flex-shrink: 0;
		}
		.chat-message.user .message-avatar {
			background: linear-gradient(135deg, #06b6d4, #0ea5e9);
		}
		.chat-message.assistant .message-avatar {
			background: linear-gradient(135deg, #a855f7, #6366f1);
		}
		.message-content {
			padding: 12px 16px;
			border-radius: 12px;
			background: #1e293b;
		}
		.chat-message.user .message-content {
			background: linear-gradient(135deg, #06b6d4, #0ea5e9);
		}
		.chat-message.error .message-content {
			background: rgba(239, 68, 68, 0.2);
			border: 1px solid rgba(239, 68, 68, 0.3);
		}
		.message-text {
			font-size: 14px;
			line-height: 1.5;
			white-space: pre-wrap;
		}
		.message-loading {
			display: flex;
			gap: 4px;
		}
		.message-loading span {
			width: 8px;
			height: 8px;
			background: #a855f7;
			border-radius: 50%;
			animation: bounce 1.4s infinite ease-in-out;
		}
		.message-loading span:nth-child(1) { animation-delay: -0.32s; }
		.message-loading span:nth-child(2) { animation-delay: -0.16s; }
		@keyframes bounce {
			0%, 80%, 100% { transform: scale(0); }
			40% { transform: scale(1); }
		}
		.chat-input-area {
			display: flex;
			gap: 12px;
			padding: 16px;
			background: #0f172a;
			border: 1px solid #334155;
			border-radius: 12px;
		}
		.chat-input {
			flex: 1;
			padding: 12px 16px;
			background: #1e293b;
			border: 1px solid #334155;
			border-radius: 10px;
			color: #f8fafc;
			font-size: 14px;
			resize: none;
			font-family: inherit;
		}
		.chat-input:focus {
			outline: none;
			border-color: #a855f7;
		}
		.chat-send-btn {
			display: flex;
			align-items: center;
			gap: 8px;
			padding: 12px 24px;
			background: linear-gradient(135deg, #a855f7, #6366f1);
			border: none;
			border-radius: 10px;
			color: white;
			font-size: 14px;
			font-weight: 600;
			cursor: pointer;
		}
		.chat-send-btn:hover {
			box-shadow: 0 0 20px rgba(168, 85, 247, 0.4);
		}
		`;
	}
});

View File

@ -0,0 +1,602 @@
'use strict';
'require view';
'require ui';
'require rpc';
// Direct RPC declarations for the dashboard view (luci.localai ubus object);
// `expect` unwraps the named reply field and supplies its fallback value.

// --- Read-only queries ---
var callStatus = rpc.declare({
	object: 'luci.localai',
	method: 'status',
	expect: { }
});
var callModels = rpc.declare({
	object: 'luci.localai',
	method: 'models',
	expect: { models: [] }
});
var callHealth = rpc.declare({
	object: 'luci.localai',
	method: 'health',
	expect: { healthy: false }
});
var callMetrics = rpc.declare({
	object: 'luci.localai',
	method: 'metrics',
	expect: { }
});

// --- Service lifecycle control ---
var callStart = rpc.declare({
	object: 'luci.localai',
	method: 'start',
	expect: { success: false }
});
var callStop = rpc.declare({
	object: 'luci.localai',
	method: 'stop',
	expect: { success: false }
});
var callRestart = rpc.declare({
	object: 'luci.localai',
	method: 'restart',
	expect: { success: false }
});
/**
 * Human-readable byte size ("1.5 KB", "4.1 GB", ...).
 * Falsy input (0, null, undefined, NaN) renders as '0 B'.
 *
 * @param {number} bytes - raw byte count
 * @returns {string} formatted size with unit suffix ('B'..'TB')
 */
function formatBytes(bytes) {
	if (!bytes) {
		return '0 B';
	}
	var units = ['B', 'KB', 'MB', 'GB', 'TB'];
	var exp = Math.floor(Math.log(bytes) / Math.log(1024));
	var value = parseFloat((bytes / Math.pow(1024, exp)).toFixed(2));
	return value + ' ' + units[exp];
}
/**
 * Short uptime label: "Nd Nh" past a day, "Nh Nm" past an hour, else "Nm".
 * Falsy input (0, null, undefined) renders as 'N/A'.
 *
 * @param {number} seconds - uptime in whole seconds
 * @returns {string} compact uptime label
 */
function formatUptime(seconds) {
	if (!seconds) return 'N/A';
	var totalHours = Math.floor(seconds / 3600);
	var days = Math.floor(seconds / 86400);
	var hours = totalHours - days * 24;
	var mins = Math.floor(seconds / 60) - totalHours * 60;
	if (days > 0) {
		return days + 'd ' + hours + 'h';
	}
	if (hours > 0) {
		return hours + 'h ' + mins + 'm';
	}
	return mins + 'm';
}
/**
 * LocalAI Dashboard view: service status header, quick-stat tiles,
 * service control + installed-models cards, and an API endpoint summary.
 * Data comes from four parallel luci.localai RPCs (status/models/health/metrics).
 */
return view.extend({
	title: _('LocalAI Dashboard'),
	refreshInterval: 5000,   // NOTE(review): declared but not referenced in this view — confirm intent
	data: null,              // last load() result, kept for handlers

	// Fetch all dashboard data in parallel; normalise missing results
	// to safe defaults so render() never sees undefined.
	load: function() {
		return Promise.all([
			callStatus(),
			callModels(),
			callHealth(),
			callMetrics()
		]).then(function(results) {
			return {
				status: results[0] || {},
				models: results[1] || { models: [] },
				health: results[2] || { healthy: false },
				metrics: results[3] || {}
			};
		});
	},

	// Build the full dashboard DOM from the load() result.
	render: function(data) {
		var self = this;
		this.data = data;
		var container = E('div', { 'class': 'localai-dashboard' }, [
			// Header
			E('div', { 'class': 'lai-header' }, [
				E('div', { 'class': 'lai-logo' }, [
					E('div', { 'class': 'lai-logo-icon' }, '🤖'),
					E('div', { 'class': 'lai-logo-text' }, [
						E('span', {}, 'Local'),
						'AI'
					])
				]),
				E('div', { 'class': 'lai-header-info' }, [
					E('div', {
						'class': 'lai-status-badge ' + (data.status.running ? '' : 'offline'),
						'id': 'lai-status-badge'
					}, [
						E('span', { 'class': 'lai-status-dot' }),
						data.status.running ? _('Running') : _('Stopped')
					])
				])
			]),
			// Quick Stats: four tiles, each coloured via the --stat-gradient CSS var
			E('div', { 'class': 'lai-quick-stats' }, [
				E('div', { 'class': 'lai-quick-stat', 'style': '--stat-gradient: linear-gradient(135deg, #a855f7, #6366f1)' }, [
					E('div', { 'class': 'lai-quick-stat-header' }, [
						E('span', { 'class': 'lai-quick-stat-icon' }, '📊'),
						E('span', { 'class': 'lai-quick-stat-label' }, _('Models'))
					]),
					E('div', { 'class': 'lai-quick-stat-value', 'id': 'models-count' },
						(data.models.models || []).length.toString()
					),
					E('div', { 'class': 'lai-quick-stat-sub' }, _('Installed'))
				]),
				E('div', { 'class': 'lai-quick-stat', 'style': '--stat-gradient: linear-gradient(135deg, #10b981, #059669)' }, [
					E('div', { 'class': 'lai-quick-stat-header' }, [
						E('span', { 'class': 'lai-quick-stat-icon' }, '💾'),
						E('span', { 'class': 'lai-quick-stat-label' }, _('Memory'))
					]),
					E('div', { 'class': 'lai-quick-stat-value', 'id': 'memory-used' },
						formatBytes(data.metrics.memory_used || 0)
					),
					E('div', { 'class': 'lai-quick-stat-sub' }, _('Used'))
				]),
				E('div', { 'class': 'lai-quick-stat', 'style': '--stat-gradient: linear-gradient(135deg, #06b6d4, #0ea5e9)' }, [
					E('div', { 'class': 'lai-quick-stat-header' }, [
						E('span', { 'class': 'lai-quick-stat-icon' }, '⏱️'),
						E('span', { 'class': 'lai-quick-stat-label' }, _('Uptime'))
					]),
					E('div', { 'class': 'lai-quick-stat-value', 'id': 'uptime' },
						data.status.running ? formatUptime(data.status.uptime) : '--'
					),
					E('div', { 'class': 'lai-quick-stat-sub' }, _('Running'))
				]),
				E('div', { 'class': 'lai-quick-stat', 'style': '--stat-gradient: linear-gradient(135deg, #f59e0b, #d97706)' }, [
					E('div', { 'class': 'lai-quick-stat-header' }, [
						E('span', { 'class': 'lai-quick-stat-icon' }, '🔌'),
						E('span', { 'class': 'lai-quick-stat-label' }, _('API Port'))
					]),
					E('div', { 'class': 'lai-quick-stat-value' }, data.status.api_port || '8080'),
					E('div', { 'class': 'lai-quick-stat-sub' }, _('Endpoint'))
				])
			]),
			// Main Cards Grid
			E('div', { 'class': 'lai-cards-grid' }, [
				// Service Control Card
				E('div', { 'class': 'lai-card' }, [
					E('div', { 'class': 'lai-card-header' }, [
						E('div', { 'class': 'lai-card-title' }, [
							E('span', { 'class': 'lai-card-title-icon' }, '⚙️'),
							_('Service Control')
						]),
						E('div', {
							'class': 'lai-card-badge ' + (data.status.running ? 'running' : 'stopped')
						}, data.status.running ? _('Active') : _('Inactive'))
					]),
					E('div', { 'class': 'lai-card-body' }, [
						E('div', { 'class': 'lai-service-info' }, [
							E('div', { 'class': 'lai-service-row' }, [
								E('span', { 'class': 'lai-service-label' }, _('Status')),
								E('span', {
									'class': 'lai-service-value ' + (data.status.running ? 'running' : 'stopped'),
									'id': 'service-status'
								}, data.status.running ? _('Running') : _('Stopped'))
							]),
							E('div', { 'class': 'lai-service-row' }, [
								E('span', { 'class': 'lai-service-label' }, _('Memory Limit')),
								E('span', { 'class': 'lai-service-value' }, data.status.memory_limit || '2G')
							]),
							E('div', { 'class': 'lai-service-row' }, [
								E('span', { 'class': 'lai-service-label' }, _('Threads')),
								E('span', { 'class': 'lai-service-value' }, data.status.threads || '4')
							]),
							E('div', { 'class': 'lai-service-row' }, [
								E('span', { 'class': 'lai-service-label' }, _('Context Size')),
								E('span', { 'class': 'lai-service-value' }, data.status.context_size || '2048')
							])
						]),
						// Start/Stop are disabled according to current run state.
						E('div', { 'class': 'lai-service-controls' }, [
							E('button', {
								'class': 'lai-btn lai-btn-success' + (data.status.running ? ' disabled' : ''),
								'click': function() { self.handleServiceAction('start'); },
								'disabled': data.status.running
							}, [E('span', {}, '▶'), _('Start')]),
							E('button', {
								'class': 'lai-btn lai-btn-danger' + (!data.status.running ? ' disabled' : ''),
								'click': function() { self.handleServiceAction('stop'); },
								'disabled': !data.status.running
							}, [E('span', {}, '⏹'), _('Stop')]),
							E('button', {
								'class': 'lai-btn lai-btn-warning',
								'click': function() { self.handleServiceAction('restart'); }
							}, [E('span', {}, '🔄'), _('Restart')])
						])
					])
				]),
				// Models Card
				E('div', { 'class': 'lai-card' }, [
					E('div', { 'class': 'lai-card-header' }, [
						E('div', { 'class': 'lai-card-title' }, [
							E('span', { 'class': 'lai-card-title-icon' }, '🧠'),
							_('Installed Models')
						]),
						E('div', { 'class': 'lai-card-badge' },
							(data.models.models || []).length + ' ' + _('models')
						)
					]),
					E('div', { 'class': 'lai-card-body' }, [
						this.renderModelsList(data.models.models || [])
					])
				])
			]),
			// API Info Card
			E('div', { 'class': 'lai-card', 'style': 'margin-top: 20px' }, [
				E('div', { 'class': 'lai-card-header' }, [
					E('div', { 'class': 'lai-card-title' }, [
						E('span', { 'class': 'lai-card-title-icon' }, '🔗'),
						_('API Endpoints')
					])
				]),
				E('div', { 'class': 'lai-card-body' }, [
					E('div', { 'class': 'lai-api-info' }, [
						E('div', { 'class': 'lai-api-endpoint' }, [
							E('code', {}, 'http://' + window.location.hostname + ':' + (data.status.api_port || '8080') + '/v1/chat/completions'),
							E('span', { 'class': 'lai-api-method' }, 'POST'),
							E('span', { 'class': 'lai-api-desc' }, _('Chat completion'))
						]),
						E('div', { 'class': 'lai-api-endpoint' }, [
							E('code', {}, 'http://' + window.location.hostname + ':' + (data.status.api_port || '8080') + '/v1/models'),
							E('span', { 'class': 'lai-api-method get' }, 'GET'),
							E('span', { 'class': 'lai-api-desc' }, _('List models'))
						])
					])
				])
			])
		]);
		// Include CSS
		var style = E('style', {}, this.getCSS());
		container.insertBefore(style, container.firstChild);
		return container;
	},

	// Render the installed-models list, or an empty-state hint with the
	// CLI install command when no models are present.
	renderModelsList: function(models) {
		if (!models || models.length === 0) {
			return E('div', { 'class': 'lai-empty' }, [
				E('div', { 'class': 'lai-empty-icon' }, '📦'),
				E('div', { 'class': 'lai-empty-text' }, _('No models installed')),
				E('div', { 'class': 'lai-empty-hint' }, [
					_('Install a model with: '),
					E('code', {}, 'localaictl model-install tinyllama')
				])
			]);
		}
		return E('div', { 'class': 'lai-models-list' },
			models.map(function(model) {
				var displayName = model.id || model.name;
				return E('div', { 'class': 'lai-model-item' + (model.loaded ? ' loaded' : '') }, [
					E('div', { 'class': 'lai-model-icon' }, model.loaded ? '✅' : '🤖'),
					E('div', { 'class': 'lai-model-info' }, [
						E('div', { 'class': 'lai-model-name' }, displayName),
						E('div', { 'class': 'lai-model-meta' }, [
							// Size may be absent/zero; drop the node via filter(Boolean).
							model.size > 0 ? E('span', { 'class': 'lai-model-size' }, formatBytes(model.size)) : null,
							E('span', { 'class': 'lai-model-type' }, model.loaded ? _('Active') : model.type)
						].filter(Boolean))
					])
				]);
			})
		);
	},

	// Run one of the service RPCs ('start' | 'stop' | 'restart') behind a
	// modal, then reload the page on success so all widgets refresh.
	handleServiceAction: function(action) {
		var self = this;
		ui.showModal(_('Service Control'), [
			E('p', {}, _('Processing...')),
			E('div', { 'class': 'spinning' })
		]);
		var actionFn;
		switch(action) {
			case 'start': actionFn = callStart(); break;
			case 'stop': actionFn = callStop(); break;
			case 'restart': actionFn = callRestart(); break;
		}
		actionFn.then(function(result) {
			ui.hideModal();
			if (result.success) {
				ui.addNotification(null, E('p', _('Service ' + action + ' successful')), 'success');
				window.location.reload();
			} else {
				ui.addNotification(null, E('p', result.error || _('Operation failed')), 'error');
			}
		}).catch(function(err) {
			ui.hideModal();
			ui.addNotification(null, E('p', err.message), 'error');
		});
	},

	// Scoped stylesheet injected by render(); dark theme for the dashboard.
	getCSS: function() {
		return `
		.localai-dashboard {
			font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif;
			background: #030712;
			color: #f8fafc;
			min-height: 100vh;
			padding: 16px;
		}
		.lai-header {
			display: flex;
			align-items: center;
			justify-content: space-between;
			padding: 12px 0 20px;
			border-bottom: 1px solid #334155;
			margin-bottom: 20px;
		}
		.lai-logo {
			display: flex;
			align-items: center;
			gap: 14px;
		}
		.lai-logo-icon {
			width: 46px;
			height: 46px;
			background: linear-gradient(135deg, #a855f7, #6366f1);
			border-radius: 12px;
			display: flex;
			align-items: center;
			justify-content: center;
			font-size: 24px;
		}
		.lai-logo-text {
			font-size: 24px;
			font-weight: 700;
		}
		.lai-logo-text span {
			background: linear-gradient(135deg, #a855f7, #6366f1);
			-webkit-background-clip: text;
			-webkit-text-fill-color: transparent;
		}
		.lai-status-badge {
			display: flex;
			align-items: center;
			gap: 8px;
			padding: 8px 16px;
			border-radius: 24px;
			background: rgba(16, 185, 129, 0.15);
			color: #10b981;
			border: 1px solid rgba(16, 185, 129, 0.3);
			font-weight: 600;
		}
		.lai-status-badge.offline {
			background: rgba(239, 68, 68, 0.15);
			color: #ef4444;
			border-color: rgba(239, 68, 68, 0.3);
		}
		.lai-status-dot {
			width: 10px;
			height: 10px;
			background: currentColor;
			border-radius: 50%;
		}
		.lai-quick-stats {
			display: grid;
			grid-template-columns: repeat(auto-fit, minmax(180px, 1fr));
			gap: 14px;
			margin-bottom: 24px;
		}
		.lai-quick-stat {
			background: #0f172a;
			border: 1px solid #334155;
			border-radius: 12px;
			padding: 20px;
			position: relative;
			overflow: hidden;
		}
		.lai-quick-stat::before {
			content: '';
			position: absolute;
			top: 0;
			left: 0;
			right: 0;
			height: 3px;
			background: var(--stat-gradient);
		}
		.lai-quick-stat-header {
			display: flex;
			align-items: center;
			gap: 10px;
			margin-bottom: 12px;
		}
		.lai-quick-stat-icon { font-size: 22px; }
		.lai-quick-stat-label {
			font-size: 11px;
			text-transform: uppercase;
			color: #64748b;
		}
		.lai-quick-stat-value {
			font-size: 32px;
			font-weight: 700;
			background: var(--stat-gradient);
			-webkit-background-clip: text;
			-webkit-text-fill-color: transparent;
		}
		.lai-quick-stat-sub {
			font-size: 11px;
			color: #64748b;
			margin-top: 6px;
		}
		.lai-cards-grid {
			display: grid;
			grid-template-columns: repeat(auto-fit, minmax(400px, 1fr));
			gap: 20px;
		}
		.lai-card {
			background: #0f172a;
			border: 1px solid #334155;
			border-radius: 12px;
			overflow: hidden;
		}
		.lai-card-header {
			display: flex;
			align-items: center;
			justify-content: space-between;
			padding: 16px 20px;
			border-bottom: 1px solid #334155;
			background: rgba(0, 0, 0, 0.3);
		}
		.lai-card-title {
			display: flex;
			align-items: center;
			gap: 12px;
			font-size: 15px;
			font-weight: 600;
		}
		.lai-card-title-icon { font-size: 20px; }
		.lai-card-badge {
			font-size: 12px;
			padding: 5px 12px;
			border-radius: 16px;
			background: linear-gradient(135deg, #a855f7, #6366f1);
			color: white;
		}
		.lai-card-badge.running { background: linear-gradient(135deg, #10b981, #059669); }
		.lai-card-badge.stopped { background: rgba(100, 116, 139, 0.3); color: #94a3b8; }
		.lai-card-body { padding: 20px; }
		.lai-service-info {
			display: flex;
			flex-direction: column;
			gap: 12px;
			margin-bottom: 20px;
		}
		.lai-service-row {
			display: flex;
			justify-content: space-between;
			padding: 8px 12px;
			background: #030712;
			border-radius: 8px;
		}
		.lai-service-label { color: #94a3b8; font-size: 13px; }
		.lai-service-value { font-size: 13px; }
		.lai-service-value.running { color: #10b981; }
		.lai-service-value.stopped { color: #ef4444; }
		.lai-service-controls {
			display: flex;
			gap: 10px;
		}
		.lai-btn {
			display: inline-flex;
			align-items: center;
			gap: 6px;
			padding: 10px 16px;
			border: none;
			border-radius: 8px;
			font-size: 13px;
			font-weight: 500;
			cursor: pointer;
		}
		.lai-btn-success {
			background: linear-gradient(135deg, #10b981, #059669);
			color: white;
		}
		.lai-btn-danger {
			background: linear-gradient(135deg, #ef4444, #dc2626);
			color: white;
		}
		.lai-btn-warning {
			background: linear-gradient(135deg, #f59e0b, #d97706);
			color: white;
		}
		.lai-btn.disabled {
			opacity: 0.5;
			cursor: not-allowed;
		}
		.lai-models-list {
			display: flex;
			flex-direction: column;
			gap: 12px;
		}
		.lai-model-item {
			display: flex;
			align-items: center;
			gap: 14px;
			padding: 14px;
			background: #1e293b;
			border-radius: 10px;
		}
		.lai-model-item.loaded {
			border: 1px solid rgba(16, 185, 129, 0.3);
			background: rgba(16, 185, 129, 0.05);
		}
		.lai-model-icon {
			width: 44px;
			height: 44px;
			background: linear-gradient(135deg, #a855f7, #6366f1);
			border-radius: 10px;
			display: flex;
			align-items: center;
			justify-content: center;
			font-size: 20px;
		}
		.lai-model-name {
			font-weight: 600;
			margin-bottom: 4px;
		}
		.lai-model-meta {
			display: flex;
			gap: 12px;
			font-size: 12px;
			color: #94a3b8;
		}
		.lai-model-type {
			padding: 2px 8px;
			background: #334155;
			border-radius: 4px;
		}
		.lai-empty {
			text-align: center;
			padding: 40px 20px;
			color: #64748b;
		}
		.lai-empty-icon { font-size: 48px; margin-bottom: 12px; }
		.lai-empty-text { font-size: 16px; margin-bottom: 8px; }
		.lai-empty-hint { font-size: 13px; }
		.lai-empty-hint code {
			background: #1e293b;
			padding: 4px 8px;
			border-radius: 4px;
		}
		.lai-api-info {
			display: flex;
			flex-direction: column;
			gap: 10px;
		}
		.lai-api-endpoint {
			display: flex;
			align-items: center;
			gap: 12px;
			padding: 12px;
			background: #030712;
			border-radius: 8px;
		}
		.lai-api-endpoint code {
			font-size: 12px;
			color: #06b6d4;
			flex: 1;
		}
		.lai-api-method {
			padding: 4px 8px;
			background: #f59e0b;
			color: #030712;
			border-radius: 4px;
			font-size: 10px;
			font-weight: 700;
		}
		.lai-api-method.get { background: #10b981; }
		.lai-api-desc {
			font-size: 12px;
			color: #94a3b8;
			min-width: 120px;
		}
		`;
	}
});

View File

@ -0,0 +1,255 @@
'use strict';
'require view';
'require ui';
'require rpc';
// Direct RPC declarations for the models view (luci.localai ubus object);
// `expect` unwraps the named reply field and supplies its fallback value.
var callModels = rpc.declare({
	object: 'luci.localai',
	method: 'models',
	expect: { models: [] }
});
var callModelInstall = rpc.declare({
	object: 'luci.localai',
	method: 'model_install',
	params: ['name'],
	expect: { success: false }
});
var callModelRemove = rpc.declare({
	object: 'luci.localai',
	method: 'model_remove',
	params: ['name'],
	expect: { success: false }
});
/**
 * Format a raw byte count for display ("669 MB", "1.5 KB", ...);
 * zero or missing values render as '0 B'.
 *
 * @param {number} bytes - raw byte count
 * @returns {string} formatted size with unit suffix ('B'..'TB')
 */
function formatBytes(bytes) {
	if (!bytes) return '0 B';
	var suffixes = ['B', 'KB', 'MB', 'GB', 'TB'];
	var idx = Math.floor(Math.log(bytes) / Math.log(1024));
	var scaled = (bytes / Math.pow(1024, idx)).toFixed(2);
	return parseFloat(scaled) + ' ' + suffixes[idx];
}
return view.extend({
title: _('LocalAI Models'),
load: function() {
return callModels();
},
render: function(data) {
var self = this;
var models = data.models || [];
var presets = [
{ name: 'tinyllama', desc: 'TinyLlama 1.1B - Ultra-lightweight', size: '669 MB' },
{ name: 'phi2', desc: 'Microsoft Phi-2 - Compact and efficient', size: '1.6 GB' },
{ name: 'mistral', desc: 'Mistral 7B Instruct - High quality assistant', size: '4.1 GB' },
{ name: 'gte-small', desc: 'GTE Small - Fast embeddings', size: '67 MB' }
];
return E('div', { 'class': 'localai-models' }, [
E('style', {}, this.getCSS()),
E('div', { 'class': 'models-header' }, [
E('h2', {}, [E('span', {}, '🧠 '), _('Model Management')]),
E('p', {}, _('Install and manage AI models for LocalAI'))
]),
// Installed Models
E('div', { 'class': 'models-section' }, [
E('h3', {}, _('Installed Models')),
models.length > 0 ?
E('div', { 'class': 'models-grid' },
models.map(function(m) {
var displayId = m.id || m.name;
return E('div', { 'class': 'model-card installed' + (m.loaded ? ' active' : '') }, [
E('div', { 'class': 'model-card-icon' }, m.loaded ? '✅' : '🤖'),
E('div', { 'class': 'model-card-info' }, [
E('div', { 'class': 'model-card-name' }, displayId),
E('div', { 'class': 'model-card-meta' }, [
m.size > 0 ? E('span', {}, formatBytes(m.size)) : null,
E('span', {}, m.loaded ? _('Loaded') : m.type)
].filter(Boolean))
]),
E('button', {
'class': 'model-btn danger',
'click': function() { self.removeModel(m.name || displayId); }
}, _('Remove'))
]);
})
) :
E('div', { 'class': 'empty-state' }, [
E('span', {}, '📦'),
E('p', {}, _('No models installed yet'))
])
]),
// Available Presets
E('div', { 'class': 'models-section' }, [
E('h3', {}, _('Available Presets')),
E('div', { 'class': 'models-grid' },
presets.map(function(p) {
var isInstalled = models.some(function(m) {
var mId = (m.id || '').toLowerCase();
var mName = (m.name || '').toLowerCase();
return mId.includes(p.name) || mName.includes(p.name);
});
return E('div', { 'class': 'model-card preset' + (isInstalled ? ' installed' : '') }, [
E('div', { 'class': 'model-card-icon' }, isInstalled ? '✅' : '📥'),
E('div', { 'class': 'model-card-info' }, [
E('div', { 'class': 'model-card-name' }, p.name),
E('div', { 'class': 'model-card-desc' }, p.desc),
E('div', { 'class': 'model-card-size' }, p.size)
]),
!isInstalled ?
E('button', {
'class': 'model-btn install',
'click': function() { self.installModel(p.name); }
}, _('Install')) :
E('span', { 'class': 'model-installed-badge' }, _('Installed'))
]);
})
)
])
]);
},
installModel: function(name) {
ui.showModal(_('Installing Model'), [
E('p', {}, _('Downloading and installing model: ') + name),
E('p', { 'class': 'note' }, _('This may take several minutes...')),
E('div', { 'class': 'spinning' })
]);
callModelInstall(name).then(function(result) {
ui.hideModal();
if (result.success) {
ui.addNotification(null, E('p', _('Model installed successfully')), 'success');
window.location.reload();
} else {
ui.addNotification(null, E('p', result.error || _('Installation failed')), 'error');
}
}).catch(function(err) {
ui.hideModal();
ui.addNotification(null, E('p', err.message), 'error');
});
},
removeModel: function(name) {
var self = this;
ui.showModal(_('Remove Model'), [
E('p', {}, _('Remove model: ') + name + '?'),
E('div', { 'class': 'right' }, [
E('button', { 'class': 'btn', 'click': ui.hideModal }, _('Cancel')),
E('button', {
'class': 'btn danger',
'click': function() {
callModelRemove(name).then(function(result) {
ui.hideModal();
if (result.success) {
ui.addNotification(null, E('p', _('Model removed')), 'success');
window.location.reload();
} else {
ui.addNotification(null, E('p', result.error || _('Removal failed')), 'error');
}
});
}
}, _('Remove'))
])
]);
},
getCSS: function() {
return `
.localai-models {
font-family: 'Inter', -apple-system, sans-serif;
background: #030712;
color: #f8fafc;
min-height: 100vh;
padding: 20px;
}
.models-header { margin-bottom: 30px; }
.models-header h2 { font-size: 24px; margin-bottom: 8px; }
.models-header p { color: #94a3b8; }
.models-section {
background: #0f172a;
border: 1px solid #334155;
border-radius: 12px;
padding: 20px;
margin-bottom: 20px;
}
.models-section h3 {
margin: 0 0 16px 0;
font-size: 16px;
color: #a855f7;
}
.models-grid {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(300px, 1fr));
gap: 16px;
}
.model-card {
display: flex;
align-items: center;
gap: 14px;
padding: 16px;
background: #1e293b;
border: 1px solid #334155;
border-radius: 10px;
}
.model-card.active {
border-color: rgba(16, 185, 129, 0.4);
background: rgba(16, 185, 129, 0.08);
}
.model-card-icon {
width: 48px;
height: 48px;
background: linear-gradient(135deg, #a855f7, #6366f1);
border-radius: 10px;
display: flex;
align-items: center;
justify-content: center;
font-size: 24px;
}
.model-card-info { flex: 1; }
.model-card-name { font-weight: 600; margin-bottom: 4px; }
.model-card-desc { font-size: 12px; color: #94a3b8; }
.model-card-size { font-size: 11px; color: #64748b; margin-top: 4px; }
.model-card-meta {
display: flex;
gap: 10px;
font-size: 12px;
color: #94a3b8;
}
.model-btn {
padding: 8px 16px;
border: none;
border-radius: 6px;
font-size: 13px;
cursor: pointer;
}
.model-btn.install {
background: linear-gradient(135deg, #10b981, #059669);
color: white;
}
.model-btn.danger {
background: linear-gradient(135deg, #ef4444, #dc2626);
color: white;
}
.model-installed-badge {
font-size: 12px;
color: #10b981;
}
.empty-state {
text-align: center;
padding: 40px;
color: #64748b;
}
.empty-state span { font-size: 48px; }
`;
}
});

View File

@ -0,0 +1,91 @@
'use strict';
'require view';
'require form';
'require uci';
return view.extend({
load: function() {
return uci.load('localai');
},
render: function() {
var m, s, o;
m = new form.Map('localai', _('LocalAI Configuration'),
_('Configure LocalAI service settings. Changes require a service restart.'));
// Main settings section with tabs
s = m.section(form.TypedSection, 'main', _('Service Settings'));
s.anonymous = true;
s.addremove = false;
s.tab('general', _('General'));
s.tab('paths', _('Storage'));
// General tab options
o = s.taboption('general', form.Flag, 'enabled', _('Enable Service'));
o.default = '0';
o.rmempty = false;
o = s.taboption('general', form.Value, 'api_port', _('API Port'));
o.datatype = 'port';
o.default = '8080';
o.placeholder = '8080';
o = s.taboption('general', form.Value, 'api_host', _('API Host'));
o.default = '0.0.0.0';
o.placeholder = '0.0.0.0';
o.description = _('Use 0.0.0.0 to listen on all interfaces');
o = s.taboption('general', form.Value, 'memory_limit', _('Memory Limit'));
o.default = '2G';
o.placeholder = '2G';
o.description = _('Maximum memory for the container (e.g., 2G, 4G)');
o = s.taboption('general', form.Value, 'threads', _('CPU Threads'));
o.datatype = 'uinteger';
o.default = '4';
o.placeholder = '4';
o.description = _('Number of CPU threads for inference');
o = s.taboption('general', form.Value, 'context_size', _('Context Size'));
o.datatype = 'uinteger';
o.default = '2048';
o.placeholder = '2048';
o.description = _('Maximum context window size in tokens');
o = s.taboption('general', form.Flag, 'cors', _('Enable CORS'));
o.default = '1';
o.description = _('Allow cross-origin requests to API');
o = s.taboption('general', form.Flag, 'debug', _('Debug Mode'));
o.default = '0';
o.description = _('Enable verbose logging');
// Paths tab options
o = s.taboption('paths', form.Value, 'data_path', _('Data Directory'));
o.default = '/srv/localai';
o.placeholder = '/srv/localai';
o = s.taboption('paths', form.Value, 'models_path', _('Models Directory'));
o.default = '/srv/localai/models';
o.placeholder = '/srv/localai/models';
// GPU section (experimental)
s = m.section(form.TypedSection, 'gpu', _('GPU Acceleration (Experimental)'));
s.anonymous = true;
s.addremove = false;
o = s.option(form.Flag, 'enabled', _('Enable GPU'));
o.default = '0';
o.description = _('Enable GPU acceleration (requires compatible hardware)');
o = s.option(form.ListValue, 'backend', _('GPU Backend'));
o.value('vulkan', 'Vulkan (ARM64)');
o.value('cuda', 'CUDA (Nvidia)');
o.value('rocm', 'ROCm (AMD)');
o.default = 'vulkan';
o.depends('enabled', '1');
return m.render();
}
});

View File

@ -0,0 +1,445 @@
#!/bin/sh
# RPCD backend for LocalAI LuCI integration
# Copyright (C) 2025 CyberMind.fr
#
# Registered by rpcd as the ubus object "luci.localai".  Invoked as:
#   $0 list           - print the method/signature table
#   $0 call <method>  - read JSON args on stdin, print a JSON result
. /lib/functions.sh
CONFIG="localai"
LOCALAI_CTL="/usr/sbin/localaictl"
# Load UCI config into API_PORT/DATA_PATH/... globals, with the same
# defaults the service itself uses.
load_config() {
config_load "$CONFIG"
config_get API_PORT main api_port "8080"
config_get DATA_PATH main data_path "/srv/localai"
config_get MODELS_PATH main models_path "/srv/localai/models"
config_get MEMORY_LIMIT main memory_limit "2G"
config_get THREADS main threads "4"
config_get CONTEXT_SIZE main context_size "2048"
}
# Check if LocalAI is running (a local-ai process visible on the host).
is_running() {
pgrep local-ai >/dev/null 2>&1
}
# Get service status
# Emits one JSON object with run state, approximate uptime and the
# effective UCI settings.
get_status() {
load_config
local running="false"
local uptime=0
if is_running; then
running="true"
# Get process uptime
local pid=$(pgrep local-ai | head -1)
if [ -n "$pid" ] && [ -d "/proc/$pid" ]; then
# mtime of /proc/<pid> approximates the process start time
local start_time=$(stat -c %Y /proc/$pid 2>/dev/null || echo 0)
local now=$(date +%s)
uptime=$((now - start_time))
fi
fi
# Get enabled status
local enabled="false"
[ "$(uci -q get ${CONFIG}.main.enabled)" = "1" ] && enabled="true"
cat <<EOF
{
"running": $running,
"enabled": $enabled,
"uptime": $uptime,
"api_port": $API_PORT,
"memory_limit": "$MEMORY_LIMIT",
"threads": $THREADS,
"context_size": $CONTEXT_SIZE,
"data_path": "$DATA_PATH",
"models_path": "$MODELS_PATH"
}
EOF
}
# Get installed models - queries both LocalAI API and filesystem
# Output: {"models":[{id,name,size,type,loaded[,path]}, ...]}
# Three sources are merged, de-duplicated through the $seen list:
#   1. /v1/models from the running API                      -> type "loaded"
#   2. *.gguf/*.bin/*.onnx weight files on disk             -> type by extension
#   3. *.yaml/*.yml definitions with no matching weight file -> type "config"
get_models() {
load_config
local tmpfile="/tmp/localai_models_$$"
local first=1
local seen=""
echo '{"models":['
# First, try to get models from LocalAI API (shows loaded/active models)
if is_running; then
wget -q -O "$tmpfile" "http://127.0.0.1:$API_PORT/v1/models" 2>/dev/null
if [ -f "$tmpfile" ] && [ -s "$tmpfile" ]; then
# Try indexed access for each model (max 20)
local i=0
while [ $i -lt 20 ]; do
local model_id=$(jsonfilter -i "$tmpfile" -e "@.data[$i].id" 2>/dev/null)
[ -z "$model_id" ] && break
[ $first -eq 0 ] && echo ","
first=0
seen="$seen $model_id"
cat <<EOF
{
"id": "$model_id",
"name": "$model_id",
"size": 0,
"type": "loaded",
"loaded": true
}
EOF
i=$((i + 1))
done
fi
rm -f "$tmpfile"
fi
# Scan filesystem for model files
if [ -d "$MODELS_PATH" ]; then
for model in "$MODELS_PATH"/*.gguf "$MODELS_PATH"/*.bin "$MODELS_PATH"/*.onnx; do
[ -f "$model" ] || continue
local name=$(basename "$model")
local basename_no_ext="${name%.*}"
local size=$(stat -c %s "$model" 2>/dev/null || echo 0)
local ext="${name##*.}"
local type="unknown"
local loaded="false"
case "$ext" in
gguf) type="llama-cpp" ;;
bin) type="transformers" ;;
onnx) type="onnx" ;;
esac
# Check if this model is in the seen list (loaded from API)
case " $seen " in
*" $basename_no_ext "*) continue ;;
esac
[ $first -eq 0 ] && echo ","
first=0
cat <<EOF
{
"id": "$basename_no_ext",
"name": "$name",
"size": $size,
"type": "$type",
"path": "$model",
"loaded": $loaded
}
EOF
done
fi
# Also scan for YAML model configs (LocalAI model definitions)
if [ -d "$MODELS_PATH" ]; then
for yaml in "$MODELS_PATH"/*.yaml "$MODELS_PATH"/*.yml; do
[ -f "$yaml" ] || continue
local name=$(basename "$yaml")
local basename_no_ext="${name%.*}"
# Skip if already seen
case " $seen " in
*" $basename_no_ext "*) continue ;;
esac
# Check if there's a model file with same base name already shown
local already_shown=0
for ext in gguf bin onnx; do
[ -f "$MODELS_PATH/$basename_no_ext.$ext" ] && already_shown=1
done
[ $already_shown -eq 1 ] && continue
[ $first -eq 0 ] && echo ","
first=0
cat <<EOF
{
"id": "$basename_no_ext",
"name": "$basename_no_ext",
"size": 0,
"type": "config",
"path": "$yaml",
"loaded": false
}
EOF
done
fi
echo ']}'
}
# Get configuration
# JSON mirror of the UCI settings only (no runtime state).
get_config() {
load_config
cat <<EOF
{
"api_port": $API_PORT,
"data_path": "$DATA_PATH",
"models_path": "$MODELS_PATH",
"memory_limit": "$MEMORY_LIMIT",
"threads": $THREADS,
"context_size": $CONTEXT_SIZE
}
EOF
}
# Health check
# api_status is one of: ok | unhealthy | stopped | unknown.
get_health() {
load_config
local healthy="false"
local api_status="unknown"
if is_running; then
# Check API health endpoint
local response=$(wget -q -O - "http://127.0.0.1:$API_PORT/readyz" 2>/dev/null)
if echo "$response" | grep -q "ok"; then
healthy="true"
api_status="ok"
else
api_status="unhealthy"
fi
else
api_status="stopped"
fi
cat <<EOF
{
"healthy": $healthy,
"api_status": "$api_status"
}
EOF
}
# Get metrics
# Memory (bytes, from VmRSS) and CPU (%) of the local-ai process.
get_metrics() {
load_config
local mem_used=0
local cpu_percent=0
if is_running; then
local pid=$(pgrep local-ai | head -1)
if [ -n "$pid" ]; then
# Get memory usage from /proc
mem_used=$(awk '/VmRSS/ {print $2*1024}' /proc/$pid/status 2>/dev/null)
# Get CPU from ps
cpu_percent=$(ps -o %cpu= -p $pid 2>/dev/null | tr -d ' ')
fi
fi
# Guard against empty samples (the process can exit between pgrep and
# the reads above); an empty value here would emit invalid JSON such as
# "cpu_percent": ,
[ -n "$mem_used" ] || mem_used=0
[ -n "$cpu_percent" ] || cpu_percent=0
cat <<EOF
{
"memory_used": $mem_used,
"cpu_percent": $cpu_percent
}
EOF
}
# Start service
# All three actions poll is_running after a short grace period because
# the init script returns before the container is actually up or down.
do_start() {
if is_running; then
echo '{"success":false,"error":"Already running"}'
return
fi
/etc/init.d/localai start >/dev/null 2>&1
sleep 2
if is_running; then
echo '{"success":true}'
else
echo '{"success":false,"error":"Failed to start"}'
fi
}
# Stop service
do_stop() {
/etc/init.d/localai stop >/dev/null 2>&1
sleep 1
if ! is_running; then
echo '{"success":true}'
else
echo '{"success":false,"error":"Failed to stop"}'
fi
}
# Restart service
do_restart() {
/etc/init.d/localai restart >/dev/null 2>&1
sleep 3
if is_running; then
echo '{"success":true}'
else
echo '{"success":false,"error":"Failed to restart"}'
fi
}
# Install model
# $1 = preset name or URL; work is delegated to localaictl model-install.
do_model_install() {
local name="$1"
[ -z "$name" ] && { echo '{"success":false,"error":"Model name required"}'; return; }
local output=$($LOCALAI_CTL model-install "$name" 2>&1)
local ret=$?
if [ $ret -eq 0 ]; then
echo '{"success":true}'
else
# surface the last line of localaictl output as the error message
local error=$(echo "$output" | tail -1 | sed 's/"/\\"/g')
echo "{\"success\":false,\"error\":\"$error\"}"
fi
}
# Remove model
# $1 = model base name; delegated to localaictl model-remove.
do_model_remove() {
local name="$1"
[ -z "$name" ] && { echo '{"success":false,"error":"Model name required"}'; return; }
local output=$($LOCALAI_CTL model-remove "$name" 2>&1)
local ret=$?
if [ $ret -eq 0 ]; then
echo '{"success":true}'
else
local error=$(echo "$output" | tail -1 | sed 's/"/\\"/g')
echo "{\"success\":false,\"error\":\"$error\"}"
fi
}
# Chat completion (proxy to LocalAI API)
# $1 = model id, $2 = JSON array of message objects (forwarded verbatim).
# Prints {"response":"..."} on success, {"response":"","error":"..."} otherwise.
do_chat() {
load_config
local model="$1"
local messages="$2"
if ! is_running; then
echo '{"response":"","error":"LocalAI is not running"}'
return
fi
# Validate inputs
[ -z "$model" ] && { echo '{"response":"","error":"Model not specified"}'; return; }
[ -z "$messages" ] && { echo '{"response":"","error":"Messages not provided"}'; return; }
# Build request body - messages should already be a JSON array string
local request_body="{\"model\":\"$model\",\"messages\":$messages}"
# Call LocalAI API using a temp file for better handling
local tmpfile="/tmp/localai_chat_$$"
wget -q -O "$tmpfile" --post-data "$request_body" \
--header="Content-Type: application/json" \
"http://127.0.0.1:$API_PORT/v1/chat/completions" 2>/dev/null
if [ -f "$tmpfile" ] && [ -s "$tmpfile" ]; then
# Extract message content using jsonfilter
local content=$(jsonfilter -i "$tmpfile" -e '@.choices[0].message.content' 2>/dev/null)
local error=$(jsonfilter -i "$tmpfile" -e '@.error.message' 2>/dev/null)
rm -f "$tmpfile"
if [ -n "$error" ]; then
# JSON-escape: backslashes first, then quotes; newlines become spaces
error=$(printf '%s' "$error" | sed -e 's/\\/\\\\/g' -e 's/"/\\"/g' | tr '\n' ' ')
echo "{\"response\":\"\",\"error\":\"$error\"}"
elif [ -n "$content" ]; then
# JSON-escape the reply: backslashes first, then quotes, then encode
# real newlines as the two-character sequence \n via awk.  (tr cannot
# produce a two-character replacement: the previous `tr '\n' '\\n'`
# mapped newline to newline, leaving raw newlines inside the JSON
# string and breaking the client-side parser.)
content=$(printf '%s' "$content" | sed -e 's/\\/\\\\/g' -e 's/"/\\"/g' | awk 'NR > 1 { printf "\\n" } { printf "%s", $0 }')
echo "{\"response\":\"$content\"}"
else
echo '{"response":"","error":"Empty response from API"}'
fi
else
rm -f "$tmpfile" 2>/dev/null
echo '{"response":"","error":"API request failed - no response"}'
fi
}
# Text completion
# $1 = model id, $2 = raw prompt string.
# Prints {"text":"..."} on success, {"text":"","error":"..."} otherwise.
do_complete() {
load_config
local model="$1"
local prompt="$2"
if ! is_running; then
echo '{"text":"","error":"LocalAI is not running"}'
return
fi
local response=$(wget -q -O - --post-data "{\"model\":\"$model\",\"prompt\":\"$prompt\"}" \
--header="Content-Type: application/json" \
"http://127.0.0.1:$API_PORT/v1/completions" 2>/dev/null)
if [ -n "$response" ]; then
local text=$(echo "$response" | jsonfilter -e '@.choices[0].text' 2>/dev/null)
# JSON-escape: backslashes first, then quotes, then real newlines as \n
# (the old code escaped quotes only, so backslashes or multi-line
# completions produced invalid JSON)
text=$(printf '%s' "$text" | sed -e 's/\\/\\\\/g' -e 's/"/\\"/g' | awk 'NR > 1 { printf "\\n" } { printf "%s", $0 }')
echo "{\"text\":\"$text\"}"
else
echo '{"text":"","error":"API request failed"}'
fi
}
# UBUS method list
# "list" prints the signature table rpcd uses to register the object;
# "call" reads one JSON argument object on stdin and dispatches to the
# matching handler above.
case "$1" in
list)
cat <<'EOF'
{
"status": {},
"models": {},
"config": {},
"health": {},
"metrics": {},
"start": {},
"stop": {},
"restart": {},
"model_install": {"name": "string"},
"model_remove": {"name": "string"},
"chat": {"model": "string", "messages": "array"},
"complete": {"model": "string", "prompt": "string"}
}
EOF
;;
call)
case "$2" in
status) get_status ;;
models) get_models ;;
config) get_config ;;
health) get_health ;;
metrics) get_metrics ;;
start) do_start ;;
stop) do_stop ;;
restart) do_restart ;;
model_install)
read -r input
name=$(echo "$input" | jsonfilter -e '@.name' 2>/dev/null)
do_model_install "$name"
;;
model_remove)
read -r input
name=$(echo "$input" | jsonfilter -e '@.name' 2>/dev/null)
do_model_remove "$name"
;;
chat)
read -r input
model=$(echo "$input" | jsonfilter -e '@.model' 2>/dev/null)
messages=$(echo "$input" | jsonfilter -e '@.messages' 2>/dev/null)
do_chat "$model" "$messages"
;;
complete)
read -r input
model=$(echo "$input" | jsonfilter -e '@.model' 2>/dev/null)
prompt=$(echo "$input" | jsonfilter -e '@.prompt' 2>/dev/null)
do_complete "$model" "$prompt"
;;
*) echo '{"error":"Unknown method"}' ;;
esac
;;
esac

View File

@ -0,0 +1,56 @@
{
"admin/secubox/services/localai": {
"title": "LocalAI",
"order": 50,
"action": {
"type": "firstchild"
},
"depends": {
"acl": ["luci-app-localai"]
}
},
"admin/secubox/services/localai/dashboard": {
"title": "Dashboard",
"order": 10,
"action": {
"type": "view",
"path": "localai/dashboard"
},
"depends": {
"acl": ["luci-app-localai"]
}
},
"admin/secubox/services/localai/models": {
"title": "Models",
"order": 20,
"action": {
"type": "view",
"path": "localai/models"
},
"depends": {
"acl": ["luci-app-localai"]
}
},
"admin/secubox/services/localai/chat": {
"title": "Chat",
"order": 30,
"action": {
"type": "view",
"path": "localai/chat"
},
"depends": {
"acl": ["luci-app-localai"]
}
},
"admin/secubox/services/localai/settings": {
"title": "Settings",
"order": 90,
"action": {
"type": "view",
"path": "localai/settings"
},
"depends": {
"acl": ["luci-app-localai"]
}
}
}

View File

@ -0,0 +1,39 @@
{
"luci-app-localai": {
"description": "Grant access to LuCI LocalAI Dashboard",
"read": {
"ubus": {
"luci.localai": [
"status",
"models",
"config",
"health",
"metrics"
],
"system": [ "info", "board" ],
"file": [ "read", "stat", "exec" ]
},
"uci": [ "localai" ],
"file": {
"/etc/config/localai": [ "read" ],
"/srv/localai/*": [ "read" ]
}
},
"write": {
"ubus": {
"luci.localai": [
"start",
"stop",
"restart",
"model_install",
"model_remove",
"model_load",
"model_unload",
"chat",
"complete"
]
},
"uci": [ "localai" ]
}
}
}

View File

@ -11,7 +11,7 @@ LUCI_DESCRIPTION:=Unified entry point for all SecuBox applications with tabbed n
LUCI_DEPENDS:=+luci-base +luci-theme-secubox
LUCI_PKGARCH:=all
PKG_VERSION:=0.6.0
PKG_RELEASE:=5
PKG_LICENSE:=GPL-3.0-or-later
PKG_MAINTAINER:=SecuBox Team <secubox@example.com>

View File

@ -235,6 +235,30 @@ return baseclass.extend({
path: 'admin/secubox/services/mmpm/dashboard', path: 'admin/secubox/services/mmpm/dashboard',
service: 'mmpm', service: 'mmpm',
version: '3.1.0' version: '3.1.0'
},
'glances': {
id: 'glances',
name: 'Glances',
desc: 'Cross-platform system monitoring tool with web interface',
icon: '\ud83d\udcca',
iconBg: 'rgba(16, 185, 129, 0.15)',
iconColor: '#10b981',
section: 'monitoring',
path: 'admin/secubox/monitoring/glances/dashboard',
service: 'glances',
version: '4.2.1'
},
'localai': {
id: 'localai',
name: 'LocalAI',
desc: 'Self-hosted, privacy-first AI/LLM with OpenAI-compatible API',
icon: '\ud83e\udd16',
iconBg: 'rgba(168, 85, 247, 0.15)',
iconColor: '#a855f7',
section: 'services',
path: 'admin/secubox/services/localai/dashboard',
service: 'localai',
version: '3.10.0'
} }
}, },

View File

@ -21,6 +21,16 @@
"acl": ["luci-app-secubox-portal"] "acl": ["luci-app-secubox-portal"]
} }
}, },
"admin/secubox/services": {
"title": "Services",
"order": 50,
"action": {
"type": "firstchild"
},
"depends": {
"acl": ["luci-app-secubox-portal"]
}
},
"admin/secubox/apps": { "admin/secubox/apps": {
"title": "Apps", "title": "Apps",
"order": 60, "order": 60,

View File

@ -0,0 +1,73 @@
# OpenWrt package: LocalAI runtime (LXC) for SecuBox.
# Ships only scripts and config; the LocalAI binary itself is downloaded
# on the target at install time by localaictl.
include $(TOPDIR)/rules.mk
PKG_NAME:=secubox-app-localai
PKG_RELEASE:=4
PKG_VERSION:=0.1.0
PKG_ARCH:=all
PKG_MAINTAINER:=CyberMind Studio <contact@cybermind.fr>
PKG_LICENSE:=Apache-2.0
include $(INCLUDE_DIR)/package.mk
define Package/secubox-app-localai
SECTION:=utils
CATEGORY:=Utilities
PKGARCH:=all
SUBMENU:=SecuBox Apps
TITLE:=SecuBox LocalAI - Self-hosted LLM (LXC)
DEPENDS:=+uci +libuci +jsonfilter
endef
define Package/secubox-app-localai/description
LocalAI - Self-hosted, privacy-first AI/LLM for SecuBox-powered OpenWrt systems.
Features:
- OpenAI-compatible API (drop-in replacement)
- No cloud dependency - all processing on-device
- Support for various models (LLaMA, Mistral, Phi, etc.)
- Text generation, embeddings, transcription
- Image generation (optional)
Runs in LXC container for isolation.
Configure in /etc/config/localai.
endef
# Preserve user edits to the UCI config across upgrades.
define Package/secubox-app-localai/conffiles
/etc/config/localai
endef
# Nothing to compile; all payload files are shipped as-is.
define Build/Compile
endef
# Install UCI config, procd init script and the localaictl manager.
define Package/secubox-app-localai/install
$(INSTALL_DIR) $(1)/etc/config
$(INSTALL_CONF) ./files/etc/config/localai $(1)/etc/config/localai
$(INSTALL_DIR) $(1)/etc/init.d
$(INSTALL_BIN) ./files/etc/init.d/localai $(1)/etc/init.d/localai
$(INSTALL_DIR) $(1)/usr/sbin
$(INSTALL_BIN) ./files/usr/sbin/localaictl $(1)/usr/sbin/localaictl
endef
# Post-install hint, printed on the device only (skipped during image build).
define Package/secubox-app-localai/postinst
#!/bin/sh
[ -n "$${IPKG_INSTROOT}" ] || {
echo ""
echo "LocalAI installed."
echo ""
echo "To install and start LocalAI:"
echo " localaictl install"
echo " /etc/init.d/localai start"
echo ""
echo "API endpoint: http://<router-ip>:8080/v1"
echo "Web UI: http://<router-ip>:8080"
echo ""
echo "Download models with:"
echo " localaictl model-install <model-name>"
echo ""
}
exit 0
endef
$(eval $(call BuildPackage,secubox-app-localai))

View File

@ -0,0 +1,51 @@
# /etc/config/localai - LocalAI service configuration.
# Read by localaictl, /etc/init.d/localai and the luci.localai RPCD backend.

# Main service settings (disabled by default; enable via localaictl install
# or "uci set localai.main.enabled=1").
config main 'main'
option enabled '0'
option api_port '8080'
option api_host '0.0.0.0'
option data_path '/srv/localai'
option models_path '/srv/localai/models'
option memory_limit '2G'
option threads '4'
option context_size '2048'
option debug '0'
option cors '1'
# GPU settings (experimental on ARM64)
config gpu 'gpu'
option enabled '0'
option backend 'vulkan'
# Default model to load on startup
config model 'default'
option enabled '1'
option name 'phi-2'
option backend 'llama-cpp'
# Model presets
# Each named "preset" section can be installed with:
#   localaictl model-install <name>
config preset 'phi2'
option name 'phi-2'
option url 'https://huggingface.co/TheBloke/phi-2-GGUF/resolve/main/phi-2.Q4_K_M.gguf'
option size '1.6G'
option type 'text-generation'
option description 'Microsoft Phi-2 - Compact and efficient'
config preset 'mistral'
option name 'mistral-7b'
option url 'https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q4_K_M.gguf'
option size '4.1G'
option type 'text-generation'
option description 'Mistral 7B Instruct - High quality assistant'
config preset 'tinyllama'
option name 'tinyllama'
option url 'https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf'
option size '669M'
option type 'text-generation'
option description 'TinyLlama 1.1B - Ultra-lightweight'
config preset 'gte_small'
option name 'gte-small'
option url 'https://huggingface.co/Supabase/gte-small/resolve/main/model.onnx'
option size '67M'
option type 'embeddings'
option description 'GTE Small - Fast embeddings'

View File

@ -0,0 +1,40 @@
#!/bin/sh /etc/rc.common
# SecuBox LocalAI - Self-hosted LLM service
# Copyright (C) 2025 CyberMind.fr
#
# procd wrapper: the container is run in the foreground by
# "localaictl service-run" so procd can supervise and respawn it.
START=95
STOP=10
USE_PROCD=1
PROG=/usr/sbin/localaictl
# Start the container via localaictl unless disabled in UCI.
start_service() {
local enabled
config_load localai
config_get enabled main enabled '0'
[ "$enabled" = "1" ] || {
echo "LocalAI is disabled. Enable with: uci set localai.main.enabled=1"
return 0
}
procd_open_instance
procd_set_param command $PROG service-run
# respawn: threshold 3600s, retry delay 5s, max 5 retries
procd_set_param respawn 3600 5 5
procd_set_param stdout 1
procd_set_param stderr 1
procd_close_instance
}
# Also stop the LXC container explicitly on service stop.
stop_service() {
$PROG service-stop
}
# Restart automatically when /etc/config/localai changes.
service_triggers() {
procd_add_reload_trigger "localai"
}
reload_service() {
stop
start
}

View File

@ -0,0 +1,578 @@
#!/bin/sh
# SecuBox LocalAI manager - LXC container support
# Copyright (C) 2025 CyberMind.fr
#
# Downloads the pinned LocalAI release binary into a minimal LXC rootfs
# and manages its lifecycle, configuration and model files.
CONFIG="localai"
LXC_NAME="localai"
OPKG_UPDATED=0
LOCALAI_VERSION="v3.10.0"
# Paths
LXC_PATH="/srv/lxc"
LXC_ROOTFS="$LXC_PATH/$LXC_NAME/rootfs"
LXC_CONFIG="$LXC_PATH/$LXC_NAME/config"
# NOTE(review): the help text advertises model-load/model-unload, but no
# such commands exist in the dispatcher below - confirm or implement.
usage() {
cat <<'EOF'
Usage: localaictl <command>
Commands:
install Install prerequisites and create LXC container
check Run prerequisite checks
update Update LocalAI in container
status Show container and service status
logs Show LocalAI logs (use -f to follow)
shell Open shell in container
Model Management:
models List installed models
model-install <n> Install model from preset or URL
model-remove <n> Remove installed model
model-load <n> Load model into memory
model-unload <n> Unload model from memory
Service Control:
service-run Internal: run container under procd
service-stop Stop container
API Endpoints (default port 8080):
/v1/chat/completions - Chat completion (OpenAI compatible)
/v1/completions - Text completion
/v1/embeddings - Generate embeddings
/v1/models - List available models
/ - Web UI
Configuration: /etc/config/localai
EOF
}
# Small shared helpers: root check and leveled logging.
require_root() { [ "$(id -u)" -eq 0 ] || { echo "Root required" >&2; exit 1; }; }
log_info() { echo "[INFO] $*"; }
log_warn() { echo "[WARN] $*" >&2; }
log_error() { echo "[ERROR] $*" >&2; }
# Thin wrappers over uci scoped to this package's config.
uci_get() { uci -q get ${CONFIG}.$1; }
uci_set() { uci set ${CONFIG}.$1="$2" && uci commit ${CONFIG}; }
uci_get_list() { uci -q get ${CONFIG}.$1 2>/dev/null; }
# Load configuration with defaults
load_config() {
api_port="$(uci_get main.api_port || echo 8080)"
api_host="$(uci_get main.api_host || echo 0.0.0.0)"
data_path="$(uci_get main.data_path || echo /srv/localai)"
models_path="$(uci_get main.models_path || echo /srv/localai/models)"
memory_limit="$(uci_get main.memory_limit || echo 2G)"
threads="$(uci_get main.threads || echo 4)"
context_size="$(uci_get main.context_size || echo 2048)"
debug="$(uci_get main.debug || echo 0)"
cors="$(uci_get main.cors || echo 1)"
gpu_enabled="$(uci_get gpu.enabled || echo 0)"
gpu_backend="$(uci_get gpu.backend || echo vulkan)"
}
ensure_dir() { [ -d "$1" ] || mkdir -p "$1"; }
# True when the lxc userspace tools are available on PATH.
has_lxc() {
command -v lxc-start >/dev/null 2>&1 && \
command -v lxc-stop >/dev/null 2>&1
}
# Ensure required packages are installed
# Runs "opkg update" at most once per invocation (OPKG_UPDATED flag).
ensure_packages() {
require_root
for pkg in "$@"; do
if ! opkg list-installed | grep -q "^$pkg "; then
if [ "$OPKG_UPDATED" -eq 0 ]; then
opkg update || return 1
OPKG_UPDATED=1
fi
opkg install "$pkg" || return 1
fi
done
}
# =============================================================================
# LXC CONTAINER FUNCTIONS
# =============================================================================
# Verify lxc tooling and cgroups are present, installing packages if needed.
lxc_check_prereqs() {
log_info "Checking LXC prerequisites..."
ensure_packages lxc lxc-common lxc-attach lxc-start lxc-stop lxc-destroy || return 1
if [ ! -d /sys/fs/cgroup ]; then
log_error "cgroups not mounted at /sys/fs/cgroup"
return 1
fi
log_info "LXC ready"
}
# Create the container rootfs.  Idempotent: skipped when the LocalAI
# binary is already present inside an existing rootfs.
lxc_create_rootfs() {
load_config
if [ -d "$LXC_ROOTFS" ] && [ -x "$LXC_ROOTFS/usr/bin/local-ai" ]; then
log_info "LXC rootfs already exists with LocalAI"
return 0
fi
log_info "Creating LXC rootfs for LocalAI..."
ensure_dir "$LXC_PATH/$LXC_NAME"
lxc_download_binary || return 1
lxc_create_config || return 1
log_info "LXC rootfs created successfully"
}
# Download the pinned LocalAI release binary and lay out a minimal rootfs
# (just the binary, a startup script, resolv.conf and mount points).
lxc_download_binary() {
local rootfs="$LXC_ROOTFS"
local arch
# Detect architecture - LocalAI uses lowercase format: local-ai-vX.X.X-linux-arm64
case "$(uname -m)" in
x86_64) arch="linux-x86_64" ;;
aarch64) arch="linux-arm64" ;;
armv7l) arch="linux-arm" ;;
*) arch="linux-x86_64" ;;
esac
log_info "Downloading LocalAI $LOCALAI_VERSION for $arch..."
ensure_dir "$rootfs/usr/bin"
ensure_dir "$rootfs/data"
ensure_dir "$rootfs/models"
ensure_dir "$rootfs/tmp"
ensure_dir "$rootfs/etc"
# Download LocalAI binary - format: local-ai-v3.10.0-linux-arm64
local binary_url="https://github.com/mudler/LocalAI/releases/download/${LOCALAI_VERSION}/local-ai-${LOCALAI_VERSION}-${arch}"
log_info "Downloading from: $binary_url"
if wget -q --show-progress -O "$rootfs/usr/bin/local-ai" "$binary_url"; then
chmod +x "$rootfs/usr/bin/local-ai"
log_info "LocalAI binary downloaded successfully ($(ls -sh "$rootfs/usr/bin/local-ai" | cut -d' ' -f1))"
else
log_error "Failed to download LocalAI binary"
log_error "URL: $binary_url"
return 1
fi
# Create minimal rootfs structure
mkdir -p "$rootfs/bin" "$rootfs/lib" "$rootfs/proc" "$rootfs/sys" "$rootfs/dev"
# Create resolv.conf
echo "nameserver 8.8.8.8" > "$rootfs/etc/resolv.conf"
# Create startup script (quoted heredoc: nothing expands at install time;
# the LOCALAI_* variables are read from the container environment)
cat > "$rootfs/usr/bin/start-localai.sh" << 'START'
#!/bin/sh
export PATH="/usr/bin:/bin:$PATH"
cd /data
# Read environment variables
API_PORT="${LOCALAI_API_PORT:-8080}"
API_HOST="${LOCALAI_API_HOST:-0.0.0.0}"
THREADS="${LOCALAI_THREADS:-4}"
CONTEXT_SIZE="${LOCALAI_CONTEXT_SIZE:-2048}"
DEBUG="${LOCALAI_DEBUG:-0}"
CORS="${LOCALAI_CORS:-1}"
GPU_ENABLED="${LOCALAI_GPU_ENABLED:-0}"
# Build args
ARGS="--address ${API_HOST}:${API_PORT}"
ARGS="$ARGS --models-path /models"
ARGS="$ARGS --threads $THREADS"
ARGS="$ARGS --context-size $CONTEXT_SIZE"
[ "$DEBUG" = "1" ] && ARGS="$ARGS --debug"
[ "$CORS" = "1" ] && ARGS="$ARGS --cors"
echo "Starting LocalAI..."
echo "API: http://${API_HOST}:${API_PORT}"
echo "Models path: /models"
echo "Threads: $THREADS, Context: $CONTEXT_SIZE"
exec /usr/bin/local-ai $ARGS
START
chmod +x "$rootfs/usr/bin/start-localai.sh"
log_info "LocalAI binary and startup script installed"
}
# Write the LXC config, embedding the current UCI values both as
# lxc.environment entries and as flags on lxc.init.cmd.  Regenerated on
# every start (see lxc_run) so UCI edits take effect on restart.
lxc_create_config() {
load_config
# Build command line flags
local cors_flag=""
local debug_flag=""
[ "$cors" = "1" ] && cors_flag=" --cors"
[ "$debug" = "1" ] && debug_flag=" --debug"
cat > "$LXC_CONFIG" << EOF
# LocalAI LXC Configuration
lxc.uts.name = $LXC_NAME
# Root filesystem
lxc.rootfs.path = dir:$LXC_ROOTFS
# Network - use host network for simplicity
lxc.net.0.type = none
# Mounts
lxc.mount.auto = proc:mixed sys:ro cgroup:mixed
lxc.mount.entry = $data_path data none bind,create=dir 0 0
lxc.mount.entry = $models_path models none bind,create=dir 0 0
# Environment variables for configuration
lxc.environment = LOCALAI_API_PORT=$api_port
lxc.environment = LOCALAI_API_HOST=$api_host
lxc.environment = LOCALAI_THREADS=$threads
lxc.environment = LOCALAI_CONTEXT_SIZE=$context_size
lxc.environment = LOCALAI_DEBUG=$debug
lxc.environment = LOCALAI_CORS=$cors
lxc.environment = LOCALAI_GPU_ENABLED=$gpu_enabled
# Capabilities
lxc.cap.drop = sys_admin sys_module mac_admin mac_override
# cgroups limits
lxc.cgroup.memory.limit_in_bytes = $memory_limit
# Run binary directly (no shell needed in minimal rootfs)
lxc.init.cmd = /usr/bin/local-ai --address ${api_host}:${api_port} --models-path /models --threads $threads --context-size $context_size${cors_flag}${debug_flag}
# Console
lxc.console.size = 4096
lxc.pty.max = 1024
EOF
log_info "LXC config created at $LXC_CONFIG"
}
# Force-stop the container if it exists (-k kills instead of a clean shutdown).
lxc_stop() {
if lxc-info -n "$LXC_NAME" >/dev/null 2>&1; then
lxc-stop -n "$LXC_NAME" -k >/dev/null 2>&1 || true
fi
}
# Run the container in the foreground.  Does not return on success:
# execs lxc-start -F so a supervisor (procd) can track the process.
lxc_run() {
load_config
lxc_stop
if [ ! -f "$LXC_CONFIG" ]; then
log_error "LXC not configured. Run 'localaictl install' first."
return 1
fi
# Regenerate config to pick up any UCI changes
lxc_create_config
# Ensure mount points exist
ensure_dir "$data_path"
ensure_dir "$models_path"
log_info "Starting LocalAI LXC container..."
log_info "API endpoint: http://0.0.0.0:$api_port/v1"
log_info "Web UI: http://0.0.0.0:$api_port"
log_info "Models path: $models_path"
exec lxc-start -n "$LXC_NAME" -F -f "$LXC_CONFIG"
}
# Print container info, effective configuration and API health.
lxc_status() {
load_config
echo "=== LocalAI Status ==="
echo ""
if lxc-info -n "$LXC_NAME" >/dev/null 2>&1; then
lxc-info -n "$LXC_NAME"
else
echo "LXC container '$LXC_NAME' not found or not configured"
fi
echo ""
echo "=== Configuration ==="
echo "API port: $api_port"
echo "Data path: $data_path"
echo "Models path: $models_path"
echo "Memory limit: $memory_limit"
echo "Threads: $threads"
echo "Context size: $context_size"
echo ""
# Check API health
if wget -q -O - "http://127.0.0.1:$api_port/readyz" 2>/dev/null | grep -q "ok"; then
echo "API Status: HEALTHY"
else
echo "API Status: NOT RESPONDING"
fi
}
# Show syslog entries tagged "localai"; pass -f to follow.
lxc_logs() {
if [ "$1" = "-f" ]; then
logread -f -e localai
else
logread -e localai | tail -100
fi
}
# Interactive shell inside the running container.
lxc_shell() {
lxc-attach -n "$LXC_NAME" -- /bin/sh
}
# Stop and delete the whole container directory (rootfs included).
lxc_destroy() {
lxc_stop
if [ -d "$LXC_PATH/$LXC_NAME" ]; then
rm -rf "$LXC_PATH/$LXC_NAME"
log_info "LXC container destroyed"
fi
}
# =============================================================================
# MODEL MANAGEMENT
# =============================================================================
# List installed model files plus the presets declared in UCI.
cmd_models() {
load_config
echo "=== Installed Models ==="
echo ""
if [ -d "$models_path" ]; then
local count=0
for model in "$models_path"/*.gguf "$models_path"/*.bin "$models_path"/*.onnx; do
[ -f "$model" ] || continue
count=$((count + 1))
local name=$(basename "$model")
local size=$(ls -lh "$model" | awk '{print $5}')
echo " $count. $name ($size)"
done
if [ "$count" -eq 0 ]; then
echo " No models installed"
echo ""
echo "Install a model with:"
echo " localaictl model-install phi2"
echo " localaictl model-install tinyllama"
fi
else
echo " Models directory not found: $models_path"
fi
echo ""
echo "=== Available Presets ==="
echo ""
# Presets are *named* UCI sections (config preset 'phi2'), which
# "uci show" prints as "localai.phi2=preset".  Match on "=preset",
# the same way cmd_model_install does.  (The old grep on "preset["
# only matched anonymous sections and therefore never listed any.)
for section in $(uci show localai 2>/dev/null | grep "=preset" | cut -d. -f2 | cut -d= -f1); do
local name=$(uci_get "$section.name")
local desc=$(uci_get "$section.description")
local size=$(uci_get "$section.size")
[ -n "$name" ] && echo " $name - $desc ($size)"
done
}
# Install a model from a named UCI preset or from a direct URL.
# For presets, also write a <name>.yaml LocalAI model definition that
# points at the downloaded weight file.
cmd_model_install() {
load_config
require_root
local model_name="$1"
[ -z "$model_name" ] && { echo "Usage: localaictl model-install <preset-name|url>"; return 1; }
ensure_dir "$models_path"
# Check if it's a preset
local preset_url=""
local preset_file=""
# Search presets
for section in $(uci show localai 2>/dev/null | grep "=preset" | cut -d. -f2 | cut -d= -f1); do
local pname=$(uci_get "$section.name")
if [ "$pname" = "$model_name" ]; then
preset_url=$(uci_get "$section.url")
preset_file=$(basename "$preset_url")
break
fi
done
if [ -n "$preset_url" ]; then
log_info "Installing preset model: $model_name"
log_info "URL: $preset_url"
if wget -O "$models_path/$preset_file" "$preset_url"; then
log_info "Model installed: $models_path/$preset_file"
# Create model config YAML
cat > "$models_path/$model_name.yaml" << EOF
name: $model_name
backend: llama-cpp
parameters:
model: $preset_file
context_size: $context_size
threads: $threads
EOF
log_info "Model config created: $models_path/$model_name.yaml"
else
log_error "Failed to download model"
return 1
fi
elif echo "$model_name" | grep -q "^http"; then
# Direct URL download
local filename=$(basename "$model_name")
log_info "Downloading model from URL..."
if wget -O "$models_path/$filename" "$model_name"; then
log_info "Model installed: $models_path/$filename"
else
log_error "Failed to download model"
return 1
fi
else
log_error "Unknown model or preset: $model_name"
# List available presets from UCI
local presets=""
for section in $(uci show localai 2>/dev/null | grep "=preset" | cut -d. -f2 | cut -d= -f1); do
local pname=$(uci_get "$section.name")
[ -n "$pname" ] && presets="$presets $pname"
done
log_info "Available presets:$presets"
return 1
fi
}
# Remove every file (weights and yaml config) matching the model base name.
cmd_model_remove() {
load_config
require_root
local model_name="$1"
[ -z "$model_name" ] && { echo "Usage: localaictl model-remove <model-name>"; return 1; }
# Find and remove model files
local found=0
for ext in gguf bin onnx yaml; do
local file="$models_path/$model_name.$ext"
if [ -f "$file" ]; then
rm -f "$file"
log_info "Removed: $file"
found=1
fi
done
[ "$found" -eq 0 ] && log_warn "Model not found: $model_name"
}
# =============================================================================
# COMMANDS
# =============================================================================
# install: create data dirs and the container rootfs, then enable the
# service in UCI and in the init system.
cmd_install() {
require_root
load_config
if ! has_lxc; then
log_error "LXC not available. Install lxc packages first."
exit 1
fi
log_info "Installing LocalAI..."
# Create directories
ensure_dir "$data_path"
ensure_dir "$models_path"
lxc_check_prereqs || exit 1
lxc_create_rootfs || exit 1
uci_set main.enabled '1'
/etc/init.d/localai enable
log_info "LocalAI installed."
log_info "Start with: /etc/init.d/localai start"
log_info "API endpoint: http://<router-ip>:$api_port/v1"
log_info ""
log_info "Install a model to get started:"
log_info " localaictl model-install tinyllama # Lightweight (669MB)"
log_info " localaictl model-install phi2 # Balanced (1.6GB)"
}
# check: non-destructive prerequisite and memory report.
cmd_check() {
load_config
log_info "Checking prerequisites..."
if has_lxc; then
log_info "LXC: available"
lxc_check_prereqs
else
log_warn "LXC: not available"
fi
# Check memory
local mem_total=$(grep MemTotal /proc/meminfo | awk '{print $2}')
local mem_gb=$((mem_total / 1024 / 1024))
log_info "System memory: ${mem_gb}GB"
if [ "$mem_gb" -lt 2 ]; then
log_warn "Low memory! LocalAI requires at least 2GB RAM for most models"
fi
}
# update: destroy and rebuild the rootfs with the pinned LOCALAI_VERSION
# binary; model/data bind mounts outside the rootfs are untouched.
cmd_update() {
require_root
load_config
log_info "Updating LocalAI..."
lxc_destroy
lxc_create_rootfs || exit 1
if /etc/init.d/localai enabled >/dev/null 2>&1; then
/etc/init.d/localai restart
else
log_info "Update complete. Restart manually to apply."
fi
}
# Thin command wrappers around the lxc_* helpers.
cmd_status() {
lxc_status
}
cmd_logs() {
lxc_logs "$@"
}
cmd_shell() {
lxc_shell
}
# service-run: invoked by procd via /etc/init.d/localai; stays in the
# foreground (lxc_run execs lxc-start -F) so procd can supervise it.
cmd_service_run() {
require_root
load_config
if ! has_lxc; then
log_error "LXC not available"
exit 1
fi
lxc_check_prereqs || exit 1
lxc_run
}
cmd_service_stop() {
require_root
lxc_stop
}
# Main Entry Point
case "${1:-}" in
install) shift; cmd_install "$@" ;;
check) shift; cmd_check "$@" ;;
update) shift; cmd_update "$@" ;;
status) shift; cmd_status "$@" ;;
logs) shift; cmd_logs "$@" ;;
shell) shift; cmd_shell "$@" ;;
models) shift; cmd_models "$@" ;;
model-install) shift; cmd_model_install "$@" ;;
model-remove) shift; cmd_model_remove "$@" ;;
service-run) shift; cmd_service_run "$@" ;;
service-stop) shift; cmd_service_stop "$@" ;;
help|--help|-h|'') usage ;;
*) echo "Unknown command: $1" >&2; usage >&2; exit 1 ;;
esac