secubox-openwrt/package/secubox/secubox-app-localai/files/etc/init.d/localai
CyberMind-FR 612a1be6ea feat(localai): Rewrite secubox-app-localai with native binary download
- Replace Docker/LXC-based approach with direct binary download
- Download LocalAI v2.25.0 binary from GitHub releases
- Add localaictl CLI for install, model management, and service control
- Change default port to 8081 (avoid CrowdSec conflict on 8080)
- Remove secubox-app-localai-wb (merged into secubox-app-localai)
- Add model presets: tinyllama, phi2, mistral

Usage:
  localaictl install
  localaictl model-install tinyllama
  /etc/init.d/localai enable && /etc/init.d/localai start

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-22 04:55:17 +01:00

80 lines
1.9 KiB
Bash

#!/bin/sh /etc/rc.common
# LocalAI init script
# Copyright (C) 2025 CyberMind.fr
# Start late (99) so network/storage are up; stop early (10) on shutdown.
START=99
STOP=10
# Run under procd supervision (respawn, stdout/stderr capture).
USE_PROCD=1
# Preferred binary location; start_service falls back to /opt/localai/bin.
PROG=/usr/bin/local-ai
# UCI config file name (/etc/config/localai).
CONFIG=localai
# Pre-extracted backend assets; passed to local-ai when present.
BACKEND_ASSETS=/usr/share/localai/backend-assets
start_service() {
	# Launch LocalAI under procd using settings from UCI section
	# 'main' of /etc/config/localai. Returns 0 (no-op) when disabled,
	# 1 when no binary is installed.
	local enabled
	config_load "$CONFIG"
	config_get enabled main enabled '0'
	[ "$enabled" = "1" ] || return 0

	# Locate the binary: packaged path first, then the localaictl
	# install prefix under /opt.
	local binary=""
	if [ -x "$PROG" ]; then
		binary="$PROG"
	elif [ -x "/opt/localai/bin/local-ai" ]; then
		binary="/opt/localai/bin/local-ai"
	else
		logger -t localai -p err "LocalAI binary not found. Run: localaictl install"
		return 1
	fi

	# UCI options with their defaults (port 8081 avoids the CrowdSec
	# conflict on 8080).
	local api_port api_host models_path threads context_size debug cors
	config_get api_port main api_port '8081'
	config_get api_host main api_host '0.0.0.0'
	config_get models_path main models_path '/srv/localai/models'
	config_get threads main threads '4'
	config_get context_size main context_size '2048'
	config_get debug main debug '0'
	config_get cors main cors '1'

	# Ensure the models directory exists before local-ai starts.
	mkdir -p "$models_path"

	procd_open_instance
	# Append arguments one at a time via procd_append_param so values
	# containing whitespace (e.g. a models_path with spaces) survive
	# intact; building a single $args string and word-splitting it
	# would break such paths.
	procd_set_param command "$binary" run
	procd_append_param command --address "${api_host}:${api_port}"
	procd_append_param command --models-path "$models_path"
	procd_append_param command --threads "$threads"
	procd_append_param command --context-size "$context_size"
	[ "$cors" = "1" ] && procd_append_param command --cors
	[ "$debug" = "1" ] && procd_append_param command --debug
	# Point to pre-extracted backend assets when they exist.
	[ -d "$BACKEND_ASSETS" ] && procd_append_param command --backend-assets-path "$BACKEND_ASSETS"
	procd_set_param respawn ${respawn_threshold:-3600} ${respawn_timeout:-5} ${respawn_retry:-5}
	procd_set_param stdout 1
	procd_set_param stderr 1
	procd_set_param pidfile /var/run/localai.pid
	procd_close_instance
	logger -t localai "Started LocalAI on ${api_host}:${api_port}"
}
stop_service() {
	# procd sends the termination signal itself; this hook only
	# records the shutdown in syslog.
	logger -t localai "Stopping LocalAI"
}
service_triggers() {
	# Reload the service automatically when the 'localai' UCI config
	# is committed.
	procd_add_reload_trigger "$CONFIG"
}
reload_service() {
	# local-ai has no in-place reload; perform a full restart so new
	# UCI settings take effect.
	stop
	start
}