New package for building LocalAI from source with llama-cpp backend:
- localai-wb-ctl: on-device build management
  - check: verify build prerequisites
  - install-deps: install build dependencies
  - build: compile LocalAI with llama-cpp
  - model management, service control
- build-sdk.sh: cross-compile script for the SDK
  - uses the OpenWrt toolchain for ARM64
  - produces an optimized binary with llama-cpp

Alternative to the Docker-based secubox-app-localai for native builds.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
77 lines · 1.8 KiB · Bash
#!/bin/sh /etc/rc.common
# LocalAI-WB init script
# Copyright (C) 2025 CyberMind.fr

# rc.common ordering: start late (99) so network/storage are up; stop early (10).
START=99
STOP=10
USE_PROCD=1

# Candidate binary locations: on-device build first, then the packaged name.
PROG=/opt/localai/bin/local-ai
ALT_PROG=/usr/bin/local-ai-wb
# UCI config package name (/etc/config/localai-wb).
CONFIG=localai-wb
|
# Start LocalAI under procd.
# Reads UCI package $CONFIG (section 'main'); no-op unless enabled=1.
# Returns 1 if no LocalAI binary can be found.
start_service() {
	local enabled
	config_load "$CONFIG"
	config_get enabled main enabled '0'

	[ "$enabled" = "1" ] || return 0

	# Locate the binary: prefer the on-device build, then the packaged
	# cross-compiled binary, then a generic install path.
	local binary=""
	if [ -x "$PROG" ]; then
		binary="$PROG"
	elif [ -x "$ALT_PROG" ]; then
		binary="$ALT_PROG"
	elif [ -x "/usr/bin/local-ai" ]; then
		binary="/usr/bin/local-ai"
	else
		logger -t localai-wb -p err "LocalAI binary not found. Run: localai-wb-ctl build"
		return 1
	fi

	# Load runtime configuration with sensible defaults.
	local api_port api_host models_path threads context_size debug cors
	config_get api_port main api_port '8080'
	config_get api_host main api_host '0.0.0.0'
	config_get models_path main models_path '/srv/localai/models'
	config_get threads main threads '4'
	config_get context_size main context_size '2048'
	config_get debug main debug '0'
	config_get cors main cors '1'

	# Ensure the models directory exists before LocalAI scans it.
	mkdir -p "$models_path"

	procd_open_instance
	procd_set_param command "$binary"
	# Append each argument individually so values containing whitespace
	# (e.g. a custom models_path) are passed as single words instead of
	# being word-split from one flat string.
	procd_append_param command --address "${api_host}:${api_port}"
	procd_append_param command --models-path "$models_path"
	procd_append_param command --threads "$threads"
	procd_append_param command --context-size "$context_size"
	[ "$cors" = "1" ] && procd_append_param command --cors
	[ "$debug" = "1" ] && procd_append_param command --debug
	procd_set_param respawn ${respawn_threshold:-3600} ${respawn_timeout:-5} ${respawn_retry:-5}
	procd_set_param stdout 1
	procd_set_param stderr 1
	procd_set_param pidfile /var/run/localai-wb.pid
	procd_close_instance

	logger -t localai-wb "Started LocalAI on ${api_host}:${api_port}"
}
|
|
|
|
# procd hook invoked before the instance is killed; we only log the event —
# procd itself terminates the running process.
stop_service() {
	logger -t "localai-wb" "Stopping LocalAI"
}
|
|
|
|
# Restart the service automatically whenever the UCI config changes.
service_triggers() {
	procd_add_reload_trigger "${CONFIG}"
}
|
|
|
|
# LocalAI cannot re-read its configuration at runtime, so a reload is
# implemented as a full stop/start cycle.
reload_service() {
	stop
	start
}
|