GitHub releases use: local-ai-Linux-arm64 (not local-ai-v2.25.0-linux-arm64) — fixed architecture naming (Linux-arm64, Linux-x86_64), removed the version from the filename, and added URL logging for debugging. Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
807 lines · 21 KiB · Bash
#!/bin/sh
# SecuBox LocalAI manager - Multi-runtime support (LXC, Docker, Podman)
# Copyright (C) 2025 CyberMind.fr

# Constants: UCI config package, container name, and pinned upstream release.
# Marked readonly so a stray assignment elsewhere fails loudly.
readonly CONFIG="localai"
readonly CONTAINER_NAME="localai"
readonly LOCALAI_VERSION="v2.25.0"
|
|
|
|
# Print the command reference to stdout.
usage() {
  cat <<'USAGE'
Usage: localaictl <command>

Commands:
install Install LocalAI (auto-detect or use configured runtime)
install --lxc Force LXC installation (standalone binary)
install --docker Force Docker installation (full image with backends)
install --podman Force Podman installation (full image with backends)
check Run prerequisite checks
update Update LocalAI
status Show container and service status
logs Show LocalAI logs (use -f to follow)
shell Open shell in container

Model Management:
models List installed models
model-install <n> Install model from preset or URL
model-remove <n> Remove installed model

Service Control:
service-run Internal: run container under procd
service-stop Stop container

Runtimes:
lxc - LXC container with rootfs extracted from Docker image
(includes all backends: llama-cpp, whisper, etc.)
Falls back to standalone binary if no docker/podman available
docker - Run Docker container directly
podman - Run Podman container directly (rootless)

Configuration: /etc/config/localai
Set runtime with: uci set localai.main.runtime=<lxc|docker|podman|auto>
USAGE
}
|
|
|
|
# Abort unless running as root (uid 0).
require_root() {
  if [ "$(id -u)" -ne 0 ]; then
    echo "Root required" >&2
    exit 1
  fi
}
|
|
# Logging helpers: print to console (warn/error go to stderr) and mirror
# each message into syslog via logger(1) with the "localai" tag.
log_info()  { printf '[INFO] %s\n' "$*"; logger -t localai "$*"; }
log_warn()  { printf '[WARN] %s\n' "$*" >&2; logger -t localai -p warning "$*"; }
log_error() { printf '[ERROR] %s\n' "$*" >&2; logger -t localai -p err "$*"; }
|
# Read an option from the localai UCI config; prints nothing and returns
# non-zero when the option is unset (-q suppresses the error message).
# Expansions are quoted to avoid accidental word-splitting/globbing.
uci_get() { uci -q get "${CONFIG}.$1"; }
# Write an option to the localai UCI config and persist it immediately.
uci_set() { uci set "${CONFIG}.$1=$2" && uci commit "${CONFIG}"; }
|
# Load configuration from UCI into globals, applying defaults for any
# unset option, and ensure the data/model directories exist.
load_config() {
  # Main service settings (uci -q prints nothing on a miss, so || echo works)
  api_port="$(uci_get main.api_port || echo 8080)"
  api_host="$(uci_get main.api_host || echo 0.0.0.0)"
  data_path="$(uci_get main.data_path || echo /srv/localai)"
  models_path="$(uci_get main.models_path || echo /srv/localai/models)"
  memory_limit="$(uci_get main.memory_limit || echo 2g)"
  threads="$(uci_get main.threads || echo 4)"
  context_size="$(uci_get main.context_size || echo 2048)"
  debug="$(uci_get main.debug || echo 0)"
  cors="$(uci_get main.cors || echo 1)"
  runtime="$(uci_get main.runtime || echo auto)"

  # LXC settings
  lxc_path="$(uci_get lxc.path || echo /srv/lxc)"
  lxc_version="$(uci_get lxc.version || echo "$LOCALAI_VERSION")"

  # Docker settings
  docker_image="$(uci_get docker.image || echo "localai/localai:${LOCALAI_VERSION}-ffmpeg")"

  # Ensure paths exist (-p is a no-op on existing directories)
  mkdir -p "$data_path" "$models_path"
}
|
|
|
|
# =============================================================================
|
|
# RUNTIME DETECTION
|
|
# =============================================================================
|
|
|
|
# Decide which container runtime to use and print its name ("" when none).
# An explicit uci runtime setting wins outright; under "auto" we prefer
# whichever runtime already has the localai container RUNNING, and only
# then fall back to the first installed runtime (podman, docker, lxc —
# docker/podman first because they carry the full backend set).
detect_runtime() {
  local want="$runtime" rt

  # Explicitly configured runtime: trust it as-is.
  if [ -n "$want" ] && [ "$want" != "auto" ]; then
    echo "$want"
    return
  fi

  # Auto mode: a runtime with the container already live wins.
  if command -v lxc-info >/dev/null 2>&1 && lxc-info -n "$CONTAINER_NAME" -s 2>/dev/null | grep -q "RUNNING"; then
    echo "lxc"
    return
  fi
  for rt in podman docker; do
    if command -v "$rt" >/dev/null 2>&1 && "$rt" ps --format '{{.Names}}' 2>/dev/null | grep -q "^${CONTAINER_NAME}$"; then
      echo "$rt"
      return
    fi
  done

  # Nothing running: pick the first installed runtime.
  if command -v podman >/dev/null 2>&1; then
    echo "podman"
  elif command -v docker >/dev/null 2>&1; then
    echo "docker"
  elif command -v lxc-start >/dev/null 2>&1; then
    echo "lxc"
  else
    echo ""
  fi
}
|
|
|
|
# Succeed when at least one usable runtime can be determined.
has_runtime() {
  [ -n "$(detect_runtime)" ]
}
|
|
|
|
# =============================================================================
|
|
# CONTAINER STATE CHECKS
|
|
# =============================================================================
|
|
|
|
# True when the LocalAI container (or, with no runtime, a bare local-ai
# process) is currently running.
is_running() {
  load_config
  local rt
  rt=$(detect_runtime)

  case "$rt" in
    lxc)
      lxc-info -n "$CONTAINER_NAME" -s 2>/dev/null | grep -q "RUNNING"
      ;;
    podman|docker)
      "$rt" ps --format '{{.Names}}' 2>/dev/null | grep -q "^${CONTAINER_NAME}$"
      ;;
    *)
      # No runtime detected: fall back to a process check.
      pgrep -f "local-ai" >/dev/null 2>&1
      ;;
  esac
}
|
|
|
|
# True when a LocalAI container (running or stopped) exists for the
# detected runtime; false when no runtime is configured.
container_exists() {
  load_config
  local rt
  rt=$(detect_runtime)

  case "$rt" in
    lxc)
      # LXC "existence" is simply the container directory on disk.
      [ -d "$lxc_path/$CONTAINER_NAME" ]
      ;;
    podman|docker)
      "$rt" ps -a --format '{{.Names}}' 2>/dev/null | grep -q "^${CONTAINER_NAME}$"
      ;;
    *)
      return 1
      ;;
  esac
}
|
|
|
|
# =============================================================================
|
|
# LXC FUNCTIONS
|
|
# =============================================================================
|
|
|
|
# Force-kill the LXC container when it exists; always succeeds.
lxc_stop() {
  lxc-info -n "$CONTAINER_NAME" >/dev/null 2>&1 || return 0
  lxc-stop -n "$CONTAINER_NAME" -k >/dev/null 2>&1 || true
}
|
|
|
|
# Install LocalAI as an LXC container. Prefers extracting the rootfs from
# the official Docker image (all backends); falls back to the standalone
# binary when no working docker/podman daemon is available.
lxc_install() {
  log_info "Installing LocalAI using LXC..."

  if ! command -v lxc-start >/dev/null 2>&1; then
    log_error "LXC not installed. Install with: opkg install lxc lxc-common"
    return 1
  fi

  local base="$lxc_path/$CONTAINER_NAME"
  local rootfs="$base/rootfs"
  local config="$base/config"

  # A working (not merely installed) docker/podman lets us extract the image.
  local can_extract=""
  if command -v podman >/dev/null 2>&1 && runtime_is_working podman; then
    can_extract=1
  elif command -v docker >/dev/null 2>&1 && runtime_is_working docker; then
    can_extract=1
  fi

  if [ -n "$can_extract" ]; then
    lxc_install_from_docker "$rootfs" || return 1
  else
    log_warn "No working Docker/Podman daemon - using standalone binary"
    log_warn "For full backend support, start Docker: /etc/init.d/dockerd start"
    lxc_install_standalone "$rootfs" || return 1
  fi

  # Create LXC config
  lxc_create_config "$config" "$rootfs"

  log_info "LXC container configured at $base"
  uci_set main.runtime 'lxc'
  return 0
}
|
|
|
|
# Check whether the given runtime ($1) can actually service requests:
# podman is daemonless so any successful `info` suffices; docker needs a
# live daemon behind the same call. Anything else is not a runtime.
runtime_is_working() {
  case "$1" in
    podman|docker) "$1" info >/dev/null 2>&1 ;;
    *) return 1 ;;
  esac
}
|
|
|
|
# Build the LXC rootfs by exporting the official LocalAI Docker image.
# This path ships ALL backends (llama-cpp, whisper, etc.).
# $1 - destination rootfs directory. Returns non-zero on any failure.
lxc_install_from_docker() {
  local rootfs="$1"
  local rt=""

  # Pick a runtime whose daemon actually answers, not just an installed binary.
  if command -v podman >/dev/null 2>&1 && runtime_is_working podman; then
    rt="podman"
  elif command -v docker >/dev/null 2>&1 && runtime_is_working docker; then
    rt="docker"
  else
    log_error "Need working podman or docker to extract image"
    log_error "Docker installed but daemon not running? Start with: /etc/init.d/dockerd start"
    return 1
  fi

  log_info "Extracting LocalAI rootfs from Docker image..."
  log_info "Image: $docker_image"
  log_info "This includes ALL backends (llama-cpp, whisper, etc.)"

  # Pull the image
  log_info "Pulling image (this may take a while)..."
  if ! $rt pull "$docker_image"; then
    log_error "Failed to pull image"
    return 1
  fi

  # Create a throwaway container purely so we can export its filesystem.
  # BUGFIX: the create's exit status was previously ignored, so a failure
  # here surfaced later as a confusing "Failed to extract rootfs".
  local temp_container="localai-extract-$$"
  log_info "Creating temporary container..."
  if ! $rt create --name "$temp_container" "$docker_image" >/dev/null 2>&1; then
    log_error "Failed to create temporary container"
    return 1
  fi

  # Export and extract rootfs
  mkdir -p "$rootfs"
  log_info "Exporting rootfs (2-4GB, please wait)..."

  if $rt export "$temp_container" | tar -xf - -C "$rootfs" 2>/dev/null; then
    log_info "Rootfs extracted successfully"
  else
    log_error "Failed to extract rootfs"
    $rt rm -f "$temp_container" >/dev/null 2>&1
    return 1
  fi

  # Cleanup temp container
  $rt rm -f "$temp_container" >/dev/null 2>&1

  # Optionally remove the Docker image to save space
  # $rt rmi "$docker_image" >/dev/null 2>&1

  # Mount points / scratch space expected by the LXC config.
  mkdir -p "$rootfs/models" "$rootfs/build" "$rootfs/tmp"

  # DNS inside the container (the host resolv.conf is not bind-mounted).
  echo "nameserver 8.8.8.8" > "$rootfs/etc/resolv.conf"

  local rootfs_size
  rootfs_size=$(du -sh "$rootfs" 2>/dev/null | cut -f1)
  log_info "Rootfs size: $rootfs_size"
  log_info "All LocalAI backends are now available!"

  return 0
}
|
|
|
|
# Fallback LXC install: place the prebuilt standalone binary into a minimal
# rootfs skeleton. Backend coverage is LIMITED compared to the Docker image.
# $1 - destination rootfs directory.
lxc_install_standalone() {
  local rootfs="$1"

  log_warn "No Docker/Podman available - using standalone binary"
  log_warn "Note: Standalone binary has LIMITED backend support"

  # Minimal rootfs skeleton
  mkdir -p \
    "$rootfs/usr/bin" "$rootfs/data" "$rootfs/models" "$rootfs/tmp" "$rootfs/etc" \
    "$rootfs/bin" "$rootfs/lib" "$rootfs/proc" "$rootfs/sys" "$rootfs/dev"

  # GitHub release assets are named local-ai-Linux-<arch>.
  local arch
  case "$(uname -m)" in
    x86_64)  arch="Linux-x86_64" ;;
    aarch64) arch="Linux-arm64" ;;
    armv7l)  arch="Linux-arm64" ;; # Try arm64 for armv7
    *)       arch="Linux-x86_64" ;;
  esac

  local binary_url="https://github.com/mudler/LocalAI/releases/download/${lxc_version}/local-ai-${arch}"
  log_info "Downloading LocalAI $lxc_version for $arch..."
  log_info "URL: $binary_url"

  if ! wget -q --show-progress -O "$rootfs/usr/bin/local-ai" "$binary_url"; then
    log_error "Failed to download LocalAI binary"
    return 1
  fi
  chmod +x "$rootfs/usr/bin/local-ai"
  log_info "Binary downloaded: $(ls -sh "$rootfs/usr/bin/local-ai" | cut -d' ' -f1)"

  # DNS inside the container
  echo "nameserver 8.8.8.8" > "$rootfs/etc/resolv.conf"

  return 0
}
|
|
|
|
# Write the LXC configuration file for the LocalAI container.
# $1 - config file path, $2 - rootfs directory.
# Reads globals set by load_config (api_host/api_port, threads, etc.).
lxc_create_config() {
  local config="$1"
  local rootfs="$2"

  # Optional CLI flags derived from uci settings
  local cors_flag="" debug_flag=""
  [ "$cors" = "1" ] && cors_flag=" --cors"
  [ "$debug" = "1" ] && debug_flag=" --debug"

  # A Docker-image rootfs ships an entrypoint script; standalone installs
  # launch the bare binary instead.
  local init_cmd="/usr/bin/local-ai"
  [ -f "$rootfs/build/entrypoint.sh" ] && init_cmd="/build/entrypoint.sh"

  cat > "$config" << EOF
# LocalAI LXC Configuration
lxc.uts.name = $CONTAINER_NAME
lxc.rootfs.path = dir:$rootfs

# Network - use host network
lxc.net.0.type = none

# Mount points
lxc.mount.auto = proc:mixed sys:ro cgroup:mixed
lxc.mount.entry = $models_path models none bind,create=dir 0 0
lxc.mount.entry = $data_path build none bind,create=dir 0 0
lxc.mount.entry = /dev/null dev/null none bind,create=file 0 0
lxc.mount.entry = /dev/zero dev/zero none bind,create=file 0 0
lxc.mount.entry = /dev/urandom dev/urandom none bind,create=file 0 0

# Environment variables
lxc.environment = LOCALAI_THREADS=$threads
lxc.environment = LOCALAI_CONTEXT_SIZE=$context_size
lxc.environment = LOCALAI_ADDRESS=${api_host}:${api_port}
lxc.environment = LOCALAI_MODELS_PATH=/models
lxc.environment = LOCALAI_DEBUG=$debug
lxc.environment = LOCALAI_CORS=$cors
lxc.environment = PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin

# Security
lxc.cap.drop = sys_admin sys_module mac_admin mac_override

# Resources
lxc.cgroup.memory.limit_in_bytes = $memory_limit

# Init command
lxc.init.cmd = $init_cmd --address ${api_host}:${api_port} --models-path /models --threads $threads --context-size $context_size${cors_flag}${debug_flag}

# Console
lxc.console.size = 4096
lxc.pty.max = 1024
EOF
}
|
|
|
|
# Replace this process with a foreground LXC start (procd-friendly).
lxc_run() {
  load_config
  lxc_stop

  local config="$lxc_path/$CONTAINER_NAME/config"
  [ -f "$config" ] || {
    log_error "LXC not configured. Run 'localaictl install --lxc' first."
    return 1
  }

  log_info "Starting LocalAI LXC container..."
  log_info "API: http://${api_host}:${api_port}"
  exec lxc-start -n "$CONTAINER_NAME" -F -f "$config"
}
|
|
|
|
# =============================================================================
|
|
# DOCKER/PODMAN FUNCTIONS
|
|
# =============================================================================
|
|
|
|
# Stop and remove the LocalAI container under runtime $1; always succeeds.
docker_stop() {
  local rt="$1" pat="^${CONTAINER_NAME}\$"
  # Stop it if currently running...
  $rt ps --format '{{.Names}}' 2>/dev/null | grep -q "$pat" \
    && { $rt stop "$CONTAINER_NAME" >/dev/null 2>&1 || true; }
  # ...then remove any leftover container record.
  $rt ps -a --format '{{.Names}}' 2>/dev/null | grep -q "$pat" \
    && { $rt rm -f "$CONTAINER_NAME" >/dev/null 2>&1 || true; }
  return 0
}
|
|
|
|
# Pull the LocalAI image with runtime $1 and record it as the active runtime.
docker_install() {
  local rt="$1"

  log_info "Installing LocalAI using $rt..."
  log_info "Image: $docker_image"
  log_info "This includes all backends (llama-cpp, whisper, etc.)"

  $rt pull "$docker_image" || {
    log_error "Failed to pull image"
    return 1
  }

  log_info "Image pulled successfully"
  uci_set main.runtime "$rt"
  return 0
}
|
|
|
|
# Exec a foreground LocalAI container under runtime $1 (procd keeps the PID).
docker_run() {
  local rt="$1"
  load_config
  docker_stop "$rt"

  # Environment flags; $env_args is expanded UNQUOTED below on purpose
  # so it splits into individual -e arguments.
  local env_args="-e LOCALAI_THREADS=$threads -e LOCALAI_CONTEXT_SIZE=$context_size"
  [ "$debug" = "1" ] && env_args="$env_args -e LOCALAI_DEBUG=true"
  [ "$cors" = "1" ] && env_args="$env_args -e LOCALAI_CORS=true"

  log_info "Starting LocalAI container ($rt)..."
  log_info "Image: $docker_image"
  log_info "API: http://${api_host}:${api_port}"

  exec $rt run --rm \
    --name "$CONTAINER_NAME" \
    -p "${api_port}:8080" \
    -v "${models_path}:/models:rw" \
    -v "${data_path}:/build:rw" \
    --memory="$memory_limit" \
    $env_args \
    "$docker_image"
}
|
|
|
|
# =============================================================================
|
|
# UNIFIED COMMANDS
|
|
# =============================================================================
|
|
|
|
# Install LocalAI. $1 may force a runtime (--lxc/--docker/--podman);
# otherwise the runtime is auto-detected.
cmd_install() {
  require_root
  load_config

  # Forced runtime flag wins; default arm falls back to detection.
  local rt
  case "$1" in
    --lxc)    rt="lxc" ;;
    --docker) rt="docker" ;;
    --podman) rt="podman" ;;
    *)        rt="$(detect_runtime)" ;;
  esac

  if [ -z "$rt" ]; then
    log_error "No container runtime found!"
    log_error "Install one of:"
    log_error " opkg install lxc lxc-common # For LXC"
    log_error " opkg install podman # For Podman"
    log_error " opkg install docker # For Docker"
    return 1
  fi

  mkdir -p "$data_path" "$models_path"

  case "$rt" in
    lxc)
      lxc_install || return 1
      ;;
    podman|docker)
      command -v "$rt" >/dev/null 2>&1 || {
        log_error "$rt not installed"
        return 1
      }
      docker_install "$rt" || return 1
      ;;
    *)
      log_error "Unknown runtime: $rt"
      return 1
      ;;
  esac

  # Enable the service for boot
  uci_set main.enabled '1'
  /etc/init.d/localai enable

  log_info ""
  log_info "LocalAI installed successfully! (runtime: $rt)"
  log_info ""
  log_info "Start with: /etc/init.d/localai start"
  log_info "API: http://<router-ip>:$api_port/v1"
  log_info ""
  log_info "Install a model:"
  log_info " localaictl model-install tinyllama"
}
|
|
|
|
# Stop the container for whichever runtime is active.
cmd_stop() {
  require_root
  load_config

  case "$(detect_runtime)" in
    lxc)    lxc_stop ;;
    podman) docker_stop podman ;;
    docker) docker_stop docker ;;
  esac
}
|
|
|
|
# procd entry point: run the container in the foreground.
cmd_run() {
  require_root
  load_config
  local rt
  rt=$(detect_runtime)

  if [ -z "$rt" ]; then
    log_error "No runtime configured. Run 'localaictl install' first."
    return 1
  fi

  case "$rt" in
    lxc)
      lxc_run
      ;;
    podman|docker)
      docker_run "$rt"
      ;;
    *)
      log_error "Unknown runtime: $rt"
      return 1
      ;;
  esac
}
|
|
|
|
# Human-readable status report: runtime, container state, config, API health.
cmd_status() {
  load_config
  local rt
  rt=$(detect_runtime)

  echo "=== LocalAI Status ==="
  echo ""
  echo "Runtime: ${rt:-NOT CONFIGURED}"
  echo ""

  local state="NOT INSTALLED"
  if is_running; then
    state="RUNNING"
  elif container_exists; then
    state="STOPPED"
  fi
  echo "Status: $state"

  echo ""
  echo "=== Configuration ==="
  echo "API port: $api_port"
  echo "Data path: $data_path"
  echo "Models path: $models_path"
  echo "Memory limit: $memory_limit"
  echo "Threads: $threads"
  echo ""

  # Probe the readiness endpoint on loopback.
  if wget -q -O - "http://127.0.0.1:$api_port/readyz" 2>/dev/null | grep -q "ok"; then
    echo "API Status: HEALTHY"
  else
    echo "API Status: NOT RESPONDING"
  fi
}
|
|
|
|
# Show recent LocalAI logs; pass -f to follow.
cmd_logs() {
  load_config
  local rt follow=""
  rt=$(detect_runtime)
  [ "$1" = "-f" ] && follow=1

  case "$rt" in
    podman|docker)
      if [ -n "$follow" ]; then
        $rt logs -f "$CONTAINER_NAME"
      else
        $rt logs --tail 100 "$CONTAINER_NAME"
      fi
      ;;
    lxc)
      # LXC logs land in syslog via the localai tag.
      if [ -n "$follow" ]; then
        logread -f -e localai
      else
        logread -e localai | tail -100
      fi
      ;;
    *)
      logread -e localai | tail -100
      ;;
  esac
}
|
|
|
|
# Open an interactive shell inside the running container.
cmd_shell() {
  load_config
  local rt
  rt=$(detect_runtime)

  is_running || {
    log_error "Container not running"
    return 1
  }

  case "$rt" in
    lxc)           lxc-attach -n "$CONTAINER_NAME" -- /bin/sh ;;
    podman|docker) $rt exec -it "$CONTAINER_NAME" /bin/sh ;;
  esac
}
|
|
|
|
# Report which runtimes are available plus memory/storage headroom.
cmd_check() {
  load_config

  echo "=== Prerequisite Check ==="
  echo ""

  # _check_tool <command> <label>: one availability line per runtime.
  _check_tool() {
    if command -v "$1" >/dev/null 2>&1; then
      echo "[OK] $2 available"
    else
      echo "[--] $2 not installed"
    fi
  }
  _check_tool lxc-start LXC
  _check_tool podman Podman
  _check_tool docker Docker

  echo ""
  echo "Configured runtime: $runtime"
  echo "Detected runtime: $(detect_runtime)"
  echo ""

  # Memory
  local mem_kb mem_gb
  mem_kb=$(awk '/MemTotal/ {print $2}' /proc/meminfo)
  mem_gb=$((mem_kb / 1024 / 1024))
  echo "System memory: ${mem_gb}GB"
  [ "$mem_gb" -lt 2 ] && echo "[WARN] Low memory - need at least 2GB"

  # Storage
  local avail
  avail=$(df -h "$data_path" 2>/dev/null | tail -1 | awk '{print $4}')
  echo "Storage available: $avail"
}
|
|
|
|
# Refresh the LocalAI installation, then restart the service if enabled.
cmd_update() {
  require_root
  load_config
  local rt
  rt=$(detect_runtime)

  log_info "Updating LocalAI..."
  cmd_stop

  case "$rt" in
    lxc)
      # LXC updates rebuild the container from scratch.
      rm -rf "$lxc_path/$CONTAINER_NAME"
      lxc_install
      ;;
    podman|docker)
      docker_install "$rt"
      ;;
  esac

  if [ "$(uci_get main.enabled)" = "1" ]; then
    /etc/init.d/localai restart
  fi
}
|
|
|
|
# =============================================================================
|
|
# MODEL MANAGEMENT
|
|
# =============================================================================
|
|
|
|
# List model files on disk plus uci-declared presets.
cmd_models() {
  load_config
  echo "=== Installed Models ==="
  echo ""

  if [ -d "$models_path" ]; then
    local count=0 model
    for model in "$models_path"/*.gguf "$models_path"/*.bin "$models_path"/*.onnx; do
      [ -f "$model" ] || continue  # skip unexpanded glob patterns
      count=$((count + 1))
      echo " $count. $(basename "$model") ($(ls -lh "$model" | awk '{print $5}'))"
    done
    [ "$count" -eq 0 ] && echo " No models installed"
  fi

  echo ""
  echo "=== Available Presets ==="
  # Each uci section of type "preset" describes a downloadable model.
  uci show localai 2>/dev/null | grep "=preset" | while read line; do
    section=$(echo "$line" | cut -d. -f2 | cut -d= -f1)
    name=$(uci_get "$section.name")
    desc=$(uci_get "$section.description")
    size=$(uci_get "$section.size")
    [ -n "$name" ] && echo " $name - $desc ($size)"
  done
}
|
|
|
|
# Install a model by preset name (declared in uci) or by direct URL.
# $1 - preset name or http(s) URL. Returns non-zero on failure.
cmd_model_install() {
  load_config
  require_root

  local model_name="$1"
  [ -z "$model_name" ] && { echo "Usage: localaictl model-install <name>"; return 1; }

  mkdir -p "$models_path"

  # Look the name up among uci preset sections.
  local preset_url="" preset_file="" section
  for section in $(uci show localai 2>/dev/null | grep "=preset" | cut -d. -f2 | cut -d= -f1); do
    [ "$(uci_get "$section.name")" = "$model_name" ] || continue
    preset_url=$(uci_get "$section.url")
    preset_file=$(basename "$preset_url")
    break
  done

  if [ -n "$preset_url" ]; then
    log_info "Installing model: $model_name"
    log_info "URL: $preset_url"

    if ! wget --show-progress -O "$models_path/$preset_file" "$preset_url"; then
      log_error "Download failed"
      return 1
    fi

    # Companion YAML so LocalAI picks the model up by name.
    # BUGFIX: "model:" must be nested under "parameters:" per LocalAI's
    # model-config schema; it was previously written at the top level.
    cat > "$models_path/$model_name.yaml" << EOF
name: $model_name
backend: llama-cpp
parameters:
  model: $preset_file
context_size: $context_size
threads: $threads
EOF
    log_info "Model installed: $model_name"
    log_info "Restart LocalAI to load: /etc/init.d/localai restart"
  elif echo "$model_name" | grep -q "^http"; then
    # Direct URL install: keep the remote filename.
    local filename
    filename=$(basename "$model_name")
    log_info "Downloading: $model_name"
    wget --show-progress -O "$models_path/$filename" "$model_name" || return 1
    log_info "Model installed: $filename"
  else
    log_error "Unknown model: $model_name"
    return 1
  fi
}
|
|
|
|
# Remove a model's files from the models directory: exact-extension matches
# plus any filename containing the given name (quantization suffixes etc.).
# $1 - model name. Warns (without failing hard) when nothing matched.
cmd_model_remove() {
  load_config
  require_root

  local model_name="$1"
  [ -z "$model_name" ] && { echo "Usage: localaictl model-remove <name>"; return 1; }

  local found=0 ext file
  for ext in gguf bin onnx yaml; do
    if [ -f "$models_path/$model_name.$ext" ]; then
      rm -f "$models_path/$model_name.$ext"
      found=1
    fi
  done

  # Also sweep substring matches (the -f guard skips an unexpanded glob).
  for file in "$models_path"/*"$model_name"*; do
    if [ -f "$file" ]; then
      rm -f "$file"
      found=1
    fi
  done

  # BUGFIX: was `[ ... ] && log_info ... || log_warn ...` — the warn branch
  # would also run if log_info itself failed. Use an explicit if/else.
  if [ "$found" -eq 1 ]; then
    log_info "Model removed: $model_name"
  else
    log_warn "Model not found: $model_name"
  fi
}
|
|
|
|
# =============================================================================
|
|
# MAIN
|
|
# =============================================================================
|
|
|
|
# ---------------------------------------------------------------------------
# Command dispatch
# ---------------------------------------------------------------------------
case "${1:-}" in
  install)           shift; cmd_install "$@" ;;
  check)             cmd_check ;;
  update)            cmd_update ;;
  status)            cmd_status ;;
  logs)              shift; cmd_logs "$@" ;;
  shell)             cmd_shell ;;
  models)            cmd_models ;;
  model-install)     shift; cmd_model_install "$@" ;;
  model-remove)      shift; cmd_model_remove "$@" ;;
  service-run)       cmd_run ;;
  service-stop)      cmd_stop ;;
  help|--help|-h|'') usage ;;
  *)
    echo "Unknown: $1" >&2
    usage >&2
    exit 1
    ;;
esac