secubox-openwrt/package/secubox/secubox-app-ollama/files/etc/init.d/ollama
commit b245fdb3e7 (CyberMind-FR)
feat(localai,ollama): Switch LocalAI to Docker and add Ollama package
LocalAI changes:
- Rewrite localaictl to use Docker/Podman instead of standalone binary
- Use localai/localai:v2.25.0-ffmpeg image with all backends included
- Fix "llama-cpp backend not found" error
- Auto-detect podman or docker runtime (see the sketch after this list)
- Update UCI config with Docker settings
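
The runtime auto-detection mentioned above could look roughly like the following. This is a minimal sketch, not the actual localaictl code; the function name detect_runtime is illustrative, and the only assumption is a POSIX sh with `command -v`:

# Sketch of podman/docker auto-detection; not taken from localaictl itself.
detect_runtime() {
	if command -v podman >/dev/null 2>&1; then
		echo "podman"
	elif command -v docker >/dev/null 2>&1; then
		echo "docker"
	else
		echo "error: no container runtime (podman/docker) found" >&2
		return 1
	fi
}

# Example use: RUNTIME="$(detect_runtime)" || exit 1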

New Ollama package:
- Add secubox-app-ollama as a lighter alternative to LocalAI
- Native ARM64 support with backends included
- Simple CLI: ollamactl pull/run/list (usage sketch below)
- Docker image ~1GB vs 2-4GB for LocalAI
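
A hedged usage sketch of the three subcommands named above. The model tag is a placeholder and the exact argument forms are assumptions, not confirmed by this commit:

ollamactl pull qwen2.5:0.5b   # fetch a model (tag is hypothetical)
ollamactl list                # show locally available models
ollamactl run qwen2.5:0.5b    # start the model (argument form assumed)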

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-21 17:56:40 +01:00

#!/bin/sh /etc/rc.common
# SecuBox Ollama - Local LLM runtime
# Copyright (C) 2025 CyberMind.fr

START=95
STOP=10
USE_PROCD=1
PROG=/usr/sbin/ollamactl

start_service() {
	local enabled

	# Read the enable flag from /etc/config/ollama (section 'main').
	config_load ollama
	config_get enabled main enabled '0'
	[ "$enabled" = "1" ] || {
		echo "Ollama is disabled. Enable with: uci set ollama.main.enabled=1"
		return 0
	}

	procd_open_instance
	procd_set_param command $PROG service-run
	# Respawn: treat the service as stable after 3600s of uptime,
	# wait 5s between restarts, give up after 5 failed attempts.
	procd_set_param respawn 3600 5 5
	procd_set_param stdout 1
	procd_set_param stderr 1
	procd_close_instance
}

stop_service() {
	$PROG service-stop
}

service_triggers() {
	# Restart the service whenever the 'ollama' UCI config changes.
	procd_add_reload_trigger "ollama"
}

reload_service() {
	stop
	start
}
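
The script reads ollama.main.enabled via config_load, which implies a UCI file along these lines. Only the section name 'main' and the option 'enabled' are confirmed by the script; the section type is an assumption:

# /etc/config/ollama (sketch; section type assumed)
config ollama 'main'
	option enabled '0'

To enable and launch the service once the package is installed:

uci set ollama.main.enabled=1
uci commit ollama
/etc/init.d/ollama enable   # start on boot
/etc/init.d/ollama start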