#!/bin/sh /etc/rc.common
# LocalAI-WB init script
# Copyright (C) 2025 CyberMind.fr

# rc.common ordering: start late (99) so network/storage are already up;
# stop early (10) during shutdown.
START=99
STOP=10
# Supervise the daemon with procd (respawn, stdout/stderr capture).
USE_PROCD=1

# Preferred binary path; start_service falls back to other locations.
PROG=/usr/bin/local-ai-wb
# UCI config name (i.e. /etc/config/localai-wb).
CONFIG=localai-wb
# Optional prebuilt backend assets directory shipped by the package.
BACKEND_ASSETS=/usr/share/localai/backend-assets

start_service() {
	# Launch LocalAI under procd when enabled in UCI config 'localai-wb'.
	# Reads section 'main' options: enabled, api_port, api_host,
	# models_path, threads, context_size, debug, cors.
	local enabled
	config_load "$CONFIG"
	config_get enabled main enabled '0'

	[ "$enabled" = "1" ] || return 0

	# Locate the LocalAI binary, preferring the web-bundle build.
	local binary=""
	if [ -x "$PROG" ]; then
		binary="$PROG"
	elif [ -x "/opt/localai/bin/local-ai" ]; then
		binary="/opt/localai/bin/local-ai"
	elif [ -x "/usr/bin/local-ai" ]; then
		binary="/usr/bin/local-ai"
	else
		logger -t localai-wb -p err "LocalAI binary not found"
		return 1
	fi

	# Load runtime options from UCI, with defaults.
	local api_port api_host models_path threads context_size debug cors
	config_get api_port main api_port '8080'
	config_get api_host main api_host '0.0.0.0'
	config_get models_path main models_path '/srv/localai/models'
	config_get threads main threads '4'
	config_get context_size main context_size '2048'
	config_get debug main debug '0'
	config_get cors main cors '1'

	# Ensure the models directory exists; abort early if it cannot be
	# created (read-only overlay, bad path, ...), instead of launching a
	# daemon that will fail later.
	if ! mkdir -p "$models_path"; then
		logger -t localai-wb -p err "cannot create models directory: $models_path"
		return 1
	fi

	procd_open_instance
	# Append each argument separately so values containing spaces or
	# shell metacharacters (e.g. models_path) are passed intact rather
	# than word-split from one concatenated string.
	procd_set_param command "$binary"
	procd_append_param command --address "${api_host}:${api_port}"
	procd_append_param command --models-path "$models_path"
	procd_append_param command --threads "$threads"
	procd_append_param command --context-size "$context_size"
	[ "$cors" = "1" ] && procd_append_param command --cors
	[ "$debug" = "1" ] && procd_append_param command --debug
	# Point to prebuilt backend assets if the package ships them.
	[ -d "$BACKEND_ASSETS" ] && procd_append_param command --backend-assets-path "$BACKEND_ASSETS"
	procd_set_param respawn ${respawn_threshold:-3600} ${respawn_timeout:-5} ${respawn_retry:-5}
	procd_set_param stdout 1
	procd_set_param stderr 1
	# NOTE(review): procd only honors this if local-ai actually writes
	# /var/run/localai-wb.pid — confirm against the daemon, otherwise
	# drop this line and let procd track the PID itself.
	procd_set_param pidfile /var/run/localai-wb.pid
	procd_close_instance

	logger -t localai-wb "Started LocalAI on ${api_host}:${api_port}"
}

stop_service() {
	# procd tears down the supervised instance itself; this hook only
	# leaves an audit trail in syslog.
	logger -t localai-wb "Stopping LocalAI"
}

service_triggers() {
	# Re-run reload_service whenever UCI config 'localai-wb' changes
	# (e.g. after 'uci commit localai-wb' + 'reload_config').
	procd_add_reload_trigger "$CONFIG"
}

reload_service() {
	# LocalAI has no in-place reload; apply config changes by a full
	# stop/start cycle (rc.common provides the stop/start entry points).
	stop
	start
}
