The bare `except:` clause catches SystemExit which is raised by sys.exit(0), causing the script to fall through to sys.exit(1). Changed to `except Exception:` which doesn't catch SystemExit, allowing proper exit code propagation. Also: - Simplified Python extraction script - Use double quotes for string literals (shell compatibility) - Write Python script to temp file instead of heredoc (RPCD stdin conflict) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2320 lines
62 KiB
Bash
Executable File
2320 lines
62 KiB
Bash
Executable File
#!/bin/sh
|
|
# SPDX-License-Identifier: Apache-2.0
|
|
# LuCI RPC backend for Streamlit Platform
|
|
# Copyright (C) 2025 CyberMind.fr
|
|
|
|
. /lib/functions.sh
|
|
. /usr/share/libubox/jshn.sh
|
|
|
|
# Extract ZIP with flatten for single root directories
# $1 - path to the .zip archive
# $2 - destination directory (created if missing)
# If the archive contains exactly one top-level directory, its contents are
# promoted directly into $2 (drops GitHub-style "repo-name/" wrappers).
extract_zip_flatten() {
    local zip_file="$1"
    local target_dir="$2"
    # Per-invocation scratch directory keyed on this shell's PID.
    local tmpextract="/tmp/streamlit_extract_$$"

    mkdir -p "$tmpextract" "$target_dir"
    # NOTE(review): unzip's exit status is not checked; a corrupt archive
    # silently yields an empty target directory.
    unzip -o "$zip_file" -d "$tmpextract" >/dev/null 2>&1

    local root_items=$(ls -1 "$tmpextract" 2>/dev/null | wc -l)
    if [ "$root_items" = "1" ]; then
        local single_dir="$tmpextract/$(ls -1 "$tmpextract" | head -1)"
        if [ -d "$single_dir" ]; then
            # Move visible and hidden entries separately; the ".*" glob also
            # matches "." and "..", whose mv failures are deliberately
            # silenced by the 2>/dev/null.
            mv "$single_dir"/* "$target_dir/" 2>/dev/null
            mv "$single_dir"/.* "$target_dir/" 2>/dev/null
        else
            # Single top-level file: move it into the target as-is.
            mv "$single_dir" "$target_dir/"
        fi
    else
        # Multiple (or zero) root entries: move everything without flattening.
        mv "$tmpextract"/* "$target_dir/" 2>/dev/null
        mv "$tmpextract"/.* "$target_dir/" 2>/dev/null
    fi
    rm -rf "$tmpextract"
}
|
|
|
|
# UCI package holding all settings (sections: main, server, per-app, per-instance).
CONFIG="streamlit"
# Name of the LXC container running the Streamlit runtime.
LXC_NAME="streamlit"
# Base directory under which LXC containers live.
LXC_PATH="/srv/lxc"
# Default apps directory; re-derived from main.data_path where it matters.
APPS_PATH="/srv/streamlit/apps"
|
|
|
|
# JSON helpers
# Every RPC response is wrapped in a top-level "result" object; these two
# open and close that wrapper around the shared jshn state.
json_init_obj() { json_init; json_add_object "result"; }
json_close_obj() { json_close_object; json_dump; }
|
|
|
|
# Emit a failure response: result = { success: false, message: $1 }.
# $1 - human-readable error message
json_error() {
    local msg="$1"
    json_init_obj
    json_add_boolean "success" 0
    json_add_string "message" "$msg"
    json_close_obj
}
|
|
|
|
# Emit a success response: result = { success: true [, message: $1] }.
# $1 - optional status message; the key is omitted when empty/unset.
json_success() {
    json_init_obj
    json_add_boolean "success" 1
    if [ -n "$1" ]; then
        json_add_string "message" "$1"
    fi
    json_close_obj
}
|
|
|
|
# Check if container is running
# Returns 0 when lxc-info reports the container state as RUNNING.
lxc_running() {
    local state
    state=$(lxc-info -n "$LXC_NAME" -s 2>/dev/null)
    case "$state" in
        *RUNNING*) return 0 ;;
        *) return 1 ;;
    esac
}
|
|
|
|
# Check if container exists
# An installation is considered present when both the LXC config file and
# the rootfs directory exist.
lxc_exists() {
    if [ -f "$LXC_PATH/$LXC_NAME/config" ] && [ -d "$LXC_PATH/$LXC_NAME/rootfs" ]; then
        return 0
    fi
    return 1
}
|
|
|
|
# Get service status
# Emits { "result": { enabled, running, installed, uptime, http_port,
#   data_path, memory_limit, active_app, app_count, web_url, container_name } }.
get_status() {
    local enabled running installed uptime
    local http_port data_path memory_limit active_app

    # Pull current settings from UCI, falling back to shipped defaults.
    config_load "$CONFIG"
    config_get enabled main enabled "0"
    config_get http_port main http_port "8501"
    config_get data_path main data_path "/srv/streamlit"
    config_get memory_limit main memory_limit "512M"
    config_get active_app main active_app "hello"

    running="false"
    installed="false"
    uptime=""

    if lxc_exists; then
        installed="true"
    fi

    if lxc_running; then
        running="true"
        # NOTE(review): this parses the "CPU use" line of lxc-info, so the
        # value reported as "uptime" is actually CPU time — confirm intent.
        uptime=$(lxc-info -n "$LXC_NAME" 2>/dev/null | grep -i "cpu use" | head -1 | awk '{print $3}')
    fi

    # Count apps (top-level .py files only; ZIP-deployed directories are
    # not included in this count).
    local app_count=0
    APPS_PATH="$data_path/apps"
    if [ -d "$APPS_PATH" ]; then
        app_count=$(ls -1 "$APPS_PATH"/*.py 2>/dev/null | wc -l)
    fi

    # Get LAN IP for URL advertised to the UI.
    local lan_ip
    lan_ip=$(uci -q get network.lan.ipaddr || echo "192.168.1.1")

    json_init_obj
    json_add_boolean "enabled" "$( [ "$enabled" = "1" ] && echo 1 || echo 0 )"
    json_add_boolean "running" "$( [ "$running" = "true" ] && echo 1 || echo 0 )"
    json_add_boolean "installed" "$( [ "$installed" = "true" ] && echo 1 || echo 0 )"
    json_add_string "uptime" "$uptime"
    json_add_int "http_port" "$http_port"
    json_add_string "data_path" "$data_path"
    json_add_string "memory_limit" "$memory_limit"
    json_add_string "active_app" "$active_app"
    json_add_int "app_count" "$app_count"
    json_add_string "web_url" "http://${lan_ip}:${http_port}"
    json_add_string "container_name" "$LXC_NAME"
    json_close_obj
}
|
|
|
|
# Get configuration
# Emits { "result": { "main": {...}, "server": {...} } } mirroring the UCI
# sections, using the same defaults as the init script.
get_config() {
    local http_port http_host data_path memory_limit enabled active_app
    local headless gather_stats theme_base theme_primary

    config_load "$CONFIG"

    # Main settings
    config_get http_port main http_port "8501"
    config_get http_host main http_host "0.0.0.0"
    config_get data_path main data_path "/srv/streamlit"
    config_get memory_limit main memory_limit "512M"
    config_get enabled main enabled "0"
    config_get active_app main active_app "hello"

    # Server settings (Streamlit server/theme options)
    config_get headless server headless "true"
    config_get gather_stats server browser_gather_usage_stats "false"
    config_get theme_base server theme_base "dark"
    config_get theme_primary server theme_primary_color "#0ff"

    json_init_obj
    json_add_object "main"
    json_add_boolean "enabled" "$( [ "$enabled" = "1" ] && echo 1 || echo 0 )"
    json_add_int "http_port" "$http_port"
    json_add_string "http_host" "$http_host"
    json_add_string "data_path" "$data_path"
    json_add_string "memory_limit" "$memory_limit"
    json_add_string "active_app" "$active_app"
    json_close_object

    json_add_object "server"
    json_add_boolean "headless" "$( [ "$headless" = "true" ] && echo 1 || echo 0 )"
    json_add_boolean "browser_gather_usage_stats" "$( [ "$gather_stats" = "true" ] && echo 1 || echo 0 )"
    json_add_string "theme_base" "$theme_base"
    json_add_string "theme_primary_color" "$theme_primary"
    json_close_object

    json_close_obj
}
|
|
|
|
# Save configuration
# Reads a flat JSON object from stdin and persists any present keys to UCI.
# JSON keys map 1:1 onto option names of the "main" and "server" sections;
# absent or empty keys leave the stored values untouched.
save_config() {
    read -r input

    local key val

    # "main" section options (option name == JSON key). Data-driven loop
    # replaces ten duplicated extract/set line pairs with identical behavior.
    for key in http_port http_host data_path memory_limit enabled active_app; do
        val=$(echo "$input" | jsonfilter -e "@.$key" 2>/dev/null)
        [ -n "$val" ] && uci set "${CONFIG}.main.${key}=$val"
    done

    # "server" section options (option name == JSON key).
    for key in headless browser_gather_usage_stats theme_base theme_primary_color; do
        val=$(echo "$input" | jsonfilter -e "@.$key" 2>/dev/null)
        [ -n "$val" ] && uci set "${CONFIG}.server.${key}=$val"
    done

    uci commit "$CONFIG"

    json_success "Configuration saved"
}
|
|
|
|
# Start service
# Starts the container via the init script and verifies it actually came up.
start_service() {
    lxc_running && { json_error "Service is already running"; return; }
    lxc_exists || { json_error "Container not installed. Run install first."; return; }

    # Detach the init script; it can block while the container boots.
    /etc/init.d/streamlit start >/dev/null 2>&1 &

    # Give it a moment, then check the actual container state.
    sleep 2
    if lxc_running; then
        json_success "Service started"
    else
        json_error "Failed to start service"
    fi
}
|
|
|
|
# Stop service
# Stops the container via the init script and verifies it actually went down.
stop_service() {
    lxc_running || { json_error "Service is not running"; return; }

    /etc/init.d/streamlit stop >/dev/null 2>&1

    # Allow the container a moment to shut down before checking.
    sleep 2
    if lxc_running; then
        json_error "Failed to stop service"
    else
        json_success "Service stopped"
    fi
}
|
|
|
|
# Restart service
# Restarts via the init script (detached) and reports the resulting state.
restart_service() {
    /etc/init.d/streamlit restart >/dev/null 2>&1 &

    # A restart cycles the container down and up; wait a bit longer.
    sleep 3
    if ! lxc_running; then
        json_error "Service restart failed"
    else
        json_success "Service restarted"
    fi
}
|
|
|
|
# Install Streamlit
# Kicks off streamlitctl install in the background and reports the log path;
# installation is far too slow to run synchronously inside an RPC call.
install() {
    lxc_exists && { json_error "Already installed. Use update to refresh."; return; }

    # Run install in background
    /usr/sbin/streamlitctl install >/var/log/streamlit-install.log 2>&1 &

    json_init_obj
    json_add_boolean "started" 1
    json_add_string "message" "Installation started in background"
    json_add_string "log_file" "/var/log/streamlit-install.log"
    json_close_obj
}
|
|
|
|
# Uninstall Streamlit
# Delegates teardown to streamlitctl, then verifies the container is gone.
uninstall() {
    /usr/sbin/streamlitctl uninstall >/dev/null 2>&1

    if lxc_exists; then
        json_error "Uninstall failed"
    else
        json_success "Uninstalled successfully"
    fi
}
|
|
|
|
# Update Streamlit
# Kicks off streamlitctl update in the background and reports the log path.
update() {
    lxc_exists || { json_error "Not installed. Run install first."; return; }

    # Run update in background
    /usr/sbin/streamlitctl update >/var/log/streamlit-update.log 2>&1 &

    json_init_obj
    json_add_boolean "started" 1
    json_add_string "message" "Update started in background"
    json_add_string "log_file" "/var/log/streamlit-update.log"
    json_close_obj
}
|
|
|
|
# Get logs
# stdin JSON: { "lines": <int, optional, default 100> }.
# Emits { "result": { "logs": [ <line>, ... ] } } gathered from
# <data_path>/logs/*.log plus the install/update logs in /var/log.
get_logs() {
    read -r input
    local lines
    lines=$(echo "$input" | jsonfilter -e '@.lines' 2>/dev/null)
    [ -z "$lines" ] && lines=100

    local data_path
    config_load "$CONFIG"
    config_get data_path main data_path "/srv/streamlit"

    json_init_obj
    json_add_array "logs"

    # BUGFIX: the previous `tail | while read` form ran the loop in a
    # pipeline subshell, so every json_add_string mutated jshn state that
    # was discarded and the "logs" array always came back empty. Buffer the
    # tail output in a temp file and redirect it into the loop so the
    # additions happen in the current shell.
    local tmplog="/tmp/streamlit_logs_$$.txt"
    local logfile line

    # Per-app logs from the data path.
    if [ -d "$data_path/logs" ]; then
        for logfile in "$data_path/logs"/*.log; do
            [ -f "$logfile" ] || continue
            tail -n "$lines" "$logfile" 2>/dev/null > "$tmplog"
            while IFS= read -r line; do
                json_add_string "" "$line"
            done < "$tmplog"
        done
    fi

    # Also surface the tail of the install/update logs.
    for logfile in /var/log/streamlit-install.log /var/log/streamlit-update.log; do
        [ -f "$logfile" ] || continue
        tail -n 50 "$logfile" 2>/dev/null > "$tmplog"
        while IFS= read -r line; do
            json_add_string "" "$line"
        done < "$tmplog"
    done

    rm -f "$tmplog"
    json_close_array
    json_close_obj
}
|
|
|
|
# List apps
# Emits { "result": { "apps": [ {id,name,path,size,mtime,active}, ... ],
#   "active_app", "apps_path" } }.
# Two sources are scanned under <data_path>/apps: top-level <id>.py files
# and ZIP-deployed <id>/ directories.
list_apps() {
    local data_path active_app
    config_load "$CONFIG"
    config_get data_path main data_path "/srv/streamlit"
    config_get active_app main active_app "hello"

    APPS_PATH="$data_path/apps"
    # Space-delimited list of ids already emitted, so an id that exists both
    # as <id>.py and as a directory is only listed once.
    local seen=""

    json_init_obj
    json_add_array "apps"

    if [ -d "$APPS_PATH" ]; then
        # Scan top-level .py files
        for app in "$APPS_PATH"/*.py; do
            [ -f "$app" ] || continue
            local name=$(basename "$app" .py)
            # Optional display name stored in the app's UCI section.
            local display_name=$(uci -q get "${CONFIG}.${name}.name")
            [ -z "$display_name" ] && display_name="$name"
            # NOTE(review): size is parsed from `ls -la` here but taken via
            # `stat -c %s` in the directory branch below — consider unifying.
            local size=$(ls -la "$app" 2>/dev/null | awk '{print $5}')
            local mtime=$(stat -c %Y "$app" 2>/dev/null || echo "0")

            local is_active=0
            [ "$name" = "$active_app" ] && is_active=1
            seen="$seen $name "

            json_add_object ""
            json_add_string "id" "$name"
            json_add_string "name" "$display_name"
            json_add_string "path" "$app"
            json_add_string "size" "$size"
            json_add_int "mtime" "$mtime"
            json_add_boolean "active" "$is_active"
            json_close_object
        done

        # Scan subdirectories (ZIP-uploaded apps)
        for dir in "$APPS_PATH"/*/; do
            [ -d "$dir" ] || continue
            local dirname=$(basename "$dir")
            # Skip Streamlit multi-page convention dir and hidden dirs
            case "$dirname" in pages|.*) continue ;; esac
            # Skip if already seen as a top-level .py
            case "$seen" in *" $dirname "*) continue ;; esac

            # Prefer app.py as main entry point, fall back to first .py
            local main_py=""
            [ -f "$dir/app.py" ] && main_py="$dir/app.py"
            [ -z "$main_py" ] && main_py=$(find "$dir" -maxdepth 1 -name "*.py" -type f | head -1)
            [ -z "$main_py" ] && main_py=$(find "$dir" -maxdepth 2 -name "*.py" -type f | head -1)
            [ -z "$main_py" ] && continue

            local display_name=$(uci -q get "${CONFIG}.${dirname}.name")
            [ -z "$display_name" ] && display_name="$dirname"
            local size=$(stat -c %s "$main_py" 2>/dev/null || echo "0")
            local mtime=$(stat -c %Y "$main_py" 2>/dev/null || echo "0")

            local is_active=0
            [ "$dirname" = "$active_app" ] && is_active=1

            json_add_object ""
            json_add_string "id" "$dirname"
            json_add_string "name" "$display_name"
            json_add_string "path" "$main_py"
            json_add_string "size" "$size"
            json_add_int "mtime" "$mtime"
            json_add_boolean "active" "$is_active"
            json_close_object
        done
    fi

    json_close_array
    json_add_string "active_app" "$active_app"
    json_add_string "apps_path" "$APPS_PATH"
    json_close_obj
}
|
|
|
|
# Add app
# stdin JSON: { "name": <app id>, "path": <entry-point path> }.
# Registers (or overwrites) the app's UCI section; the name is sanitized to
# a valid UCI identifier first.
add_app() {
    read -r input
    local name path
    name=$(echo "$input" | jsonfilter -e '@.name' 2>/dev/null)
    path=$(echo "$input" | jsonfilter -e '@.path' 2>/dev/null)

    if [ -z "$name" ] || [ -z "$path" ]; then
        json_error "Missing name or path"
        return
    fi

    # Sanitize name for UCI: keep [a-zA-Z0-9_], trim leading/trailing '_'.
    name=$(echo "$name" | sed 's/[^a-zA-Z0-9_]/_/g; s/^_*//; s/_*$//')

    local base="${CONFIG}.${name}"
    uci set "${base}=app"
    uci set "${base}.name=$name"
    uci set "${base}.path=$path"
    uci set "${base}.enabled=1"
    uci commit "$CONFIG"

    json_success "App added: $name"
}
|
|
|
|
# Remove app
# stdin JSON: { "name": <app id> }.
# Deletes the app's files (top-level .py or ZIP-deployed directory), any
# stored requirements file, and the app's UCI section.
remove_app() {
    read -r input
    local name
    name=$(echo "$input" | jsonfilter -e '@.name' 2>/dev/null)

    if [ -z "$name" ]; then
        json_error "Missing app name"
        return
    fi

    local data_path
    config_load "$CONFIG"
    config_get data_path main data_path "/srv/streamlit"

    # Remove app files (top-level .py or subdirectory)
    if [ -f "$data_path/apps/${name}.py" ]; then
        rm -f "$data_path/apps/${name}.py"
        # BUGFIX: upload_app stores requirements as <name>_requirements.txt,
        # but only <name>.requirements.txt was removed here, leaving the
        # uploaded copy behind. Remove both spellings.
        rm -f "$data_path/apps/${name}.requirements.txt"
        rm -f "$data_path/apps/${name}_requirements.txt"
    fi
    if [ -d "$data_path/apps/${name}" ]; then
        rm -rf "$data_path/apps/${name}"
    fi

    # Remove UCI config
    uci -q delete "${CONFIG}.${name}"
    uci commit "$CONFIG"

    json_success "App removed: $name"
}
|
|
|
|
# Set active app
# stdin JSON: { "name": <app id> }. Persists main.active_app and reports
# whether the commit succeeded.
set_active_app() {
    read -r input
    local name
    name=$(echo "$input" | jsonfilter -e '@.name' 2>/dev/null)

    [ -n "$name" ] || { json_error "Missing app name"; return; }

    uci set "${CONFIG}.main.active_app=$name"
    if uci commit "$CONFIG"; then
        json_success "Active app set to: $name"
    else
        json_error "Failed to set active app"
    fi
}
|
|
|
|
# Get app details
# stdin JSON: { "name": <app id> }.
# Reports metadata for a top-level <name>.py app; directory-based (ZIP)
# apps are not resolved by this method.
get_app() {
    read -r input
    local name
    name=$(echo "$input" | jsonfilter -e '@.name' 2>/dev/null)

    if [ -z "$name" ]; then
        json_error "Missing app name"
        return
    fi

    local data_path active_app
    config_load "$CONFIG"
    config_get data_path main data_path "/srv/streamlit"
    config_get active_app main active_app "hello"

    local app_file="$data_path/apps/${name}.py"

    if [ ! -f "$app_file" ]; then
        json_error "App not found"
        return
    fi

    # Use stat for the byte size (consistent with list_apps' directory
    # branch) instead of fragile `ls -la` column parsing.
    local size=$(stat -c %s "$app_file" 2>/dev/null || echo "0")
    local mtime=$(stat -c %Y "$app_file" 2>/dev/null || echo "0")
    local lines=$(wc -l < "$app_file" 2>/dev/null || echo "0")

    local is_active=0
    [ "$name" = "$active_app" ] && is_active=1

    json_init_obj
    json_add_string "name" "$name"
    json_add_string "path" "$app_file"
    json_add_string "size" "$size"
    json_add_int "mtime" "$mtime"
    json_add_int "lines" "$lines"
    json_add_boolean "active" "$is_active"
    json_close_obj
}
|
|
|
|
# Upload app (receive base64 content) - KISS: auto-detects ZIP or .py
# NOTE: uhttpd-mod-ubus has a 64KB JSON body limit.
# Small files (<40KB) go through RPC directly.
# Larger files use chunked upload: upload_chunk + upload_finalize.
# stdin JSON: { "name": <app id>, "content": <base64 payload> }.
upload_app() {
    # Spool the request body to disk; the payload may be too large for a
    # shell variable.
    local tmpinput="/tmp/rpcd_upload_$$.json"
    cat > "$tmpinput"

    local name
    name=$(jsonfilter -i "$tmpinput" -e '@.name' 2>/dev/null)
    # Sanitize to a valid UCI section name: [a-zA-Z0-9_] only.
    name=$(echo "$name" | sed 's/[^a-zA-Z0-9_]/_/g; s/^_*//; s/_*$//')

    if [ -z "$name" ]; then
        rm -f "$tmpinput"
        json_error "Missing name"
        return
    fi

    # Extract the base64 payload straight to a file as well.
    local b64file="/tmp/rpcd_b64_$$.txt"
    jsonfilter -i "$tmpinput" -e '@.content' > "$b64file" 2>/dev/null
    rm -f "$tmpinput"

    if [ ! -s "$b64file" ]; then
        rm -f "$b64file"
        json_error "Missing content"
        return
    fi

    local data_path
    config_load "$CONFIG"
    config_get data_path main data_path "/srv/streamlit"
    mkdir -p "$data_path/apps"

    # Decode content to temp file first
    local tmpfile="/tmp/upload_${name}_$$.bin"
    base64 -d < "$b64file" > "$tmpfile" 2>/dev/null
    local rc=$?
    rm -f "$b64file"

    if [ $rc -ne 0 ] || [ ! -s "$tmpfile" ]; then
        rm -f "$tmpfile"
        json_error "Failed to decode content"
        return
    fi

    # KISS: Auto-detect ZIP by magic bytes (PK = 0x504B)
    local is_zip_file=0
    local magic=$(head -c2 "$tmpfile" 2>/dev/null)
    [ "$magic" = "PK" ] && is_zip_file=1

    local app_file="$data_path/apps/${name}.py"

    if [ "$is_zip_file" = "1" ]; then
        # Extract app.py from ZIP archive
        local tmpdir="/tmp/extract_${name}_$$"
        mkdir -p "$tmpdir"

        # Use Python to extract (write script to file to avoid stdin conflict with RPCD)
        local pyscript="/tmp/extract_$$.py"
        cat > "$pyscript" << 'PYEOF'
import zipfile, sys, os
tmpfile, app_file, tmpdir = sys.argv[1], sys.argv[2], sys.argv[3]
try:
    z = zipfile.ZipFile(tmpfile)
    app_py = None
    req_txt = None
    for n in z.namelist():
        bn = os.path.basename(n)
        if bn == "app.py":
            app_py = n
        elif bn == "requirements.txt":
            req_txt = n
        elif bn.endswith(".py") and not app_py:
            app_py = n
    if not app_py:
        sys.exit(1)
    content = z.read(app_py).decode("utf-8", errors="replace")
    if not content.startswith("# -*- coding"):
        content = "# -*- coding: utf-8 -*-\n" + content
    with open(app_file, "w") as f:
        f.write(content)
    if req_txt:
        try:
            z.extract(req_txt, tmpdir)
        except:
            pass
    sys.exit(0)
except Exception:
    sys.exit(1)
PYEOF
        # Script exit code 0 means an entry-point .py was written to $app_file.
        python3 "$pyscript" "$tmpfile" "$app_file" "$tmpdir" 2>/dev/null
        rc=$?
        rm -f "$tmpfile" "$pyscript"

        if [ $rc -ne 0 ] || [ ! -s "$app_file" ]; then
            rm -rf "$tmpdir"
            json_error "No Python file found in ZIP"
            return
        fi

        # Install requirements if found
        # NOTE(review): z.extract() preserves archive paths, so a nested
        # "sub/requirements.txt" lands at $tmpdir/sub/... and this check
        # misses it — confirm only root-level requirements are expected.
        if [ -f "$tmpdir/requirements.txt" ] && lxc_running; then
            cp "$tmpdir/requirements.txt" "$data_path/apps/${name}_requirements.txt"
            # "/srv/apps" inside the container maps to $data_path/apps here.
            lxc-attach -n "$LXC_NAME" -- pip3 install --break-system-packages \
                -r "/srv/apps/${name}_requirements.txt" >/dev/null 2>&1 &
        fi
        rm -rf "$tmpdir"
    else
        # Plain .py file - add encoding declaration if needed
        local first_line=$(head -c 50 "$tmpfile" 2>/dev/null)
        if ! echo "$first_line" | grep -q "coding"; then
            printf '# -*- coding: utf-8 -*-\n' > "$app_file"
            cat "$tmpfile" >> "$app_file"
        else
            mv "$tmpfile" "$app_file"
        fi
        rm -f "$tmpfile"
    fi

    if [ ! -s "$app_file" ]; then
        json_error "Failed to create app file"
        return
    fi

    # Register/update the app's UCI section.
    uci set "${CONFIG}.${name}=app"
    uci set "${CONFIG}.${name}.name=$name"
    uci set "${CONFIG}.${name}.path=${name}.py"
    uci set "${CONFIG}.${name}.enabled=1"
    uci commit "$CONFIG"

    # Restart instance if running (for re-uploads)
    if lxc_running; then
        local port=$(uci -q get "${CONFIG}.${name}.port")
        if [ -n "$port" ]; then
            lxc-attach -n "$LXC_NAME" -- pkill -f "port=$port" 2>/dev/null
            sleep 1
            streamlitctl instance start "$name" >/dev/null 2>&1 &
        fi
    fi

    # Auto-create Gitea repo and push (background)
    streamlitctl gitea push "$name" >/dev/null 2>&1 &
    json_success "App uploaded: $name"
}
|
|
|
|
# Chunked upload: receive a base64 chunk and append to temp file
# stdin JSON: { "name": <app id>, "data": <base64 fragment>, "index": <int> }.
# Chunk 0 truncates the staging file; later chunks append to it.
upload_chunk() {
    # Buffer the RPC payload on disk; chunk data can be large.
    local req="/tmp/rpcd_chunk_$$.json"
    cat > "$req"

    local name chunk_data chunk_index
    name=$(jsonfilter -i "$req" -e '@.name' 2>/dev/null)
    chunk_data=$(jsonfilter -i "$req" -e '@.data' 2>/dev/null)
    chunk_index=$(jsonfilter -i "$req" -e '@.index' 2>/dev/null)
    rm -f "$req"

    # Same sanitizing rule as upload_finalize so the staging file matches.
    name=$(echo "$name" | sed 's/[^a-zA-Z0-9_]/_/g; s/^_*//; s/_*$//')

    if [ -z "$name" ] || [ -z "$chunk_data" ]; then
        json_error "Missing name or data"
        return
    fi

    local staging="/tmp/streamlit_upload_${name}.b64"

    case "$chunk_index" in
        0) printf '%s' "$chunk_data" > "$staging" ;;
        *) printf '%s' "$chunk_data" >> "$staging" ;;
    esac

    json_success "Chunk $chunk_index received"
}
|
|
|
|
# Finalize chunked upload: decode accumulated base64 and save
# stdin JSON: { "name": <app id>, "is_zip": <"1"|"true"|anything else> }.
# Consumes the staging file /tmp/streamlit_upload_<name>.b64 written by
# upload_chunk; ZIP payloads become an app directory, plain payloads a .py.
upload_finalize() {
    local tmpinput="/tmp/rpcd_finalize_$$.json"
    cat > "$tmpinput"

    local name is_zip
    name=$(jsonfilter -i "$tmpinput" -e '@.name' 2>/dev/null)
    is_zip=$(jsonfilter -i "$tmpinput" -e '@.is_zip' 2>/dev/null)
    rm -f "$tmpinput"

    # Must match upload_chunk's sanitizing rule or the staging file is missed.
    name=$(echo "$name" | sed 's/[^a-zA-Z0-9_]/_/g; s/^_*//; s/_*$//')

    if [ -z "$name" ]; then
        json_error "Missing name"
        return
    fi

    local staging="/tmp/streamlit_upload_${name}.b64"
    if [ ! -s "$staging" ]; then
        json_error "No upload data found for $name"
        return
    fi

    local data_path
    config_load "$CONFIG"
    config_get data_path main data_path "/srv/streamlit"
    mkdir -p "$data_path/apps"

    if [ "$is_zip" = "1" ] || [ "$is_zip" = "true" ]; then
        # Decode as ZIP and extract
        local tmpzip="/tmp/upload_${name}_$$.zip"
        base64 -d < "$staging" > "$tmpzip" 2>/dev/null
        rm -f "$staging"

        if [ ! -s "$tmpzip" ]; then
            rm -f "$tmpzip"
            json_error "Failed to decode ZIP"
            return
        fi

        # ZIP apps are deployed as a directory, flattened when single-rooted.
        local app_dir="$data_path/apps/$name"
        mkdir -p "$app_dir"
        extract_zip_flatten "$tmpzip" "$app_dir"
        rm -f "$tmpzip"

        # First .py (up to two levels deep) becomes the registered entry point.
        local main_py
        main_py=$(find "$app_dir" -maxdepth 2 -name "*.py" -type f | head -1)
        if [ -n "$main_py" ]; then
            # Install requirements if found (requirements.txt or requirements*.txt)
            if lxc_running; then
                local req_file=""
                if [ -f "$app_dir/requirements.txt" ]; then
                    req_file="requirements.txt"
                else
                    req_file=$(ls -1 "$app_dir"/requirements*.txt 2>/dev/null | head -1 | xargs basename 2>/dev/null)
                fi
                if [ -n "$req_file" ]; then
                    # "/srv/apps" inside the container maps to $data_path/apps.
                    lxc-attach -n "$LXC_NAME" -- pip3 install --break-system-packages -r "/srv/apps/${name}/${req_file}" >/dev/null 2>&1 &
                fi
            fi
            uci set "${CONFIG}.${name}=app"
            uci set "${CONFIG}.${name}.name=$name"
            uci set "${CONFIG}.${name}.path=$main_py"
            uci set "${CONFIG}.${name}.enabled=1"
            uci commit "$CONFIG"
            # Auto-push to Gitea if configured (background)
            streamlitctl gitea push "$name" >/dev/null 2>&1 &
            json_success "ZIP app deployed: $name"
        else
            json_error "No Python files found in archive"
        fi
    else
        # Decode as .py file
        local app_file="$data_path/apps/${name}.py"
        base64 -d < "$staging" > "$app_file" 2>/dev/null
        local rc=$?
        rm -f "$staging"

        if [ $rc -eq 0 ] && [ -s "$app_file" ]; then
            uci set "${CONFIG}.${name}=app"
            uci set "${CONFIG}.${name}.name=$name"
            uci set "${CONFIG}.${name}.path=${name}.py"
            uci set "${CONFIG}.${name}.enabled=1"
            uci commit "$CONFIG"
            # Auto-push to Gitea if configured (background)
            streamlitctl gitea push "$name" >/dev/null 2>&1 &
            json_success "App uploaded: $name"
        else
            rm -f "$app_file"
            json_error "Failed to decode app content"
        fi
    fi
}
|
|
|
|
# List instances
# Emits { "result": { "instances": [ {id,name,app,port,enabled,autostart},
#   ... ] } } by iterating every UCI section of type "instance".
list_instances() {
    json_init_obj
    json_add_array "instances"

    config_load "$CONFIG"

    # config_foreach callback: emit one JSON object per instance section.
    # $1 - UCI section name
    _add_instance_json() {
        local section="$1"
        local name app port enabled autostart inst_name

        config_get inst_name "$section" name ""
        config_get app "$section" app ""
        config_get port "$section" port ""
        config_get enabled "$section" enabled "0"
        config_get autostart "$section" autostart "0"

        # Sections without an app reference are skipped as malformed.
        [ -z "$app" ] && return

        json_add_object ""
        json_add_string "id" "$section"
        json_add_string "name" "$inst_name"
        json_add_string "app" "$app"
        # NOTE(review): port may be empty here; json_add_int is fed that
        # empty string and relies on jshn's coercion — confirm acceptable.
        json_add_int "port" "$port"
        json_add_boolean "enabled" "$( [ "$enabled" = "1" ] && echo 1 || echo 0 )"
        json_add_boolean "autostart" "$( [ "$autostart" = "1" ] && echo 1 || echo 0 )"
        json_close_object
    }

    config_foreach _add_instance_json instance

    json_close_array
    json_close_obj
}
|
|
|
|
# Add instance
# stdin JSON: { "id": <section>, "name": <optional display name>,
#   "app": <app ref>, "port": <numeric port> }.
# Creates a new UCI "instance" section; refuses to overwrite an existing one.
add_instance() {
    read -r input
    local id name app port
    id=$(echo "$input" | jsonfilter -e '@.id' 2>/dev/null)
    name=$(echo "$input" | jsonfilter -e '@.name' 2>/dev/null)
    app=$(echo "$input" | jsonfilter -e '@.app' 2>/dev/null)
    port=$(echo "$input" | jsonfilter -e '@.port' 2>/dev/null)

    if [ -z "$id" ] || [ -z "$app" ] || [ -z "$port" ]; then
        json_error "Missing id, app, or port"
        return
    fi

    # Display name defaults to the section id.
    [ -n "$name" ] || name="$id"

    # Port must be strictly numeric (non-empty guaranteed above).
    case "$port" in
        *[!0-9]*) json_error "Invalid port number"; return ;;
    esac

    # Refuse to clobber an existing section.
    if [ -n "$(uci -q get "${CONFIG}.${id}")" ]; then
        json_error "Instance $id already exists"
        return
    fi

    local base="${CONFIG}.${id}"
    uci set "${base}=instance"
    uci set "${base}.name=$name"
    uci set "${base}.app=$app"
    uci set "${base}.port=$port"
    uci set "${base}.enabled=1"
    uci set "${base}.autostart=1"
    uci commit "$CONFIG"

    json_success "Instance added: $id"
}
|
|
|
|
# Remove instance
# stdin JSON: { "id": <section> }. Deletes the instance's UCI section.
remove_instance() {
    read -r input
    local id
    id=$(echo "$input" | jsonfilter -e '@.id' 2>/dev/null)

    [ -n "$id" ] || { json_error "Missing instance id"; return; }

    # Refuse to "remove" something that was never configured.
    [ -n "$(uci -q get "${CONFIG}.${id}")" ] || { json_error "Instance $id not found"; return; }

    uci delete "${CONFIG}.${id}"
    uci commit "$CONFIG"

    json_success "Instance removed: $id"
}
|
|
|
|
# Rename app (updates both display name and file/folder name)
# stdin JSON: { "id": <current id>, "name": <new display name>,
#   "new_id": <optional new filesystem/UCI id> }.
# When the id changes, the app file/folder is moved and every instance
# section referencing the old id is repointed.
rename_app() {
    read -r input
    local id new_name new_id
    id=$(echo "$input" | jsonfilter -e '@.id' 2>/dev/null)
    new_name=$(echo "$input" | jsonfilter -e '@.name' 2>/dev/null)
    new_id=$(echo "$input" | jsonfilter -e '@.new_id' 2>/dev/null)

    if [ -z "$id" ] || [ -z "$new_name" ]; then
        json_error "Missing id or name"
        return
    fi

    # If new_id not provided, sanitize new_name to create it
    [ -z "$new_id" ] && new_id=$(echo "$new_name" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9]/_/g')

    local data_path=$(uci -q get "${CONFIG}.main.data_path")
    [ -z "$data_path" ] && data_path="/srv/streamlit"
    local apps_path="$data_path/apps"

    # Check if renaming filesystem (folder or file)
    local old_path=""
    local new_path=""
    if [ -d "$apps_path/$id" ]; then
        old_path="$apps_path/$id"
        new_path="$apps_path/$new_id"
    elif [ -f "$apps_path/${id}.py" ]; then
        old_path="$apps_path/${id}.py"
        new_path="$apps_path/${new_id}.py"
    fi

    # Rename filesystem if different
    if [ -n "$old_path" ] && [ "$id" != "$new_id" ]; then
        if [ -e "$new_path" ]; then
            json_error "Destination already exists: $new_id"
            return
        fi
        # NOTE(review): mv failure is not checked; the UCI updates below
        # proceed even if the rename on disk failed.
        mv "$old_path" "$new_path"
    fi

    # Update display name in UCI (creating the app section if missing)
    local existing
    existing=$(uci -q get "${CONFIG}.${id}")
    if [ -z "$existing" ]; then
        uci set "${CONFIG}.${id}=app"
        uci set "${CONFIG}.${id}.enabled=1"
    fi
    uci set "${CONFIG}.${id}.name=$new_name"

    # If id changed, update all instance references
    if [ "$id" != "$new_id" ]; then
        config_load "$CONFIG"
        # config_foreach callback; reads $id, $new_id and $new_path from
        # the enclosing call via shell dynamic scoping.
        _update_instance_refs() {
            local section="$1"
            local app
            app=$(uci -q get "${CONFIG}.${section}.app")
            if [ "$app" = "$id" ] || [ "$app" = "${id}.py" ]; then
                # Update to new app reference: directory apps are referenced
                # by bare id, single-file apps by <id>.py.
                if [ -d "$new_path" ]; then
                    uci set "${CONFIG}.${section}.app=$new_id"
                else
                    uci set "${CONFIG}.${section}.app=${new_id}.py"
                fi
            fi
        }
        config_foreach _update_instance_refs instance
    fi

    uci commit "$CONFIG"

    # Auto-push to Gitea if configured
    local gitea_enabled=$(uci -q get "${CONFIG}.gitea.enabled")
    if [ "$gitea_enabled" = "1" ]; then
        streamlitctl gitea push "$new_id" >/dev/null 2>&1 &
    fi

    json_init_obj
    json_add_boolean "success" 1
    json_add_string "message" "App renamed"
    json_add_string "old_id" "$id"
    json_add_string "new_id" "$new_id"
    json_add_string "name" "$new_name"
    json_close_obj
}
|
|
|
|
# Rename instance (updates display name and optionally domain/vhost)
# stdin JSON: { "id": <section>, "name": <display name>,
#   "domain": <optional new public domain> }.
# A domain change is only honored for emancipated instances and rewrites the
# HAProxy vhost + certificate config and the mitmproxy route table, then
# reloads both services in the background.
rename_instance() {
    read -r input
    local id name new_domain
    id=$(echo "$input" | jsonfilter -e '@.id' 2>/dev/null)
    name=$(echo "$input" | jsonfilter -e '@.name' 2>/dev/null)
    new_domain=$(echo "$input" | jsonfilter -e '@.domain' 2>/dev/null)

    if [ -z "$id" ] || [ -z "$name" ]; then
        json_error "Missing id or name"
        return
    fi

    local existing
    existing=$(uci -q get "${CONFIG}.${id}")
    if [ -z "$existing" ]; then
        json_error "Instance $id not found"
        return
    fi

    # Update display name
    uci set "${CONFIG}.${id}.name=$name"

    # If new domain provided and instance is emancipated, update vhost
    local emancipated=$(uci -q get "${CONFIG}.${id}.emancipated")
    local old_domain=$(uci -q get "${CONFIG}.${id}.domain")
    local port=$(uci -q get "${CONFIG}.${id}.port")

    if [ "$emancipated" = "1" ] && [ -n "$new_domain" ] && [ "$new_domain" != "$old_domain" ]; then
        # Use tr '.-' '_' to match streamlitctl vhost naming
        local old_vhost=$(echo "$old_domain" | tr '.-' '_')
        local new_vhost=$(echo "$new_domain" | tr '.-' '_')
        local backend_name="streamlit_${id}"

        # Remove old vhost and cert entries
        uci delete "haproxy.${old_vhost}" 2>/dev/null
        uci delete "haproxy.cert_${old_vhost}" 2>/dev/null

        # Create new vhost with WAF routing
        uci set "haproxy.${new_vhost}=vhost"
        uci set "haproxy.${new_vhost}.domain=${new_domain}"
        uci set "haproxy.${new_vhost}.backend=mitmproxy_inspector"
        uci set "haproxy.${new_vhost}.ssl=1"
        uci set "haproxy.${new_vhost}.ssl_redirect=1"
        uci set "haproxy.${new_vhost}.acme=1"
        uci set "haproxy.${new_vhost}.enabled=1"

        # Create new certificate entry
        uci set "haproxy.cert_${new_vhost}=certificate"
        uci set "haproxy.cert_${new_vhost}.domain=${new_domain}"
        uci set "haproxy.cert_${new_vhost}.type=acme"
        uci set "haproxy.cert_${new_vhost}.enabled=1"

        uci commit haproxy

        # Update instance domain FIRST (before slow operations)
        uci set "${CONFIG}.${id}.domain=${new_domain}"
        uci commit "$CONFIG"

        # Update mitmproxy routes (use sed, jq may not be available)
        # NOTE(review): textual sed edits of JSON are fragile; they assume
        # the exact one-line {"domain":["ip",port],...} layout the generator
        # emits — confirm the file is never pretty-printed.
        local routes_file="/srv/mitmproxy/haproxy-routes.json"
        if [ -f "$routes_file" ]; then
            # Remove old route
            sed -i "s/,\"${old_domain}\":\[\"[^\"]*\",[0-9]*\]//g" "$routes_file" 2>/dev/null || true
            sed -i "s/\"${old_domain}\":\[\"[^\"]*\",[0-9]*\],//g" "$routes_file" 2>/dev/null || true
            # Add new route
            sed -i "s/}$/,\"${new_domain}\":[\"192.168.255.1\",${port}]}/" "$routes_file" 2>/dev/null || true
        fi

        # Reload HAProxy and mitmproxy in background (don't block RPC)
        (
            haproxyctl generate >/dev/null 2>&1
            haproxyctl reload >/dev/null 2>&1
            /etc/init.d/mitmproxy restart >/dev/null 2>&1
            # Request new certificate if not on gk2 wildcard
            case "$new_domain" in
                *.gk2.secubox.in) ;;
                *) haproxyctl cert add "$new_domain" >/dev/null 2>&1 ;;
            esac
        ) &
    fi

    # Commit any remaining changes (display name if domain wasn't changed)
    uci commit "$CONFIG" 2>/dev/null || true

    json_init_obj
    json_add_boolean "success" 1
    json_add_string "message" "Instance renamed"
    json_add_string "id" "$id"
    json_add_string "name" "$name"
    [ -n "$new_domain" ] && json_add_string "domain" "$new_domain"
    json_close_obj
}
|
|
|
|
# Enable instance
#
# rpcd method: stdin JSON { "id": str }
# Sets the instance's UCI "enabled" flag to 1 and commits.
enable_instance() {
	read -r input

	local inst_id
	inst_id=$(echo "$input" | jsonfilter -e '@.id' 2>/dev/null)

	# Guard: an id is mandatory
	if [ -z "$inst_id" ]; then
		json_error "Missing instance id"
		return
	fi

	uci set "${CONFIG}.${inst_id}.enabled=1"
	uci commit "$CONFIG"

	json_success "Instance enabled: $inst_id"
}
|
|
|
|
# Disable instance
#
# rpcd method: stdin JSON { "id": str }
# Sets the instance's UCI "enabled" flag to 0 and commits.
disable_instance() {
	read -r input

	local inst_id
	inst_id=$(echo "$input" | jsonfilter -e '@.id' 2>/dev/null)

	# Guard: an id is mandatory
	if [ -z "$inst_id" ]; then
		json_error "Missing instance id"
		return
	fi

	uci set "${CONFIG}.${inst_id}.enabled=0"
	uci commit "$CONFIG"

	json_success "Instance disabled: $inst_id"
}
|
|
|
|
# Preview ZIP contents
#
# rpcd method: stdin JSON { "content": "<base64-encoded zip>" }
# Output: { "result": { "files": [ { path, size, is_dir }, ... ] } }
preview_zip() {
	# Write stdin to temp file to avoid shell variable size limits
	local tmpinput="/tmp/rpcd_preview_$$.json"
	cat > "$tmpinput"

	local tmpzip="/tmp/preview_$$.zip"
	jsonfilter -i "$tmpinput" -e '@.content' 2>/dev/null | base64 -d > "$tmpzip" 2>/dev/null
	rm -f "$tmpinput"

	if [ ! -s "$tmpzip" ]; then
		rm -f "$tmpzip"
		json_error "Failed to decode ZIP"
		return
	fi

	json_init_obj
	json_add_array "files"

	# BUGFIX: the listing must NOT be piped into the while loop.
	# A pipeline runs the loop in a subshell, and jshn keeps its JSON
	# state in shell variables, so every json_add_* inside the loop was
	# silently discarded and "files" came back empty. Write the listing
	# to a temp file and redirect it into the loop instead.
	local tmplist="/tmp/preview_list_$$.txt"
	# Skip unzip's header (first 3 lines) and trailer (last 2 lines)
	unzip -l "$tmpzip" 2>/dev/null | tail -n +4 | head -n -2 > "$tmplist"

	local size date time name is_dir
	while read -r size date time name; do
		[ -z "$name" ] && continue
		is_dir=0
		# Directory entries in the listing end with a slash
		echo "$name" | grep -q '/$' && is_dir=1

		json_add_object ""
		json_add_string "path" "$name"
		json_add_int "size" "$size"
		json_add_boolean "is_dir" "$is_dir"
		json_close_object
	done < "$tmplist"
	rm -f "$tmplist"

	json_close_array
	json_close_obj

	rm -f "$tmpzip"
}
|
|
|
|
# Upload ZIP with selected files
#
# rpcd method: stdin JSON
#   { "name": str, "content": "<base64 zip>",
#     "selected_files": [path, ...] (optional) }
# Extracts the archive into <data_path>/apps/<name> (optionally limited
# to the selected paths), installs Python requirements inside the LXC
# container, registers the app in UCI and triggers a background Gitea
# push. Fails if no .py file is found after extraction.
upload_zip() {
	# Write stdin to temp file to avoid shell variable size limits
	local tmpinput="/tmp/rpcd_zipinput_$$.json"
	cat > "$tmpinput"

	local name selected_files
	name=$(jsonfilter -i "$tmpinput" -e '@.name' 2>/dev/null)
	selected_files=$(jsonfilter -i "$tmpinput" -e '@.selected_files' 2>/dev/null)

	# Sanitize name for UCI compatibility (alphanumeric and underscores only)
	name=$(echo "$name" | sed 's/[^a-zA-Z0-9_]/_/g; s/^_*//; s/_*$//')

	if [ -z "$name" ]; then
		rm -f "$tmpinput"
		json_error "Missing name"
		return
	fi

	local data_path
	config_load "$CONFIG"
	config_get data_path main data_path "/srv/streamlit"

	local app_dir="$data_path/apps/$name"
	local tmpzip="/tmp/upload_$$.zip"

	# Extract base64 content and decode directly to zip file
	jsonfilter -i "$tmpinput" -e '@.content' 2>/dev/null | base64 -d > "$tmpzip" 2>/dev/null
	rm -f "$tmpinput"

	if [ ! -s "$tmpzip" ]; then
		rm -f "$tmpzip"
		json_error "Failed to decode ZIP"
		return
	fi

	mkdir -p "$app_dir"

	# Extract selected files or all if none specified
	local file_count=$(echo "$selected_files" | jsonfilter -e '@[*]' 2>/dev/null | wc -l)
	if [ -n "$selected_files" ] && [ "$file_count" -gt 0 ] 2>/dev/null; then
		# Parse selected files array and extract each.
		# The pipe subshell is harmless here: unzip only writes to
		# disk, no shell state needs to survive the loop.
		echo "$selected_files" | jsonfilter -e '@[*]' 2>/dev/null | while read -r filepath; do
			[ -z "$filepath" ] && continue
			unzip -o "$tmpzip" "$filepath" -d "$app_dir" >/dev/null 2>&1
		done
	else
		# Extract all
		extract_zip_flatten "$tmpzip" "$app_dir"
	fi

	rm -f "$tmpzip"

	# Find main .py file for registration
	local main_py
	main_py=$(find "$app_dir" -maxdepth 2 -name "*.py" -type f | head -1)

	if [ -n "$main_py" ]; then
		# Install requirements if found (requirements.txt or requirements*.txt)
		if lxc_running; then
			local req_file=""
			if [ -f "$app_dir/requirements.txt" ]; then
				req_file="requirements.txt"
			else
				# Fall back to the first requirements*.txt variant
				req_file=$(ls -1 "$app_dir"/requirements*.txt 2>/dev/null | head -1 | xargs basename 2>/dev/null)
			fi
			if [ -n "$req_file" ]; then
				# Fire-and-forget pip install inside the container.
				# NOTE(review): assumes /srv/apps inside the container
				# maps to $data_path/apps on the host — confirm mount.
				lxc-attach -n "$LXC_NAME" -- pip3 install --break-system-packages -r "/srv/apps/${name}/${req_file}" >/dev/null 2>&1 &
			fi
		fi

		# Register in UCI
		uci set "${CONFIG}.${name}=app"
		uci set "${CONFIG}.${name}.name=$name"
		uci set "${CONFIG}.${name}.path=$main_py"
		uci set "${CONFIG}.${name}.enabled=1"
		uci commit "$CONFIG"

		# Auto-create Gitea repo and push (background)
		streamlitctl gitea push "$name" >/dev/null 2>&1 &

		json_init_obj
		json_add_boolean "success" 1
		json_add_string "message" "App deployed: $name"
		json_add_string "path" "$app_dir"
		json_add_string "main_file" "$main_py"
		json_close_obj
	else
		json_error "No Python files found in extracted archive"
	fi
}
|
|
|
|
# Get Gitea config
#
# Returns the Gitea integration settings. The token value itself is
# never exposed — only a boolean indicating whether one is stored.
get_gitea_config() {
	config_load "$CONFIG"
	local enabled url user token

	config_get enabled gitea enabled "0"
	config_get url gitea url ""
	config_get user gitea user ""
	config_get token gitea token ""

	# Normalize to explicit 0/1 flags for the JSON booleans
	local enabled_flag=0 token_flag=0
	[ "$enabled" = "1" ] && enabled_flag=1
	[ -n "$token" ] && token_flag=1

	json_init_obj
	json_add_boolean "enabled" "$enabled_flag"
	json_add_string "url" "$url"
	json_add_string "user" "$user"
	json_add_boolean "has_token" "$token_flag"
	json_close_obj
}
|
|
|
|
# Save Gitea config
#
# rpcd method: stdin JSON { enabled, url, user, token } (all optional).
# Only non-empty fields are written; absent fields keep their stored value.
save_gitea_config() {
	read -r input

	local enabled url user token
	enabled=$(echo "$input" | jsonfilter -e '@.enabled' 2>/dev/null)
	url=$(echo "$input" | jsonfilter -e '@.url' 2>/dev/null)
	user=$(echo "$input" | jsonfilter -e '@.user' 2>/dev/null)
	token=$(echo "$input" | jsonfilter -e '@.token' 2>/dev/null)

	# Create the gitea section on first save
	if ! uci -q get "${CONFIG}.gitea" >/dev/null; then
		uci set "${CONFIG}.gitea=gitea"
	fi

	[ -n "$enabled" ] && uci set "${CONFIG}.gitea.enabled=$enabled"
	[ -n "$url" ] && uci set "${CONFIG}.gitea.url=$url"
	[ -n "$user" ] && uci set "${CONFIG}.gitea.user=$user"
	[ -n "$token" ] && uci set "${CONFIG}.gitea.token=$token"

	uci commit "$CONFIG"

	json_success "Gitea configuration saved"
}
|
|
|
|
# Clone app from Gitea
#
# rpcd method: stdin JSON { "name": str, "repo": str }
# Starts "streamlitctl gitea clone" in the background and returns
# immediately; output is captured in /var/log/streamlit-gitea.log.
gitea_clone() {
	read -r input

	local app_name repo_path
	app_name=$(echo "$input" | jsonfilter -e '@.name' 2>/dev/null)
	repo_path=$(echo "$input" | jsonfilter -e '@.repo' 2>/dev/null)

	# Both the target app name and the source repo are required
	if [ -z "$app_name" ] || [ -z "$repo_path" ]; then
		json_error "Missing name or repo"
		return
	fi

	# Run clone in background
	/usr/sbin/streamlitctl gitea clone "$app_name" "$repo_path" >/var/log/streamlit-gitea.log 2>&1 &

	json_init_obj
	json_add_boolean "success" 1
	json_add_string "message" "Cloning $repo_path to $app_name in background"
	json_close_obj
}
|
|
|
|
# Pull app from Gitea
#
# rpcd method: stdin JSON { "name": str }
# Runs "streamlitctl gitea pull" synchronously and reports its result;
# command output is captured in /var/log/streamlit-gitea.log.
gitea_pull() {
	read -r input

	local app_name
	app_name=$(echo "$input" | jsonfilter -e '@.name' 2>/dev/null)

	if [ -z "$app_name" ]; then
		json_error "Missing app name"
		return
	fi

	# Test the command directly instead of inspecting $? afterwards
	if /usr/sbin/streamlitctl gitea pull "$app_name" >/var/log/streamlit-gitea.log 2>&1; then
		json_success "App updated from Gitea: $app_name"
	else
		json_error "Failed to pull app from Gitea"
	fi
}
|
|
|
|
# List Gitea repositories
#
# Queries the configured Gitea instance's /api/v1/user/repos endpoint
# with the stored token and returns the repos' full names.
# Output: { "result": { "repos": [ "owner/name", ... ] } }
gitea_list_repos() {
	config_load "$CONFIG"
	local enabled url user token

	config_get enabled gitea enabled "0"
	config_get url gitea url ""
	config_get user gitea user ""
	config_get token gitea token ""

	if [ "$enabled" != "1" ] || [ -z "$url" ] || [ -z "$token" ]; then
		json_error "Gitea not configured"
		return
	fi

	# Call Gitea API to list user repos
	local api_url="${url}/api/v1/user/repos"
	local response

	response=$(curl -s -H "Authorization: token $token" "$api_url" 2>/dev/null)

	if [ -z "$response" ]; then
		json_error "Failed to connect to Gitea"
		return
	fi

	json_init_obj
	json_add_array "repos"

	# BUGFIX: the repo names must NOT be piped into the while loop.
	# A pipeline runs the loop in a subshell, and jshn keeps its JSON
	# state in shell variables, so every json_add_string inside the loop
	# was silently discarded and "repos" came back empty. Extract the
	# names to a temp file and redirect it into the loop instead.
	local tmprepos="/tmp/gitea_repos_$$.txt"
	echo "$response" | jsonfilter -e '@[*].full_name' 2>/dev/null > "$tmprepos"

	local repo
	while read -r repo; do
		[ -z "$repo" ] && continue
		json_add_string "" "$repo"
	done < "$tmprepos"
	rm -f "$tmprepos"

	json_close_array
	json_close_obj
}
|
|
|
|
# Get app source code for editing
#
# rpcd method: stdin JSON { "name": str }
# Locates the app's main .py file and returns it base64-encoded:
#   { "result": { success, name, path, content: "<base64>" } }
get_source() {
	read -r input
	local name
	name=$(echo "$input" | jsonfilter -e '@.name' 2>/dev/null)

	if [ -z "$name" ]; then
		json_error "Missing app name"
		return
	fi

	local data_path
	config_load "$CONFIG"
	config_get data_path main data_path "/srv/streamlit"

	# Find the app file (either top-level .py or subdirectory with app.py)
	local app_file=""
	if [ -f "$data_path/apps/${name}.py" ]; then
		app_file="$data_path/apps/${name}.py"
	elif [ -f "$data_path/apps/${name}/app.py" ]; then
		app_file="$data_path/apps/${name}/app.py"
	elif [ -d "$data_path/apps/${name}" ]; then
		# Fallback: first .py found up to two levels deep
		app_file=$(find "$data_path/apps/${name}" -maxdepth 2 -name "*.py" -type f | head -1)
	fi

	if [ -z "$app_file" ] || [ ! -f "$app_file" ]; then
		json_error "App source not found"
		return
	fi

	# Build JSON output manually to avoid jshn argument size limits
	local tmpfile="/tmp/source_output_$$.json"
	printf '{"result":{"success":true,"name":"%s","path":"%s","content":"' "$name" "$app_file" > "$tmpfile"
	# Encode source as base64 to handle special characters.
	# -w 0 keeps the encoded output on a single line so the JSON string
	# is not broken by newlines.
	# NOTE(review): assumes the installed base64 supports -w (GNU
	# coreutils does; verify on busybox-only systems).
	base64 -w 0 < "$app_file" >> "$tmpfile"
	printf '"}}\n' >> "$tmpfile"
	cat "$tmpfile"
	rm -f "$tmpfile"
}
|
|
|
|
# Save edited app source code
#
# rpcd method: stdin JSON { "name": str, "content": "<base64 source>" }
# Decodes the content over the app's main .py file (creating a .bak
# backup first and restoring it on failure), then triggers a background
# Gitea push. A new top-level <name>.py file is created when the app
# does not exist yet.
save_source() {
	# Write stdin to temp file to avoid shell variable size limits
	local tmpinput="/tmp/rpcd_save_$$.json"
	cat > "$tmpinput"

	local name content
	name=$(jsonfilter -i "$tmpinput" -e '@.name' 2>/dev/null)
	# Sanitize name for UCI compatibility (alphanumeric and underscores only)
	name=$(echo "$name" | sed 's/[^a-zA-Z0-9_]/_/g; s/^_*//; s/_*$//')

	if [ -z "$name" ]; then
		rm -f "$tmpinput"
		json_error "Missing name"
		return
	fi

	local data_path
	config_load "$CONFIG"
	config_get data_path main data_path "/srv/streamlit"

	# Find the app file (same lookup order as get_source)
	local app_file=""
	if [ -f "$data_path/apps/${name}.py" ]; then
		app_file="$data_path/apps/${name}.py"
	elif [ -f "$data_path/apps/${name}/app.py" ]; then
		app_file="$data_path/apps/${name}/app.py"
	elif [ -d "$data_path/apps/${name}" ]; then
		app_file=$(find "$data_path/apps/${name}" -maxdepth 2 -name "*.py" -type f | head -1)
	fi

	if [ -z "$app_file" ]; then
		# New app - create as top-level .py
		app_file="$data_path/apps/${name}.py"
	fi

	# Extract and decode base64 content
	local b64file="/tmp/rpcd_b64_save_$$.txt"
	jsonfilter -i "$tmpinput" -e '@.content' > "$b64file" 2>/dev/null
	rm -f "$tmpinput"

	if [ ! -s "$b64file" ]; then
		rm -f "$b64file"
		json_error "Missing content"
		return
	fi

	# Create backup before overwriting
	[ -f "$app_file" ] && cp "$app_file" "${app_file}.bak"

	mkdir -p "$(dirname "$app_file")"
	base64 -d < "$b64file" > "$app_file" 2>/dev/null
	local rc=$?
	rm -f "$b64file"

	# Success requires both a clean decode and a non-empty result file
	if [ $rc -eq 0 ] && [ -s "$app_file" ]; then
		# Auto-push to Gitea (background)
		streamlitctl gitea push "$name" >/dev/null 2>&1 &
		json_success "Source saved: $name"
	else
		# Restore backup on failure
		[ -f "${app_file}.bak" ] && mv "${app_file}.bak" "$app_file"
		json_error "Failed to save source"
	fi
}
|
|
|
|
# Emancipate app - KISS ULTIME MODE multi-channel exposure
#
# rpcd method: stdin JSON { "name": str, "domain": str (optional) }
# Verifies the app has an instance with a configured port, then runs
# "streamlitctl emancipate" in the background (log file:
# /var/log/streamlit-emancipate.log) and returns immediately with the
# background PID.
emancipate() {
	read -r input
	local name domain
	name=$(echo "$input" | jsonfilter -e '@.name' 2>/dev/null)
	domain=$(echo "$input" | jsonfilter -e '@.domain' 2>/dev/null)

	if [ -z "$name" ]; then
		json_error "Missing app name"
		return
	fi

	# Check if app has an instance with a port
	config_load "$CONFIG"
	local port
	port=$(uci -q get "${CONFIG}.${name}.port")
	if [ -z "$port" ]; then
		# Try to find instance with matching app: scan the uci dump for
		# sections whose "app" option equals this name
		for section in $(uci -q show "$CONFIG" | grep "\.app=" | grep "='${name}'" | cut -d. -f2); do
			port=$(uci -q get "${CONFIG}.${section}.port")
			[ -n "$port" ] && break
		done
	fi

	if [ -z "$port" ]; then
		json_error "No instance found for app. Create an instance first."
		return
	fi

	# Run emancipate in background; capture its PID for the response
	/usr/sbin/streamlitctl emancipate "$name" "$domain" >/var/log/streamlit-emancipate.log 2>&1 &
	local pid=$!

	json_init_obj
	json_add_boolean "success" 1
	json_add_string "message" "Emancipation started for $name"
	json_add_string "domain" "$domain"
	json_add_int "port" "$port"
	json_add_int "pid" "$pid"
	json_close_obj
}
|
|
|
|
# Test uploaded app - validate syntax and imports before finalize
#
# rpcd method: stdin JSON { "name": str }
# Validates a previously staged upload (/tmp/streamlit_upload_<name>.b64):
#   1. minimum size check
#   2. Python syntax check via py_compile inside the LXC container
#      (skipped when the container is down)
#   3. presence of a streamlit import (warning only)
#   4. informational scan for shell/eval usage
# Output: { valid, errors, warnings, size, lines,
#           has_streamlit_import, container_running }
test_upload() {
	read -r input
	local name
	name=$(echo "$input" | jsonfilter -e '@.name' 2>/dev/null)
	# Sanitize name for UCI compatibility (alphanumeric and underscores only)
	name=$(echo "$name" | sed 's/[^a-zA-Z0-9_]/_/g; s/^_*//; s/_*$//')

	if [ -z "$name" ]; then
		json_error "Missing app name"
		return
	fi

	# Check if staging file exists
	local staging="/tmp/streamlit_upload_${name}.b64"
	if [ ! -s "$staging" ]; then
		json_error "No pending upload for $name"
		return
	fi

	# Decode to temp file for testing
	local tmppy="/tmp/test_upload_${name}.py"
	base64 -d < "$staging" > "$tmppy" 2>/dev/null

	if [ ! -s "$tmppy" ]; then
		rm -f "$tmppy"
		json_error "Failed to decode upload data"
		return
	fi

	local errors=""
	local warnings=""
	local file_size=$(stat -c %s "$tmppy" 2>/dev/null || echo "0")
	local line_count=$(wc -l < "$tmppy" 2>/dev/null || echo "0")

	# Check 1: Basic file validation — reject trivially small files early
	if [ "$file_size" -lt 10 ]; then
		errors="File too small (${file_size} bytes)"
		rm -f "$tmppy"
		json_init_obj
		json_add_boolean "valid" 0
		json_add_string "errors" "$errors"
		json_close_obj
		return
	fi

	# Check 2: Python syntax validation (inside container if running)
	local syntax_valid=1
	local syntax_error=""
	if lxc_running; then
		# Copy file into container for validation
		cp "$tmppy" "$LXC_PATH/$LXC_NAME/rootfs/tmp/test_syntax.py" 2>/dev/null
		# $? after the command substitution is py_compile's exit status
		syntax_error=$(lxc-attach -n "$LXC_NAME" -- python3 -m py_compile /tmp/test_syntax.py 2>&1)
		if [ $? -ne 0 ]; then
			syntax_valid=0
			errors="Python syntax error: $syntax_error"
		fi
		rm -f "$LXC_PATH/$LXC_NAME/rootfs/tmp/test_syntax.py"
	else
		# Container not running - just check for obvious issues
		# Check for shebang or encoding issues.
		# NOTE(review): relies on grep interpreting \xef\xbb\xbf escapes
		# (GNU/busybox extension) — confirm on the target grep.
		if head -1 "$tmppy" | grep -q '^\xef\xbb\xbf'; then
			warnings="File has UTF-8 BOM marker"
		fi
	fi

	# Check 3: Look for Streamlit import
	local has_streamlit=0
	if grep -qE '^\s*(import streamlit|from streamlit)' "$tmppy"; then
		has_streamlit=1
	fi
	if [ "$has_streamlit" = "0" ]; then
		# ${warnings:+...} prepends "; " only when warnings is non-empty
		warnings="${warnings:+$warnings; }No streamlit import found - may not be a Streamlit app"
	fi

	# Check 4: Check for obvious security issues (informational)
	if grep -qE 'subprocess\.(call|run|Popen)|os\.system|eval\(' "$tmppy"; then
		warnings="${warnings:+$warnings; }Contains shell/eval calls - review code"
	fi

	rm -f "$tmppy"

	json_init_obj
	json_add_boolean "valid" "$syntax_valid"
	json_add_string "errors" "$errors"
	json_add_string "warnings" "$warnings"
	json_add_int "size" "$file_size"
	json_add_int "lines" "$line_count"
	json_add_boolean "has_streamlit_import" "$has_streamlit"
	json_add_boolean "container_running" "$( lxc_running && echo 1 || echo 0 )"
	json_close_obj
}
|
|
|
|
# Get emancipation status for an app
#
# rpcd method: stdin JSON { "name": str }
# Reads emancipation state from the app's own UCI section, falling back
# to any instance section whose "app" option matches. Also reports the
# ACME certificate status for the exposed domain when emancipated.
get_emancipation() {
	read -r input
	local name
	name=$(echo "$input" | jsonfilter -e '@.name' 2>/dev/null)

	if [ -z "$name" ]; then
		json_error "Missing app name"
		return
	fi

	config_load "$CONFIG"
	local emancipated emancipated_at domain port auth_required
	emancipated=$(uci -q get "${CONFIG}.${name}.emancipated")
	emancipated_at=$(uci -q get "${CONFIG}.${name}.emancipated_at")
	domain=$(uci -q get "${CONFIG}.${name}.domain")
	port=$(uci -q get "${CONFIG}.${name}.port")
	auth_required=$(uci -q get "${CONFIG}.${name}.auth_required")

	# Also check instances: fill any field still missing from the first
	# instance section that provides it
	if [ -z "$port" ] || [ -z "$domain" ]; then
		for section in $(uci -q show "$CONFIG" | grep "\.app=" | grep "='${name}'" | cut -d. -f2); do
			[ -z "$port" ] && port=$(uci -q get "${CONFIG}.${section}.port")
			[ -z "$domain" ] && domain=$(uci -q get "${CONFIG}.${section}.domain")
			[ -z "$emancipated" ] && emancipated=$(uci -q get "${CONFIG}.${section}.emancipated")
			[ -z "$auth_required" ] && auth_required=$(uci -q get "${CONFIG}.${section}.auth_required")
			[ -n "$port" ] && [ -n "$domain" ] && break
		done
	fi

	# Check certificate status if emancipated
	local cert_valid=0
	local cert_expires=""
	if [ "$emancipated" = "1" ] && [ -n "$domain" ]; then
		local cert_file="/srv/haproxy/certs/${domain}.pem"
		if [ -f "$cert_file" ]; then
			cert_valid=1
			# "notAfter=<date>" -> keep only the date part
			cert_expires=$(openssl x509 -enddate -noout -in "$cert_file" 2>/dev/null | cut -d= -f2)
		fi
	fi

	json_init_obj
	json_add_boolean "emancipated" "$( [ "$emancipated" = "1" ] && echo 1 || echo 0 )"
	json_add_string "emancipated_at" "$emancipated_at"
	json_add_string "domain" "$domain"
	json_add_int "port" "${port:-0}"
	json_add_boolean "auth_required" "$( [ "$auth_required" = "1" ] && echo 1 || echo 0 )"
	json_add_boolean "cert_valid" "$cert_valid"
	json_add_string "cert_expires" "$cert_expires"
	json_close_obj
}
|
|
|
|
# One-click upload with auto instance creation (KISS: handles ZIP or .py)
#
# rpcd method: stdin JSON { "name": str, "content": "<base64>",
#                           "is_zip": bool (informational) }
# The payload type is auto-detected from the decoded bytes (ZIP magic
# "PK"), the main .py is installed as <data_path>/apps/<name>.py, any
# requirements.txt is pip-installed inside the container, the app is
# registered in UCI, an instance is auto-created on the next free port
# (>= 8501) and started.
upload_and_deploy() {
	# Write stdin to temp file to avoid shell variable size limits
	local tmpinput="/tmp/rpcd_deploy_$$.json"
	cat > "$tmpinput"

	local name content is_zip
	name=$(jsonfilter -i "$tmpinput" -e '@.name' 2>/dev/null)
	# is_zip is read from the request but detection below relies on the
	# file's magic bytes instead
	is_zip=$(jsonfilter -i "$tmpinput" -e '@.is_zip' 2>/dev/null)
	# Sanitize name for UCI compatibility (alphanumeric and underscores only)
	name=$(echo "$name" | sed 's/[^a-zA-Z0-9_]/_/g; s/^_*//; s/_*$//')

	if [ -z "$name" ]; then
		rm -f "$tmpinput"
		json_error "Missing name"
		return
	fi

	local b64file="/tmp/rpcd_b64_$$.txt"
	jsonfilter -i "$tmpinput" -e '@.content' > "$b64file" 2>/dev/null
	rm -f "$tmpinput"

	if [ ! -s "$b64file" ]; then
		rm -f "$b64file"
		json_error "Missing content"
		return
	fi

	local data_path
	config_load "$CONFIG"
	config_get data_path main data_path "/srv/streamlit"
	mkdir -p "$data_path/apps"

	# Decode content to temp file first
	local tmpfile="/tmp/upload_${name}_$$.bin"
	base64 -d < "$b64file" > "$tmpfile" 2>/dev/null
	local rc=$?
	rm -f "$b64file"

	if [ $rc -ne 0 ] || [ ! -s "$tmpfile" ]; then
		rm -f "$tmpfile"
		json_error "Failed to decode content"
		return
	fi

	# KISS: Auto-detect ZIP by magic bytes (PK = 0x504B)
	local is_zip_file=0
	local magic=$(head -c2 "$tmpfile" 2>/dev/null)
	[ "$magic" = "PK" ] && is_zip_file=1

	local app_file="$data_path/apps/${name}.py"

	if [ "$is_zip_file" = "1" ]; then
		# Extract app.py from ZIP archive
		local tmpdir="/tmp/extract_${name}_$$"
		mkdir -p "$tmpdir"

		# Use Python to extract (write script to file to avoid stdin conflict with RPCD).
		# The script picks app.py (or the first *.py), prepends a
		# coding declaration when missing, and extracts any
		# requirements.txt into $tmpdir. Exit 0 = extracted, 1 = no .py.
		local pyscript="/tmp/extract_$$.py"
		cat > "$pyscript" << 'PYEOF'
import zipfile, sys, os
tmpfile, app_file, tmpdir = sys.argv[1], sys.argv[2], sys.argv[3]
try:
    z = zipfile.ZipFile(tmpfile)
    app_py = None
    req_txt = None
    for n in z.namelist():
        bn = os.path.basename(n)
        if bn == "app.py":
            app_py = n
        elif bn == "requirements.txt":
            req_txt = n
        elif bn.endswith(".py") and not app_py:
            app_py = n
    if not app_py:
        sys.exit(1)
    content = z.read(app_py).decode("utf-8", errors="replace")
    if not content.startswith("# -*- coding"):
        content = "# -*- coding: utf-8 -*-\n" + content
    with open(app_file, "w") as f:
        f.write(content)
    if req_txt:
        try:
            z.extract(req_txt, tmpdir)
        except:
            pass
    sys.exit(0)
except Exception:
    sys.exit(1)
PYEOF
		python3 "$pyscript" "$tmpfile" "$app_file" "$tmpdir" 2>/dev/null
		rc=$?
		rm -f "$tmpfile" "$pyscript"

		if [ $rc -ne 0 ] || [ ! -s "$app_file" ]; then
			rm -rf "$tmpdir"
			json_error "No Python file found in ZIP"
			return
		fi

		# Install requirements if found (background pip inside container)
		if [ -f "$tmpdir/requirements.txt" ] && lxc_running; then
			cp "$tmpdir/requirements.txt" "$data_path/apps/${name}_requirements.txt"
			lxc-attach -n "$LXC_NAME" -- pip3 install --break-system-packages \
				-r "/srv/apps/${name}_requirements.txt" >/dev/null 2>&1 &
		fi
		rm -rf "$tmpdir"
	else
		# Plain .py file - add encoding declaration if needed
		local first_line=$(head -c 50 "$tmpfile" 2>/dev/null)
		if ! echo "$first_line" | grep -q "coding"; then
			printf '# -*- coding: utf-8 -*-\n' > "$app_file"
			cat "$tmpfile" >> "$app_file"
		else
			mv "$tmpfile" "$app_file"
		fi
		rm -f "$tmpfile"
	fi

	if [ ! -s "$app_file" ]; then
		json_error "Failed to create app file"
		return
	fi

	# Register app in UCI
	uci set "${CONFIG}.${name}=app"
	uci set "${CONFIG}.${name}.name=$name"
	uci set "${CONFIG}.${name}.path=${name}.py"
	uci set "${CONFIG}.${name}.enabled=1"

	# Find next available port (linear scan from 8501)
	local next_port=8501
	local used_ports=$(uci -q show "$CONFIG" | grep "\.port=" | cut -d= -f2 | tr -d "'" | sort -n)
	while echo "$used_ports" | grep -qw "$next_port"; do
		next_port=$((next_port + 1))
	done

	# Create instance automatically.
	# NOTE(review): this reuses the SAME section name as the app above,
	# so "uci set <name>=instance" retypes the just-created app section
	# (its name/path/enabled options are kept). Confirm a separate app
	# section is not expected by consumers of the "app" section type.
	uci set "${CONFIG}.${name}=instance"
	uci set "${CONFIG}.${name}.name=$name"
	uci set "${CONFIG}.${name}.app=$name"
	uci set "${CONFIG}.${name}.port=$next_port"
	uci set "${CONFIG}.${name}.enabled=1"
	uci set "${CONFIG}.${name}.autostart=1"
	uci commit "$CONFIG"

	# Start the instance
	if lxc_running; then
		streamlitctl instance start "$name" >/dev/null 2>&1 &
	fi

	json_init_obj
	json_add_boolean "success" 1
	json_add_string "message" "App deployed with instance on port $next_port"
	json_add_string "name" "$name"
	json_add_int "port" "$next_port"
	json_close_obj
}
|
|
|
|
# Unpublish/revoke emancipation
#
# rpcd method: stdin JSON { "id": str }
# Removes the HAProxy vhost and certificate entries for the instance's
# domain, regenerates/reloads HAProxy and clears the emancipation
# markers from the instance's UCI section.
unpublish() {
	read -r input
	local id
	id=$(echo "$input" | jsonfilter -e '@.id' 2>/dev/null)

	if [ -z "$id" ]; then
		json_error "Missing instance id"
		return
	fi

	config_load "$CONFIG"
	local domain
	domain=$(uci -q get "${CONFIG}.${id}.domain")

	if [ -z "$domain" ]; then
		json_error "Instance not emancipated"
		return
	fi

	# Derive the HAProxy UCI section name from the domain.
	# BUGFIX: use tr '.-' '_' instead of sed 's/\./_/g' — UCI section
	# names may only contain [A-Za-z0-9_], so hyphenated domains
	# produced an invalid name and the deletes below silently missed
	# the vhost. This also matches the naming used by streamlitctl and
	# the rename path above.
	local vhost_section=$(echo "$domain" | tr '.-' '_')

	# Remove HAProxy vhost
	uci -q delete "haproxy.${vhost_section}" 2>/dev/null

	# Remove certificate entry
	uci -q delete "haproxy.cert_${vhost_section}" 2>/dev/null
	uci commit haproxy

	# Regenerate and reload HAProxy
	haproxyctl generate >/dev/null 2>&1
	haproxyctl reload >/dev/null 2>&1

	# Update instance UCI
	uci delete "${CONFIG}.${id}.emancipated" 2>/dev/null
	uci delete "${CONFIG}.${id}.emancipated_at" 2>/dev/null
	uci delete "${CONFIG}.${id}.domain" 2>/dev/null
	uci commit "$CONFIG"

	json_init_obj
	json_add_boolean "success" 1
	json_add_string "message" "Exposure revoked for $id"
	json_add_string "domain" "$domain"
	json_close_obj
}
|
|
|
|
# Set authentication requirement
#
# rpcd method: stdin JSON { "id": str, "auth_required": 0|1 }
# Persists the flag on the instance and, when the instance is exposed
# (has a domain), mirrors it onto the HAProxy vhost and reloads HAProxy.
set_auth_required() {
	read -r input
	local id auth_required
	id=$(echo "$input" | jsonfilter -e '@.id' 2>/dev/null)
	auth_required=$(echo "$input" | jsonfilter -e '@.auth_required' 2>/dev/null)

	if [ -z "$id" ]; then
		json_error "Missing instance id"
		return
	fi

	config_load "$CONFIG"
	local domain
	domain=$(uci -q get "${CONFIG}.${id}.domain")

	# Update UCI.
	# NOTE(review): auth_required is stored verbatim; callers are
	# expected to send 0/1 (jsonfilter would yield "true"/"false" for
	# JSON booleans) — confirm the frontend contract.
	uci set "${CONFIG}.${id}.auth_required=$auth_required"
	uci commit "$CONFIG"

	# Update HAProxy vhost if emancipated
	if [ -n "$domain" ]; then
		# BUGFIX: use tr '.-' '_' instead of sed 's/\./_/g' — UCI
		# section names may only contain [A-Za-z0-9_], so hyphenated
		# domains produced an invalid section name and the vhost flag
		# was never updated. Matches the naming used elsewhere
		# (streamlitctl / rename path).
		local vhost_section=$(echo "$domain" | tr '.-' '_')
		if [ "$auth_required" = "1" ]; then
			uci set "haproxy.${vhost_section}.auth_required=1"
		else
			uci -q delete "haproxy.${vhost_section}.auth_required"
		fi
		uci commit haproxy
		haproxyctl generate >/dev/null 2>&1
		haproxyctl reload >/dev/null 2>&1
	fi

	json_init_obj
	json_add_boolean "success" 1
	json_add_string "message" "Auth requirement updated"
	json_add_boolean "auth_required" "$( [ "$auth_required" = "1" ] && echo 1 || echo 0 )"
	json_close_obj
}
|
|
|
|
# One-click emancipate for instance
#
# rpcd method: stdin JSON { "id": str, "domain": str (optional) }
# Exposes an instance publicly: creates a HAProxy backend + vhost
# (routed through mitmproxy_inspector for WAF protection), an ACME
# certificate entry, adds mitmproxy route entries, restarts/reloads the
# proxies, requests a certificate when not covered by the wildcard, and
# marks the instance emancipated in UCI.
emancipate_instance() {
	read -r input
	local id domain
	id=$(echo "$input" | jsonfilter -e '@.id' 2>/dev/null)
	domain=$(echo "$input" | jsonfilter -e '@.domain' 2>/dev/null)

	if [ -z "$id" ]; then
		json_error "Missing instance id"
		return
	fi

	config_load "$CONFIG"
	local app port
	app=$(uci -q get "${CONFIG}.${id}.app")
	port=$(uci -q get "${CONFIG}.${id}.port")

	if [ -z "$port" ]; then
		json_error "Instance has no port configured"
		return
	fi

	# Auto-generate domain if not provided: <id>.<wildcard domain>
	if [ -z "$domain" ]; then
		local wildcard_domain=$(uci -q get vortex.main.wildcard_domain)
		[ -z "$wildcard_domain" ] && wildcard_domain="gk2.secubox.in"
		domain="${id}.${wildcard_domain}"
	fi

	# Derive the HAProxy UCI section name from the domain.
	# BUGFIX: use tr '.-' '_' instead of sed 's/\./_/g' — UCI section
	# names may only contain [A-Za-z0-9_], so a hyphenated domain
	# produced an invalid section name and every "uci set" on the vhost
	# below failed. This matches the naming used by streamlitctl and
	# the rename path above.
	local vhost_section=$(echo "$domain" | tr '.-' '_')
	local backend_name="streamlit_${id}"

	# Create backend for direct routing (used by mitmproxy)
	uci set "haproxy.${backend_name}=backend"
	uci set "haproxy.${backend_name}.name=${backend_name}"
	uci set "haproxy.${backend_name}.mode=http"
	uci set "haproxy.${backend_name}.balance=roundrobin"
	uci set "haproxy.${backend_name}.enabled=1"

	# Add server - use 192.168.255.1 (host network, not loopback)
	uci set "haproxy.${backend_name}_srv=server"
	uci set "haproxy.${backend_name}_srv.backend=${backend_name}"
	uci set "haproxy.${backend_name}_srv.name=streamlit"
	uci set "haproxy.${backend_name}_srv.address=192.168.255.1"
	uci set "haproxy.${backend_name}_srv.port=${port}"
	uci set "haproxy.${backend_name}_srv.weight=100"
	uci set "haproxy.${backend_name}_srv.check=1"
	uci set "haproxy.${backend_name}_srv.enabled=1"

	# Create vhost - Route through mitmproxy_inspector for WAF protection
	uci set "haproxy.${vhost_section}=vhost"
	uci set "haproxy.${vhost_section}.domain=${domain}"
	uci set "haproxy.${vhost_section}.backend=mitmproxy_inspector"
	uci set "haproxy.${vhost_section}.ssl=1"
	uci set "haproxy.${vhost_section}.ssl_redirect=1"
	uci set "haproxy.${vhost_section}.acme=1"
	uci set "haproxy.${vhost_section}.enabled=1"

	# Create certificate entry
	uci set "haproxy.cert_${vhost_section}=certificate"
	uci set "haproxy.cert_${vhost_section}.domain=${domain}"
	uci set "haproxy.cert_${vhost_section}.type=acme"
	uci set "haproxy.cert_${vhost_section}.enabled=1"

	uci commit haproxy

	# Add mitmproxy route for this domain -> streamlit backend
	local routes_file="/srv/mitmproxy/haproxy-routes.json"
	local routes_file_in="/srv/mitmproxy-in/haproxy-routes.json"
	if [ -f "$routes_file" ]; then
		# Add route entry: "domain": ["192.168.255.1", port]
		local tmp_routes="/tmp/routes_$$.json"
		if command -v jq >/dev/null 2>&1; then
			jq --arg domain "$domain" --argjson port "$port" \
				'. + {($domain): ["192.168.255.1", $port]}' "$routes_file" > "$tmp_routes" 2>/dev/null && \
				mv "$tmp_routes" "$routes_file"
			# Clean up leftover temp file if jq failed (mv not reached)
			rm -f "$tmp_routes" 2>/dev/null
		else
			# Fallback: append using sed (for OpenWrt without jq)
			sed -i "s/}$/,\"${domain}\":[\"192.168.255.1\",${port}]}/" "$routes_file" 2>/dev/null
		fi
	fi
	# Same for inbound mitmproxy
	if [ -f "$routes_file_in" ]; then
		if command -v jq >/dev/null 2>&1; then
			jq --arg domain "$domain" --argjson port "$port" \
				'. + {($domain): ["192.168.255.1", $port]}' "$routes_file_in" > "/tmp/routes_in_$$.json" 2>/dev/null && \
				mv "/tmp/routes_in_$$.json" "$routes_file_in"
			rm -f "/tmp/routes_in_$$.json" 2>/dev/null
		else
			sed -i "s/}$/,\"${domain}\":[\"192.168.255.1\",${port}]}/" "$routes_file_in" 2>/dev/null
		fi
	fi

	# Restart mitmproxy to pick up routes
	/etc/init.d/mitmproxy restart >/dev/null 2>&1 &

	# Regenerate and reload HAProxy
	haproxyctl generate >/dev/null 2>&1
	haproxyctl reload >/dev/null 2>&1

	# Request certificate via ACME (wildcard covers *.gk2.secubox.in)
	case "$domain" in
		*.gk2.secubox.in)
			# Wildcard covers this domain
			;;
		*)
			haproxyctl cert add "$domain" >/dev/null 2>&1 &
			;;
	esac

	# Update instance UCI
	uci set "${CONFIG}.${id}.emancipated=1"
	uci set "${CONFIG}.${id}.emancipated_at=$(date -Iseconds)"
	uci set "${CONFIG}.${id}.domain=${domain}"
	uci set "${CONFIG}.${id}.waf_enabled=1"
	uci commit "$CONFIG"

	# Auto-push to Gitea if configured
	local gitea_enabled=$(uci -q get "${CONFIG}.gitea.enabled")
	if [ "$gitea_enabled" = "1" ]; then
		streamlitctl gitea push "$app" >/dev/null 2>&1 &
	fi

	json_init_obj
	json_add_boolean "success" 1
	json_add_string "message" "Instance exposed at https://${domain} (WAF protected)"
	json_add_string "domain" "$domain"
	json_add_string "url" "https://${domain}"
	json_add_int "port" "$port"
	json_add_boolean "waf_enabled" 1
	json_close_obj
}
|
|
|
|
# Get exposure status for all instances
#
# Iterates every "instance" UCI section and reports exposure state:
# port, domain, emancipation, auth requirement, certificate validity
# and whether the vhost routes through the mitmproxy WAF.
get_exposure_status() {
	json_init_obj
	json_add_array "instances"

	config_load "$CONFIG"

	# Per-section callback for config_foreach (runs in this shell, so
	# json_add_* state is preserved)
	_add_exposure_json() {
		local section="$1"
		local app port enabled domain emancipated auth_required waf_enabled

		config_get app "$section" app ""
		config_get port "$section" port ""
		config_get enabled "$section" enabled "0"
		config_get domain "$section" domain ""
		config_get emancipated "$section" emancipated "0"
		config_get auth_required "$section" auth_required "0"
		config_get waf_enabled "$section" waf_enabled "0"

		# Skip sections that are not bound to an app
		[ -z "$app" ] && return

		local cert_valid=0
		local cert_expires=""
		if [ "$emancipated" = "1" ] && [ -n "$domain" ]; then
			local cert_file="/srv/haproxy/certs/${domain}.pem"
			if [ -f "$cert_file" ]; then
				cert_valid=1
				cert_expires=$(openssl x509 -enddate -noout -in "$cert_file" 2>/dev/null | cut -d= -f2)
			fi
			# Check WAF status from HAProxy vhost config.
			# BUGFIX: use tr '.-' '_' (not sed 's/\./_/g') so hyphenated
			# domains map to a valid UCI section name, matching the
			# naming used when the vhost is created.
			local vhost_section=$(echo "$domain" | tr '.-' '_')
			local vhost_backend=$(uci -q get "haproxy.${vhost_section}.backend" 2>/dev/null)
			if [ "$vhost_backend" = "mitmproxy_inspector" ]; then
				waf_enabled=1
			fi
		fi

		json_add_object ""
		json_add_string "id" "$section"
		json_add_string "app" "$app"
		json_add_int "port" "$port"
		json_add_boolean "enabled" "$( [ "$enabled" = "1" ] && echo 1 || echo 0 )"
		json_add_boolean "emancipated" "$( [ "$emancipated" = "1" ] && echo 1 || echo 0 )"
		json_add_string "domain" "$domain"
		json_add_boolean "auth_required" "$( [ "$auth_required" = "1" ] && echo 1 || echo 0 )"
		json_add_boolean "cert_valid" "$cert_valid"
		json_add_string "cert_expires" "$cert_expires"
		json_add_boolean "waf_enabled" "$( [ "$waf_enabled" = "1" ] && echo 1 || echo 0 )"
		json_close_object
	}

	config_foreach _add_exposure_json instance

	json_close_array
	json_close_obj
}
|
|
|
|
# Check install progress
|
|
# Report installer progress as JSON: { status, progress (0-100), message }.
# Progress is a heuristic derived from marker strings in the install log;
# a live "streamlitctl install" process always forces status to "running".
get_install_progress() {
	local log_file="/var/log/streamlit-install.log"
	local status="unknown"
	local progress=0
	local message=""

	if [ ! -f "$log_file" ]; then
		# The installer writes the log as its first action, so no log
		# means it was never launched.
		status="not_started"
		message="Installation has not been started"
	elif grep -q "Installation complete" "$log_file" 2>/dev/null; then
		status="completed"
		progress=100
		message="Installation completed successfully"
	elif grep -q "ERROR" "$log_file" 2>/dev/null; then
		status="error"
		message=$(grep "ERROR" "$log_file" | tail -1)
	else
		# Still in flight: map the latest milestone marker to a rough
		# percentage (markers checked from latest phase to earliest).
		status="running"
		if grep -q "Rootfs created" "$log_file" 2>/dev/null; then
			progress=80
			message="Setting up container..."
		elif grep -q "Extracting rootfs" "$log_file" 2>/dev/null; then
			progress=60
			message="Extracting container rootfs..."
		elif grep -q "Downloading Alpine" "$log_file" 2>/dev/null; then
			progress=40
			message="Downloading Alpine rootfs..."
		elif grep -q "Installing Streamlit" "$log_file" 2>/dev/null; then
			progress=20
			message="Starting installation..."
		else
			progress=10
			message="Initializing..."
		fi
	fi

	# A running installer process overrides whatever the log says.
	if pgrep -f "streamlitctl install" >/dev/null 2>&1; then
		status="running"
	fi

	json_init_obj
	json_add_string "status" "$status"
	json_add_int "progress" "$progress"
	json_add_string "message" "$message"
	json_close_obj
}
|
|
|
|
# Main RPC handler
|
|
# rpcd executable-plugin entry point.
#   "$1" = "list" -> print the method/signature map as JSON.
#   "$1" = "call" -> "$2" names the method; arguments arrive on stdin
#                    and are parsed by the handler itself.
case "$1" in
list)
	# Method catalog consumed by ubus/rpcd; values describe expected
	# argument names and placeholder types.
	cat <<-EOF
	{
		"get_status": {},
		"get_config": {},
		"save_config": {"http_port": 8501, "http_host": "str", "data_path": "str", "memory_limit": "str", "enabled": "str", "active_app": "str", "headless": "str", "browser_gather_usage_stats": "str", "theme_base": "str", "theme_primary_color": "str"},
		"start": {},
		"stop": {},
		"restart": {},
		"install": {},
		"uninstall": {},
		"update": {},
		"get_logs": {"lines": 100},
		"list_apps": {},
		"get_app": {"name": "str"},
		"add_app": {"name": "str", "path": "str"},
		"remove_app": {"name": "str"},
		"set_active_app": {"name": "str"},
		"upload_app": {"name": "str", "content": "str"},
		"upload_chunk": {"name": "str", "data": "str", "index": 0},
		"upload_finalize": {"name": "str", "is_zip": "str"},
		"test_upload": {"name": "str"},
		"preview_zip": {"content": "str"},
		"upload_zip": {"name": "str", "content": "str", "selected_files": []},
		"get_install_progress": {},
		"list_instances": {},
		"add_instance": {"id": "str", "name": "str", "app": "str", "port": 8501},
		"remove_instance": {"id": "str"},
		"enable_instance": {"id": "str"},
		"disable_instance": {"id": "str"},
		"rename_app": {"id": "str", "name": "str"},
		"rename_instance": {"id": "str", "name": "str"},
		"get_gitea_config": {},
		"save_gitea_config": {"enabled": "str", "url": "str", "user": "str", "token": "str"},
		"gitea_clone": {"name": "str", "repo": "str"},
		"gitea_pull": {"name": "str"},
		"gitea_list_repos": {},
		"get_source": {"name": "str"},
		"save_source": {"name": "str", "content": "str"},
		"emancipate": {"name": "str", "domain": "str"},
		"get_emancipation": {"name": "str"},
		"upload_and_deploy": {"name": "str", "content": "str", "is_zip": "str"},
		"emancipate_instance": {"id": "str", "domain": "str"},
		"unpublish": {"id": "str"},
		"set_auth_required": {"id": "str", "auth_required": "str"},
		"get_exposure_status": {}
	}
	EOF
	;;
call)
	case "$2" in
	# Lifecycle verbs map to the *_service handlers.
	start)
		start_service
		;;
	stop)
		stop_service
		;;
	restart)
		restart_service
		;;
	# Every other published method is handled by the shell function
	# of the same name, so dispatch by invoking "$2" directly. The
	# explicit whitelist prevents calling arbitrary functions.
	get_status|get_config|save_config|install|uninstall|update|get_logs|\
	list_apps|get_app|add_app|remove_app|set_active_app|\
	upload_app|upload_chunk|upload_finalize|test_upload|preview_zip|upload_zip|\
	get_install_progress|\
	list_instances|add_instance|remove_instance|enable_instance|disable_instance|\
	rename_app|rename_instance|\
	get_gitea_config|save_gitea_config|gitea_clone|gitea_pull|gitea_list_repos|\
	get_source|save_source|\
	emancipate|get_emancipation|upload_and_deploy|emancipate_instance|\
	unpublish|set_auth_required|get_exposure_status)
		"$2"
		;;
	*)
		json_error "Unknown method: $2"
		;;
	esac
	;;
esac
|