secubox-openwrt/package/secubox/luci-app-metablogizer/root/usr/libexec/rpcd/luci.metablogizer
CyberMind-FR 36fbff3958 fix(metablogizer): Resolve HAProxy stability and add WAF status display
- Fixed random 404 errors caused by multiple HAProxy instances (container + host)
- Disabled host HAProxy service, container HAProxy now sole traffic handler
- Added auto-republish on upload for emancipated sites
- Added waf_enabled and emancipated fields to list_sites RPCD response
- Added WAF badge in LuCI dashboard Exposure column

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-02-25 10:19:33 +01:00

2718 lines
74 KiB
Bash
Executable File

#!/bin/sh
# SPDX-License-Identifier: GPL-2.0-only
# RPCD backend for MetaBlogizer Static Site Publisher
# Copyright (C) 2025 CyberMind.fr
. /lib/functions.sh
. /usr/share/libubox/jshn.sh
UCI_CONFIG="metablogizer"
SITES_ROOT="/srv/metablogizer/sites"
NGINX_CONTAINER="nginx"
PORT_BASE=8900
TOR_DATA="/var/lib/tor"
# Read one option from the metablogizer uci config, printing a
# caller-supplied default when the option is unset or empty.
# $1 - section, $2 - option, $3 - default value.
get_uci() {
	local sect="$1" opt="$2" fallback="$3"
	local result
	result="$(uci -q get "${UCI_CONFIG}.${sect}.${opt}")"
	if [ -n "$result" ]; then
		echo "$result"
	else
		echo "$fallback"
	fi
}
# Decide which web runtime serves sites and print it: "uhttpd" when the
# uhttpd init script is executable, "nginx" when the nginx LXC container
# exists, "none" otherwise. An explicit uci setting (main.runtime) pins
# the choice; "auto" (the default) probes uhttpd first, then nginx.
detect_runtime() {
	local choice
	choice="$(get_uci main runtime "auto")"
	case "$choice" in
	uhttpd)
		if [ -x /etc/init.d/uhttpd ]; then echo "uhttpd"; else echo "none"; fi
		;;
	nginx)
		if lxc-info -n "$NGINX_CONTAINER" >/dev/null 2>&1; then echo "nginx"; else echo "none"; fi
		;;
	*)
		# "auto" and anything unrecognized: prefer uhttpd, fall back
		# to the nginx container, else report none.
		if [ -x /etc/init.d/uhttpd ]; then
			echo "uhttpd"
		elif lxc-info -n "$NGINX_CONTAINER" >/dev/null 2>&1; then
			echo "nginx"
		else
			echo "none"
		fi
		;;
	esac
}
# Find the first TCP port >= PORT_BASE that no existing uhttpd uci
# instance already listens on, and print it.
# Outputs: the chosen port number on stdout.
get_next_port() {
	local port="$PORT_BASE"
	# The trailing quote in the pattern anchors the match, so port 8900
	# cannot false-match an entry for 89001.
	while uci show uhttpd 2>/dev/null | grep -q "listen_http='0.0.0.0:$port'"; do
		port=$((port + 1))
	done
	# Fixed: quote the expansion (was `echo $port`, subject to
	# word-splitting/globbing).
	echo "$port"
}
# Make a site tree world-readable for the web server: 755 on every
# directory, 644 on every file, and each ancestor directory made
# traversable (best effort).
# $1 - site directory; returns 1 when it does not exist.
fix_permissions() {
	local root="$1"
	[ -d "$root" ] || return 1
	# Files created from here on come out world-readable by default.
	umask 022
	chmod 755 "$root"
	# rwxr-xr-x on directories, rw-r--r-- on files.
	find "$root" -type d -exec chmod 755 {} \; 2>/dev/null
	find "$root" -type f -exec chmod 644 {} \; 2>/dev/null
	# Walk up toward / so every ancestor can be entered; failures
	# (e.g. ancestors we do not own) are ignored.
	local up
	up="$(dirname "$root")"
	while [ "$up" != "/" ] && [ -d "$up" ]; do
		chmod a+rx "$up" 2>/dev/null || true
		up="$(dirname "$up")"
	done
}
# Report via exit status whether any HAProxy instance is reachable:
# an LXC container named "haproxy" in RUNNING state (preferred), a bare
# haproxy process, or an init script that answers "status".
haproxy_available() {
	# Preferred: a running "haproxy" LXC container.
	if command -v lxc-info >/dev/null 2>&1; then
		if lxc-info -n haproxy -s 2>/dev/null | grep -q "RUNNING"; then
			return 0
		fi
	fi
	# Next: any haproxy process on the host.
	if pgrep haproxy >/dev/null 2>&1; then
		return 0
	fi
	# Last resort: the init script reports the service as up.
	if [ -x /etc/init.d/haproxy ] && /etc/init.d/haproxy status >/dev/null 2>&1; then
		return 0
	fi
	return 1
}
# Regenerate the HAProxy configuration and reload it, keeping the
# mirrored config copies and the mitmproxy WAF routing tables in sync.
# Returns 1 (doing nothing) when no HAProxy instance is available.
reload_haproxy() {
	if ! haproxy_available; then
		logger -t metablogizer "HAProxy not available, skipping reload"
		return 1
	fi
	# Generate new config
	/usr/sbin/haproxyctl generate >/dev/null 2>&1
	# Sync generated config to all locations HAProxy might read from
	# (/etc copy on the host and the /opt path if it exists).
	local src_cfg="/srv/haproxy/config/haproxy.cfg"
	if [ -f "$src_cfg" ]; then
		cp "$src_cfg" /etc/haproxy.cfg 2>/dev/null
		[ -d /opt/haproxy/config ] && cp "$src_cfg" /opt/haproxy/config/haproxy.cfg 2>/dev/null
	fi
	# Sync mitmproxy routes from HAProxy config and trigger reload
	# This ensures WAF routing is properly configured for new vhosts
	if command -v mitmproxyctl >/dev/null 2>&1; then
		mitmproxyctl sync-routes >/dev/null 2>&1
		logger -t metablogizer "Synced mitmproxy routes"
	fi
	# Reload HAProxy via the host init script if present.
	# NOTE(review): when HAProxy runs only inside the LXC container this
	# reload is a no-op on the host — presumably the container picks up
	# the copied config itself; confirm.
	if [ -x /etc/init.d/haproxy ]; then
		/etc/init.d/haproxy reload >/dev/null 2>&1
	fi
}
# Map a public domain to a backend address:port in the mitmproxy WAF
# routing tables, so inspected traffic is forwarded to the right site.
# $1 - domain, $2 - backend address, $3 - backend port.
add_mitmproxy_route() {
	local domain="$1"
	local address="$2"
	local port="$3"
	# Add to both mitmproxy and mitmproxy-in routes files. Missing files
	# are skipped, and the Python helper deliberately swallows errors so
	# a corrupt routes file never blocks site creation.
	for routes_file in /srv/mitmproxy/haproxy-routes.json /srv/mitmproxy-in/haproxy-routes.json; do
		[ -f "$routes_file" ] || continue
		python3 - "$routes_file" "$domain" "$address" "$port" 2>/dev/null <<'PYEOF'
import json, sys
routes_file, domain, address, port = sys.argv[1], sys.argv[2], sys.argv[3], int(sys.argv[4])
try:
    with open(routes_file) as f:
        routes = json.load(f)
    routes[domain] = [address, port]
    with open(routes_file, 'w') as f:
        json.dump(routes, f, indent=2)
except Exception as e:
    pass
PYEOF
	done
}
# Register a uci cert entry for a domain when a matching wildcard
# certificate already exists inside the haproxy container.
# $1 - full domain (e.g. sub.gk2.secubox.in).
# NOTE(review): the caller is expected to run `uci commit haproxy`.
add_haproxy_cert() {
	local domain="$1"
	# Extract base domain for wildcard cert (e.g. gk2.secubox.in from sub.gk2.secubox.in)
	local base_domain=$(echo "$domain" | sed 's/^[^.]*\.//')
	local cert_name=$(echo "$domain" | sed 's/[^a-zA-Z0-9]/_/g')
	# Check if wildcard cert exists.
	# NOTE(review): "$wildcard_cert" is quoted, so `ls` inside the
	# container receives a literal "*" — this only succeeds for a file
	# literally named "*.<base>.pem". Presumably certs are stored under
	# that literal name; confirm, otherwise this check never matches.
	local wildcard_cert="/opt/haproxy/certs/*.${base_domain}.pem"
	if lxc-attach -n haproxy -- ls "$wildcard_cert" >/dev/null 2>&1; then
		uci set "haproxy.cert_${cert_name}=cert"
		uci set "haproxy.cert_${cert_name}.domain=$domain"
		uci set "haproxy.cert_${cert_name}.cert_file=$wildcard_cert"
	fi
}
# Print the .onion hostname for a site's Tor hidden service, if the
# hostname file has been generated; prints nothing otherwise.
# $1 - site name (sanitized to [A-Za-z0-9_] to build the service name).
get_onion_address() {
	local site="$1"
	local svc
	svc="metablog_$(printf '%s' "$site" | tr -cd 'a-zA-Z0-9_')"
	local hostfile="$TOR_DATA/hidden_service_$svc/hostname"
	if [ -f "$hostfile" ]; then
		cat "$hostfile"
	fi
}
# Exit 0 when a tor-shield hidden-service uci section exists for the
# given site name, non-zero otherwise.
has_tor_service() {
	local svc
	svc="metablog_$(printf '%s' "$1" | tr -cd 'a-zA-Z0-9_')"
	uci -q get "tor-shield.hs_$svc" >/dev/null 2>&1
}
# Declare a tor-shield hidden service mapping .onion port 80 to a local
# site port, then restart Tor in the background so the .onion address
# gets generated. $1 - site name, $2 - local port. Always returns 0.
create_tor_hidden_service() {
	local site="$1" lport="$2"
	local svc base
	svc="metablog_$(printf '%s' "$site" | tr -cd 'a-zA-Z0-9_')"
	base="tor-shield.hs_$svc"
	uci set "$base=hidden_service"
	uci set "$base.name=$svc"
	uci set "$base.enabled=1"
	uci set "$base.local_port=$lport"
	uci set "$base.virtual_port=80"
	uci commit tor-shield
	# Restart asynchronously: onion key generation can take a while.
	/etc/init.d/tor-shield restart >/dev/null 2>&1 &
	return 0
}
# Tear down a site's Tor hidden service: drop the uci section, wipe the
# key material on disk, and restart Tor in the background.
# $1 - site name.
remove_tor_hidden_service() {
	local svc
	svc="metablog_$(printf '%s' "$1" | tr -cd 'a-zA-Z0-9_')"
	uci delete "tor-shield.hs_$svc" 2>/dev/null
	uci commit tor-shield
	# Remove hidden service data directory (keys and hostname file).
	rm -rf "$TOR_DATA/hidden_service_$svc" 2>/dev/null
	/etc/init.d/tor-shield restart >/dev/null 2>&1 &
}
# Tor counts as ready when its control socket exists and the tor
# process is running.
is_tor_ready() {
	[ -S "/var/run/tor/control" ] && pgrep -f "/usr/sbin/tor" >/dev/null 2>&1
}
# ubus method: overall service status — enabled flag, configured and
# detected runtime, nginx container state, site count, and paths.
# Emits one JSON object on stdout.
method_status() {
	local enabled runtime detected_runtime nginx_running site_count
	enabled=$(get_uci main enabled 0)
	runtime=$(get_uci main runtime "auto")
	SITES_ROOT=$(get_uci main sites_root "$SITES_ROOT")
	NGINX_CONTAINER=$(get_uci main nginx_container "$NGINX_CONTAINER")
	# Detect runtime
	detected_runtime=$(detect_runtime)
	# Check nginx container if using nginx
	nginx_running="0"
	if lxc-info -n "$NGINX_CONTAINER" -s 2>/dev/null | grep -q "RUNNING"; then
		nginx_running="1"
	fi
	# Count site sections. Fixed: reset the global accumulator first —
	# it is only initialized once at script load, so a second call in
	# the same process would otherwise report an accumulated count.
	_site_count=0
	config_load "$UCI_CONFIG"
	config_foreach _count_site site
	site_count="$_site_count"
	json_init
	json_add_boolean "enabled" "$enabled"
	json_add_string "runtime" "$runtime"
	json_add_string "detected_runtime" "$detected_runtime"
	json_add_boolean "nginx_running" "$nginx_running"
	json_add_int "site_count" "$site_count"
	json_add_string "sites_root" "$SITES_ROOT"
	json_add_string "nginx_container" "$NGINX_CONTAINER"
	json_dump
}
# Global accumulator used with config_foreach to count site sections.
_site_count=0
# config_foreach callback: bump the site counter by one per section.
_count_site() {
	_site_count=$((1 + _site_count))
}
# List all sites with their status.
# ubus method: emits { "sites": [ ... ] }, one element per configured
# site section, each built by the _add_site callback.
method_list_sites() {
	SITES_ROOT=$(get_uci main sites_root "$SITES_ROOT")
	json_init
	json_add_array "sites"
	config_load "$UCI_CONFIG"
	config_foreach _add_site site
	json_close_array
	json_dump
}
# config_foreach callback for method_list_sites: append one JSON object
# describing a single site section — uci settings, content presence,
# last git sync, Tor onion info, backend liveness, WAF routing and
# emancipation flags.
# $1 - uci section name.
_add_site() {
	local section="$1"
	local name domain gitea_repo ssl enabled description tor_enabled port runtime
	local has_content last_sync onion_address backend_running
	config_get name "$section" name ""
	config_get domain "$section" domain ""
	config_get gitea_repo "$section" gitea_repo ""
	config_get ssl "$section" ssl "1"
	config_get enabled "$section" enabled "1"
	config_get description "$section" description ""
	config_get tor_enabled "$section" tor_enabled "0"
	config_get port "$section" port ""
	config_get runtime "$section" runtime ""
	# Check if site has content (an index.html at the site root)
	has_content="0"
	if [ -d "$SITES_ROOT/$name" ] && [ -f "$SITES_ROOT/$name/index.html" ]; then
		has_content="1"
	fi
	# Get last sync time (date of the most recent git commit, if any)
	last_sync=""
	if [ -d "$SITES_ROOT/$name/.git" ]; then
		last_sync=$(cd "$SITES_ROOT/$name" && git log -1 --format="%ci" 2>/dev/null || echo "")
	fi
	# Get Tor .onion address if available
	onion_address=""
	if [ "$tor_enabled" = "1" ] || has_tor_service "$name"; then
		onion_address=$(get_onion_address "$name")
	fi
	# Check if backend is running (uhttpd listening on port)
	backend_running="0"
	if [ -n "$port" ]; then
		# Check if port is listening using /proc/net/tcp (hex port)
		local hex_port=$(printf '%04X' "$port" 2>/dev/null)
		if grep -qi ":${hex_port}" /proc/net/tcp 2>/dev/null; then
			backend_running="1"
		fi
	fi
	# Check WAF status (is site routed through mitmproxy_inspector?)
	# The vhost section is keyed on the sanitized domain, matching how
	# method_create_site names it.
	local waf_enabled="0"
	local vhost_name=$(echo "$domain" | sed 's/[^a-zA-Z0-9]/_/g')
	local vhost_backend=$(uci -q get "haproxy.${vhost_name}.backend" 2>/dev/null)
	if [ "$vhost_backend" = "mitmproxy_inspector" ]; then
		waf_enabled="1"
	fi
	# Check emancipated status (default 0 when the option is absent)
	local emancipated=$(uci -q get "${UCI_CONFIG}.${section}.emancipated" 2>/dev/null)
	[ -z "$emancipated" ] && emancipated="0"
	json_add_object
	json_add_string "id" "$section"
	json_add_string "name" "$name"
	json_add_string "domain" "$domain"
	json_add_string "gitea_repo" "$gitea_repo"
	json_add_string "description" "$description"
	json_add_boolean "ssl" "$ssl"
	json_add_boolean "enabled" "$enabled"
	json_add_boolean "has_content" "$has_content"
	json_add_string "last_sync" "$last_sync"
	json_add_string "url" "https://$domain"
	[ -n "$port" ] && json_add_int "port" "$port"
	[ -n "$runtime" ] && json_add_string "runtime" "$runtime"
	json_add_boolean "backend_running" "$backend_running"
	json_add_boolean "waf_enabled" "$waf_enabled"
	json_add_boolean "emancipated" "$emancipated"
	# Tor hidden service info
	json_add_boolean "tor_enabled" "$(has_tor_service "$name" && echo 1 || echo 0)"
	[ -n "$onion_address" ] && json_add_string "onion_address" "$onion_address"
	[ -n "$onion_address" ] && json_add_string "onion_url" "http://$onion_address"
	json_close_object
}
# Create a new site with auto-vhost.
# ubus method. Input JSON: { name, domain, [gitea_repo], [ssl],
# [description] }. Steps: uci site section -> content directory ->
# optional Gitea clone -> default index.html -> runtime instance
# (dedicated uhttpd port or nginx location) -> HAProxy backend + vhost
# routed through the mitmproxy WAF -> mitmproxy route + async reload.
method_create_site() {
	local name domain gitea_repo ssl description
	local section_id gitea_url
	# rpcd passes the call arguments as a single JSON line on stdin.
	read -r input
	json_load "$input"
	json_get_var name name
	json_get_var domain domain
	json_get_var gitea_repo gitea_repo
	json_get_var ssl ssl "1"
	json_get_var description description ""
	# Validate required fields
	if [ -z "$name" ] || [ -z "$domain" ]; then
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "Name and domain are required"
		json_dump
		return
	fi
	# Sanitize name for section ID
	section_id="site_$(echo "$name" | sed 's/[^a-zA-Z0-9]/_/g')"
	# Check if site already exists
	if uci -q get "$UCI_CONFIG.$section_id" >/dev/null 2>&1; then
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "Site with this name already exists"
		json_dump
		return
	fi
	SITES_ROOT=$(get_uci main sites_root "$SITES_ROOT")
	NGINX_CONTAINER=$(get_uci main nginx_container "$NGINX_CONTAINER")
	# 1. Create UCI site config
	uci set "$UCI_CONFIG.$section_id=site"
	uci set "$UCI_CONFIG.$section_id.name=$name"
	uci set "$UCI_CONFIG.$section_id.domain=$domain"
	uci set "$UCI_CONFIG.$section_id.ssl=$ssl"
	uci set "$UCI_CONFIG.$section_id.enabled=1"
	[ -n "$gitea_repo" ] && uci set "$UCI_CONFIG.$section_id.gitea_repo=$gitea_repo"
	[ -n "$description" ] && uci set "$UCI_CONFIG.$section_id.description=$description"
	# 2. Create site directory
	mkdir -p "$SITES_ROOT/$name"
	# 3. Clone from Gitea if repo specified (best effort — on failure
	# the default index below still gives the site content)
	if [ -n "$gitea_repo" ]; then
		gitea_url=$(uci -q get gitea.main.url || echo "http://192.168.255.1:3000")
		git clone "$gitea_url/$gitea_repo.git" "$SITES_ROOT/$name" 2>/dev/null || true
	fi
	# 4. Fix permissions for web serving
	fix_permissions "$SITES_ROOT/$name"
	# 5. Create default index.html with OG tags if no content
	if [ ! -f "$SITES_ROOT/$name/index.html" ]; then
		cat > "$SITES_ROOT/$name/index.html" <<EOF
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>$name</title>
<meta property="og:title" content="$name">
<meta property="og:url" content="https://$domain">
<meta property="og:type" content="website">
<meta property="og:description" content="$description">
<meta name="twitter:card" content="summary">
<meta name="twitter:title" content="$name">
<meta name="twitter:url" content="https://$domain">
<style>
body { font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
display: flex; justify-content: center; align-items: center;
min-height: 100vh; margin: 0; background: #f5f5f5; }
.container { text-align: center; padding: 2rem; }
h1 { color: #333; }
p { color: #666; }
</style>
</head>
<body>
<div class="container">
<h1>$name</h1>
<p>Site published with MetaBlogizer</p>
</div>
</body>
</html>
EOF
	fi
	# 6. Detect runtime and configure accordingly
	local current_runtime=$(detect_runtime)
	local port=""
	local server_address="127.0.0.1"
	local server_port="80"
	if [ "$current_runtime" = "uhttpd" ]; then
		# Create uhttpd instance on its own dedicated port
		port=$(get_next_port)
		uci set "uhttpd.metablog_${section_id}=uhttpd"
		uci set "uhttpd.metablog_${section_id}.listen_http=0.0.0.0:$port"
		uci set "uhttpd.metablog_${section_id}.home=$SITES_ROOT/$name"
		uci set "uhttpd.metablog_${section_id}.index_page=index.html"
		uci set "uhttpd.metablog_${section_id}.error_page=/index.html"
		uci commit uhttpd
		/etc/init.d/uhttpd reload 2>/dev/null
		# Use LAN IP for HAProxy backend (HAProxy runs in LXC and can't reach 127.0.0.1)
		server_address=$(uci -q get network.lan.ipaddr || echo "192.168.255.1")
		server_port="$port"
	else
		# Configure nginx location in container
		_configure_nginx "$name"
		local nginx_ip
		nginx_ip=$(lxc-info -n "$NGINX_CONTAINER" -iH 2>/dev/null | head -1)
		[ -n "$nginx_ip" ] && server_address="$nginx_ip"
	fi
	# Save port to site config
	[ -n "$port" ] && uci set "$UCI_CONFIG.$section_id.port=$port"
	uci set "$UCI_CONFIG.$section_id.runtime=$current_runtime"
	# 7. Create HAProxy backend (if HAProxy is available)
	local haproxy_configured=0
	if haproxy_available; then
		local backend_name="metablog_$(echo "$name" | sed 's/[^a-zA-Z0-9]/_/g')"
		uci set "haproxy.$backend_name=backend"
		uci set "haproxy.$backend_name.name=$backend_name"
		uci set "haproxy.$backend_name.mode=http"
		uci set "haproxy.$backend_name.balance=roundrobin"
		uci set "haproxy.$backend_name.enabled=1"
		# Create server entry pointing at the runtime address:port
		local server_name="${backend_name}_srv"
		uci set "haproxy.$server_name=server"
		uci set "haproxy.$server_name.backend=$backend_name"
		uci set "haproxy.$server_name.name=srv"
		uci set "haproxy.$server_name.address=$server_address"
		uci set "haproxy.$server_name.port=$server_port"
		uci set "haproxy.$server_name.weight=100"
		uci set "haproxy.$server_name.check=1"
		uci set "haproxy.$server_name.enabled=1"
		# 8. Create HAProxy vhost (route through mitmproxy WAF; the real
		# backend is recorded in original_backend for the WAF to use)
		local vhost_name=$(echo "$domain" | sed 's/[^a-zA-Z0-9]/_/g')
		local acme_val="0"
		[ "$ssl" = "1" ] && acme_val="1"
		uci set "haproxy.$vhost_name=vhost"
		uci set "haproxy.$vhost_name.domain=$domain"
		uci set "haproxy.$vhost_name.backend=mitmproxy_inspector"
		uci set "haproxy.$vhost_name.original_backend=$backend_name"
		uci set "haproxy.$vhost_name.ssl=$ssl"
		uci set "haproxy.$vhost_name.ssl_redirect=$ssl"
		uci set "haproxy.$vhost_name.acme=$acme_val"
		uci set "haproxy.$vhost_name.enabled=1"
		uci set "haproxy.$vhost_name.priority=50"
		# Add cert entry for wildcard SSL
		add_haproxy_cert "$domain"
		uci commit haproxy
		# Add mitmproxy route for WAF inspection
		add_mitmproxy_route "$domain" "$server_address" "$server_port"
		# Regenerate HAProxy config and reload (async — can be slow)
		reload_haproxy &
		haproxy_configured=1
	else
		logger -t metablogizer "HAProxy not available, site created without proxy config"
	fi
	uci commit "$UCI_CONFIG"
	json_init
	json_add_boolean "success" 1
	json_add_string "id" "$section_id"
	json_add_string "url" "https://$domain"
	json_add_string "name" "$name"
	json_add_string "domain" "$domain"
	json_dump
}
# Configure nginx location for a site: write a per-site config snippet
# into the container rootfs and reload nginx (best effort).
# $1 - site name.
# NOTE(review): serves the site under path /<name>/ from /srv/sites —
# assumes the container bind-mounts the sites root there; confirm.
_configure_nginx() {
	local site_name="$1"
	local nginx_conf="/var/lib/lxc/$NGINX_CONTAINER/rootfs/etc/nginx/sites.d"
	# Create nginx config for site
	mkdir -p "$nginx_conf"
	cat > "$nginx_conf/metablog-$site_name.conf" <<EOF
location /$site_name/ {
alias /srv/sites/$site_name/;
index index.html;
try_files \$uri \$uri/ /index.html;
}
EOF
	# Reload nginx in container (ignored if the container is down)
	lxc-attach -n "$NGINX_CONTAINER" -- nginx -s reload 2>/dev/null || true
}
# Delete a site and cleanup.
# ubus method: removes every derived artifact — HAProxy vhost/backend/
# server sections, the runtime instance (uhttpd or nginx), the content
# directory — then the metablogizer uci section itself.
# Input JSON: { "id": "<uci section>" }.
method_delete_site() {
	local id
	read -r input
	json_load "$input"
	json_get_var id id
	if [ -z "$id" ]; then
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "Missing site id"
		json_dump
		return
	fi
	local name domain
	name=$(get_uci "$id" name "")
	domain=$(get_uci "$id" domain "")
	if [ -z "$name" ]; then
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "Site not found"
		json_dump
		return
	fi
	SITES_ROOT=$(get_uci main sites_root "$SITES_ROOT")
	NGINX_CONTAINER=$(get_uci main nginx_container "$NGINX_CONTAINER")
	# Get site runtime (recorded at creation time)
	local site_runtime=$(get_uci "$id" runtime "")
	# 1. Delete HAProxy vhost (if HAProxy config exists)
	if uci -q get haproxy >/dev/null 2>&1; then
		local vhost_id=$(echo "$domain" | sed 's/[^a-zA-Z0-9]/_/g')
		uci delete "haproxy.$vhost_id" 2>/dev/null
		# 2. Delete HAProxy backend and server (same naming scheme as
		# method_create_site: metablog_<sanitized name> / ..._srv)
		local backend_name="metablog_$(echo "$name" | sed 's/[^a-zA-Z0-9]/_/g')"
		uci delete "haproxy.$backend_name" 2>/dev/null
		uci delete "haproxy.${backend_name}_srv" 2>/dev/null
		uci commit haproxy
		# Only reload if HAProxy is actually running
		if haproxy_available; then
			reload_haproxy &
		fi
	fi
	# 3. Remove runtime config
	if [ "$site_runtime" = "uhttpd" ]; then
		# Remove uhttpd instance (named metablog_<section id> at creation)
		uci delete "uhttpd.metablog_$id" 2>/dev/null
		uci commit uhttpd
		/etc/init.d/uhttpd reload 2>/dev/null
	else
		# Remove nginx config snippet and reload nginx (best effort)
		rm -f "/var/lib/lxc/$NGINX_CONTAINER/rootfs/etc/nginx/sites.d/metablog-$name.conf"
		lxc-attach -n "$NGINX_CONTAINER" -- nginx -s reload 2>/dev/null || true
	fi
	# 4. Remove site directory (content is permanently deleted)
	rm -rf "$SITES_ROOT/$name"
	# 5. Delete UCI config
	uci delete "$UCI_CONFIG.$id"
	uci commit "$UCI_CONFIG"
	json_init
	json_add_boolean "success" 1
	json_dump
}
# Sync site from Gitea (git pull), or clone fresh when no working copy
# exists yet. Input JSON: { "id": "<uci section>" }.
# On success the response carries git's output in "message"; on failure
# the combined stdout+stderr is returned in "error".
method_sync_site() {
	local id
	read -r input
	json_load "$input"
	json_get_var id id
	if [ -z "$id" ]; then
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "Missing site id"
		json_dump
		return
	fi
	local name gitea_repo
	name=$(get_uci "$id" name "")
	gitea_repo=$(get_uci "$id" gitea_repo "")
	if [ -z "$name" ]; then
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "Site not found"
		json_dump
		return
	fi
	SITES_ROOT=$(get_uci main sites_root "$SITES_ROOT")
	local site_path="$SITES_ROOT/$name"
	if [ -d "$site_path/.git" ]; then
		# Pull latest changes.
		# NOTE(review): this cd is never undone, so the process cwd
		# stays inside the site afterwards — harmless for one-shot rpcd
		# invocations, but worth confirming.
		cd "$site_path"
		local result
		result=$(git pull 2>&1)
		local rc=$?
		# Fix permissions after pull
		fix_permissions "$site_path"
		json_init
		if [ $rc -eq 0 ]; then
			json_add_boolean "success" 1
			json_add_string "message" "$result"
		else
			json_add_boolean "success" 0
			json_add_string "error" "$result"
		fi
		json_dump
	elif [ -n "$gitea_repo" ]; then
		# Clone if not exists (any stale non-git directory is wiped first)
		local gitea_url
		gitea_url=$(uci -q get gitea.main.url || echo "http://192.168.255.1:3000")
		rm -rf "$site_path"
		local result
		result=$(git clone "$gitea_url/$gitea_repo.git" "$site_path" 2>&1)
		local rc=$?
		# Fix permissions after clone
		fix_permissions "$site_path"
		json_init
		if [ $rc -eq 0 ]; then
			json_add_boolean "success" 1
			json_add_string "message" "Cloned from $gitea_repo"
		else
			json_add_boolean "success" 0
			json_add_string "error" "$result"
		fi
		json_dump
	else
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "No git repository configured for this site"
		json_dump
	fi
}
# ubus method: return the public URL plus display metadata for a site,
# used by the UI to generate a QR code / share card.
# Input JSON: { "id": "<uci section>" }.
method_get_publish_info() {
	local id
	read -r input
	json_load "$input"
	json_get_var id id
	if [ -z "$id" ]; then
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "Missing site id"
		json_dump
		return
	fi
	local name domain description ssl
	name=$(get_uci "$id" name "")
	domain=$(get_uci "$id" domain "")
	description=$(get_uci "$id" description "")
	ssl=$(get_uci "$id" ssl "1")
	if [ -z "$name" ]; then
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "Site not found"
		json_dump
		return
	fi
	# Scheme follows the site's ssl flag.
	local scheme="http"
	[ "$ssl" = "1" ] && scheme="https"
	json_init
	json_add_boolean "success" 1
	json_add_string "id" "$id"
	json_add_string "name" "$name"
	json_add_string "domain" "$domain"
	json_add_string "url" "${scheme}://${domain}"
	json_add_string "description" "$description"
	json_add_string "title" "$name - Published with SecuBox"
	json_dump
}
# Get site details.
# ubus method: the full record for one site, including content and git
# sync state. Input JSON: { "id": "<uci section>" }.
method_get_site() {
	local id
	read -r input
	json_load "$input"
	json_get_var id id
	if [ -z "$id" ]; then
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "Missing site id"
		json_dump
		return
	fi
	local name domain gitea_repo ssl enabled description
	name=$(get_uci "$id" name "")
	domain=$(get_uci "$id" domain "")
	gitea_repo=$(get_uci "$id" gitea_repo "")
	ssl=$(get_uci "$id" ssl "1")
	enabled=$(get_uci "$id" enabled "1")
	description=$(get_uci "$id" description "")
	if [ -z "$name" ]; then
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "Site not found"
		json_dump
		return
	fi
	SITES_ROOT=$(get_uci main sites_root "$SITES_ROOT")
	local has_content last_sync
	# Content check mirrors _add_site: index.html must exist.
	has_content="0"
	if [ -d "$SITES_ROOT/$name" ] && [ -f "$SITES_ROOT/$name/index.html" ]; then
		has_content="1"
	fi
	# Date of the most recent git commit, if the site is a working copy.
	last_sync=""
	if [ -d "$SITES_ROOT/$name/.git" ]; then
		last_sync=$(cd "$SITES_ROOT/$name" && git log -1 --format="%ci" 2>/dev/null || echo "")
	fi
	local protocol="http"
	[ "$ssl" = "1" ] && protocol="https"
	json_init
	json_add_boolean "success" 1
	json_add_string "id" "$id"
	json_add_string "name" "$name"
	json_add_string "domain" "$domain"
	json_add_string "gitea_repo" "$gitea_repo"
	json_add_string "description" "$description"
	json_add_boolean "ssl" "$ssl"
	json_add_boolean "enabled" "$enabled"
	json_add_boolean "has_content" "$has_content"
	json_add_string "last_sync" "$last_sync"
	json_add_string "url" "${protocol}://${domain}"
	json_dump
}
# Update site settings.
# ubus method. Input JSON: id (required) plus any of name/domain/
# gitea_repo/ssl/enabled/description — only supplied fields are
# changed. When the domain changes, the old HAProxy vhost is dropped
# and a new one is created for the new domain.
method_update_site() {
	local id name domain gitea_repo ssl enabled description
	read -r input
	json_load "$input"
	json_get_var id id
	json_get_var name name
	json_get_var domain domain
	json_get_var gitea_repo gitea_repo
	json_get_var ssl ssl
	json_get_var enabled enabled
	json_get_var description description
	if [ -z "$id" ]; then
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "Missing site id"
		json_dump
		return
	fi
	# Check site exists and capture current values for comparison
	local current_name current_domain
	current_name=$(get_uci "$id" name "")
	current_domain=$(get_uci "$id" domain "")
	if [ -z "$current_name" ]; then
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "Site not found"
		json_dump
		return
	fi
	# Track if domain changed
	local domain_changed=0
	if [ -n "$domain" ] && [ "$domain" != "$current_domain" ]; then
		domain_changed=1
	fi
	# Only overwrite options the caller actually supplied
	[ -n "$name" ] && uci set "$UCI_CONFIG.$id.name=$name"
	[ -n "$domain" ] && uci set "$UCI_CONFIG.$id.domain=$domain"
	[ -n "$gitea_repo" ] && uci set "$UCI_CONFIG.$id.gitea_repo=$gitea_repo"
	[ -n "$ssl" ] && uci set "$UCI_CONFIG.$id.ssl=$ssl"
	[ -n "$enabled" ] && uci set "$UCI_CONFIG.$id.enabled=$enabled"
	[ -n "$description" ] && uci set "$UCI_CONFIG.$id.description=$description"
	uci commit "$UCI_CONFIG"
	# If domain changed and a vhost existed, re-point HAProxy
	if [ "$domain_changed" = "1" ] && [ -n "$current_domain" ]; then
		# Remove the vhost keyed on the old domain
		local old_vhost_name=$(echo "$current_domain" | sed 's/[^a-zA-Z0-9]/_/g')
		uci delete "haproxy.$old_vhost_name" 2>/dev/null
		uci commit haproxy
		# Create new vhost if a new domain is set (only uhttpd-backed
		# sites have a recorded port)
		if [ -n "$domain" ]; then
			local port=$(get_uci "$id" port "")
			if [ -n "$port" ]; then
				local site_name="${name:-$current_name}"
				# BUGFIX: use the same backend naming as site creation
				# ("metablog_<sanitized>", see method_create_site and
				# method_delete_site); the previous truncated "meta_*"
				# name pointed the new vhost at a nonexistent backend.
				local backend_name="metablog_$(echo "$site_name" | sed 's/[^a-zA-Z0-9]/_/g')"
				local vhost_name=$(echo "$domain" | sed 's/[^a-zA-Z0-9]/_/g')
				# NOTE(review): unlike creation, this vhost targets the
				# backend directly rather than mitmproxy_inspector —
				# confirm whether renamed domains should keep WAF routing.
				uci set "haproxy.$vhost_name=vhost"
				uci set "haproxy.$vhost_name.domain=$domain"
				uci set "haproxy.$vhost_name.backend=$backend_name"
				uci set "haproxy.$vhost_name.priority=50"
				uci set "haproxy.$vhost_name.ssl=1"
				uci set "haproxy.$vhost_name.ssl_redirect=1"
				uci set "haproxy.$vhost_name.acme=1"
				uci set "haproxy.$vhost_name.enabled=1"
				# Add cert entry for wildcard SSL
				add_haproxy_cert "$domain"
				uci commit haproxy
				# Regenerate and reload HAProxy
				reload_haproxy
			fi
		fi
	fi
	json_init
	json_add_boolean "success" 1
	json_add_boolean "republished" "$domain_changed"
	json_dump
}
# Upload file to site (single request for small files).
# ubus method. Input JSON: { id, filename, content } where content is
# base64. Files larger than ~40KB must use the chunked upload methods
# (uhttpd caps JSON bodies around 64KB).
method_upload_file() {
	local id filename content
	read -r input
	json_load "$input"
	json_get_var id id
	json_get_var filename filename
	json_get_var content content
	if [ -z "$id" ] || [ -z "$filename" ] || [ -z "$content" ]; then
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "Missing required fields (id, filename, content)"
		json_dump
		return
	fi
	local name
	name=$(get_uci "$id" name "")
	if [ -z "$name" ]; then
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "Site not found"
		json_dump
		return
	fi
	# SECURITY FIX: reject absolute paths and ".." components so an RPC
	# caller cannot write outside the site directory (filename was
	# previously joined into the path unchecked).
	case "/$filename/" in
	//*|*/../*)
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "Invalid filename"
		json_dump
		return
		;;
	esac
	SITES_ROOT=$(get_uci main sites_root "$SITES_ROOT")
	local site_path="$SITES_ROOT/$name"
	local file_path="$site_path/$filename"
	# Create directory structure if needed with proper permissions
	local dir_path=$(dirname "$file_path")
	# CRITICAL: Set umask BEFORE any file operations
	umask 022
	mkdir -p "$dir_path"
	chmod 755 "$dir_path"
	# Write file - umask 022 ensures 644 permissions
	echo "$content" | base64 -d > "$file_path" 2>/dev/null
	local rc=$?
	# ALWAYS set readable permissions immediately after write
	chmod 644 "$file_path" 2>/dev/null
	# Also ensure parent dirs are traversable
	chmod 755 "$site_path" 2>/dev/null
	if [ $rc -eq 0 ]; then
		# Fix permissions for entire site directory
		fix_permissions "$site_path"
		json_init
		json_add_boolean "success" 1
		json_add_string "filename" "$filename"
		json_add_string "path" "$file_path"
		json_dump
	else
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "Failed to write file"
		json_dump
	fi
}
# Chunked upload: receive one base64 chunk and append it to a per-upload
# staging file in /tmp. Used for files > 40KB to bypass uhttpd's 64KB
# JSON limit. Input JSON: { upload_id, data, index }.
method_upload_chunk() {
	local req="/tmp/rpcd_mb_chunk_$$.json"
	cat > "$req"
	local upload_id chunk_data chunk_index
	upload_id=$(jsonfilter -i "$req" -e '@.upload_id' 2>/dev/null)
	chunk_data=$(jsonfilter -i "$req" -e '@.data' 2>/dev/null)
	chunk_index=$(jsonfilter -i "$req" -e '@.index' 2>/dev/null)
	rm -f "$req"
	# Keep only [A-Za-z0-9_] in the id and trim edge underscores so it
	# is safe to embed in a /tmp path.
	upload_id=$(echo "$upload_id" | sed 's/[^a-zA-Z0-9_]/_/g; s/^_*//; s/_*$//')
	if [ -z "$upload_id" ] || [ -z "$chunk_data" ]; then
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "Missing upload_id or data"
		json_dump
		return
	fi
	local staging="/tmp/metablogizer_upload_${upload_id}.b64"
	# Index 0 starts a fresh staging file; later chunks append.
	case "$chunk_index" in
	0) printf '%s' "$chunk_data" > "$staging" ;;
	*) printf '%s' "$chunk_data" >> "$staging" ;;
	esac
	json_init
	json_add_boolean "success" 1
	json_add_string "message" "Chunk $chunk_index received"
	json_dump
}
# Finalize chunked upload: decode the accumulated base64 staging file
# into the target site, fix permissions, push to Gitea in the
# background, and auto-republish emancipated sites.
# Input JSON: { upload_id, site_id, filename }.
method_upload_finalize() {
	local tmpinput="/tmp/rpcd_mb_finalize_$$.json"
	cat > "$tmpinput"
	local upload_id site_id filename
	upload_id=$(jsonfilter -i "$tmpinput" -e '@.upload_id' 2>/dev/null)
	site_id=$(jsonfilter -i "$tmpinput" -e '@.site_id' 2>/dev/null)
	filename=$(jsonfilter -i "$tmpinput" -e '@.filename' 2>/dev/null)
	rm -f "$tmpinput"
	# Sanitize upload_id (same rule as method_upload_chunk)
	upload_id=$(echo "$upload_id" | sed 's/[^a-zA-Z0-9_]/_/g; s/^_*//; s/_*$//')
	if [ -z "$upload_id" ] || [ -z "$site_id" ] || [ -z "$filename" ]; then
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "Missing upload_id, site_id, or filename"
		json_dump
		return
	fi
	local staging="/tmp/metablogizer_upload_${upload_id}.b64"
	if [ ! -s "$staging" ]; then
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "No upload data found for $upload_id"
		json_dump
		return
	fi
	# Get site info
	local name
	name=$(get_uci "$site_id" name "")
	if [ -z "$name" ]; then
		rm -f "$staging"
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "Site not found"
		json_dump
		return
	fi
	# SECURITY FIX: reject absolute paths and ".." components so an RPC
	# caller cannot write outside the site directory (filename was
	# previously joined into the path unchecked).
	case "/$filename/" in
	//*|*/../*)
		rm -f "$staging"
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "Invalid filename"
		json_dump
		return
		;;
	esac
	SITES_ROOT=$(get_uci main sites_root "$SITES_ROOT")
	local site_path="$SITES_ROOT/$name"
	local file_path="$site_path/$filename"
	local dir_path=$(dirname "$file_path")
	# CRITICAL: Set umask BEFORE any file operations
	umask 022
	mkdir -p "$dir_path"
	chmod 755 "$dir_path"
	# Decode base64 staging file to final destination
	base64 -d < "$staging" > "$file_path" 2>/dev/null
	local rc=$?
	rm -f "$staging"
	# Set permissions
	chmod 644 "$file_path" 2>/dev/null
	chmod 755 "$site_path" 2>/dev/null
	if [ $rc -eq 0 ]; then
		fix_permissions "$site_path"
		# Auto-push to Gitea if configured (background, use site name not UCI section id)
		metablogizerctl gitea push "$name" >/dev/null 2>&1 &
		# Auto-republish if site is emancipated (ensures HAProxy routing works after upload)
		local is_emancipated=$(get_uci "$site_id" emancipated "0")
		if [ "$is_emancipated" = "1" ]; then
			metablogizerctl publish "$name" >/dev/null 2>&1 &
		fi
		json_init
		json_add_boolean "success" 1
		json_add_string "filename" "$filename"
		json_add_string "path" "$file_path"
		json_dump
	else
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "Failed to decode and write file"
		json_dump
	fi
}
# Create site from chunked upload (for large files)
# Reads JSON on stdin: upload_id (staging-file handle), name, domain, is_zip.
# Decodes the staged base64 blob into SITES_ROOT/<name>, provisions a uhttpd
# instance on the next free port, registers the site in UCI and - when HAProxy
# is available - wires a backend/vhost routed through the mitmproxy WAF.
method_create_site_from_upload() {
    local tmpinput="/tmp/rpcd_mb_create_upload_$$.json"
    cat > "$tmpinput"
    local upload_id name domain is_zip
    upload_id=$(jsonfilter -i "$tmpinput" -e '@.upload_id' 2>/dev/null)
    name=$(jsonfilter -i "$tmpinput" -e '@.name' 2>/dev/null)
    domain=$(jsonfilter -i "$tmpinput" -e '@.domain' 2>/dev/null)
    is_zip=$(jsonfilter -i "$tmpinput" -e '@.is_zip' 2>/dev/null)
    rm -f "$tmpinput"
    # Sanitize upload_id (it is interpolated into a /tmp path below)
    upload_id=$(echo "$upload_id" | sed 's/[^a-zA-Z0-9_]/_/g; s/^_*//; s/_*$//')
    if [ -z "$upload_id" ] || [ -z "$name" ] || [ -z "$domain" ]; then
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "Missing upload_id, name, or domain"
        json_dump
        return
    fi
    # Reject path traversal: name is used verbatim under SITES_ROOT, so a
    # "/", ".." or leading dot would let the caller escape the sites root.
    case "$name" in
        */*|*..*|.*)
            json_init
            json_add_boolean "success" 0
            json_add_string "error" "Invalid site name"
            json_dump
            return
            ;;
    esac
    local staging="/tmp/metablogizer_upload_${upload_id}.b64"
    if [ ! -s "$staging" ]; then
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "No upload data found for $upload_id"
        json_dump
        return
    fi
    # Sanitize name for use as a UCI section identifier
    local section_id="site_$(echo "$name" | sed 's/[^a-zA-Z0-9]/_/g')"
    # Check if site already exists
    if uci -q get "$UCI_CONFIG.$section_id" >/dev/null 2>&1; then
        rm -f "$staging"
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "Site with this name already exists"
        json_dump
        return
    fi
    SITES_ROOT=$(get_uci main sites_root "$SITES_ROOT")
    # Create site directory
    mkdir -p "$SITES_ROOT/$name"
    umask 022
    # Decode staged content: a ZIP is extracted in place, anything else is
    # treated as a single HTML page.
    if [ "$is_zip" = "1" ]; then
        local tmpzip="/tmp/metablog_upload_$$.zip"
        base64 -d < "$staging" > "$tmpzip" 2>/dev/null
        unzip -o "$tmpzip" -d "$SITES_ROOT/$name" >/dev/null 2>&1
        rm -f "$tmpzip"
    else
        base64 -d < "$staging" > "$SITES_ROOT/$name/index.html" 2>/dev/null
    fi
    rm -f "$staging"
    # Fix permissions for web serving
    fix_permissions "$SITES_ROOT/$name"
    # Create default index if none exists (e.g. ZIP without an index.html)
    if [ ! -f "$SITES_ROOT/$name/index.html" ]; then
        cat > "$SITES_ROOT/$name/index.html" <<EOF
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>$name</title>
</head>
<body>
<h1>$name</h1>
<p>Site published with MetaBlogizer</p>
</body>
</html>
EOF
        chmod 644 "$SITES_ROOT/$name/index.html"
    fi
    # Get next port and create a dedicated uhttpd instance for the site
    local port server_address
    port=$(get_next_port)
    server_address=$(uci -q get network.lan.ipaddr || echo "192.168.255.1")
    uci set "uhttpd.metablog_${section_id}=uhttpd"
    uci set "uhttpd.metablog_${section_id}.listen_http=0.0.0.0:$port"
    uci set "uhttpd.metablog_${section_id}.home=$SITES_ROOT/$name"
    uci set "uhttpd.metablog_${section_id}.index_page=index.html"
    uci set "uhttpd.metablog_${section_id}.error_page=/index.html"
    uci commit uhttpd
    /etc/init.d/uhttpd reload 2>/dev/null
    # Create UCI site config
    uci set "$UCI_CONFIG.$section_id=site"
    uci set "$UCI_CONFIG.$section_id.name=$name"
    uci set "$UCI_CONFIG.$section_id.domain=$domain"
    uci set "$UCI_CONFIG.$section_id.ssl=1"
    uci set "$UCI_CONFIG.$section_id.enabled=1"
    uci set "$UCI_CONFIG.$section_id.port=$port"
    uci set "$UCI_CONFIG.$section_id.runtime=uhttpd"
    # Create HAProxy backend if available
    if haproxy_available; then
        local backend_name="metablog_$(echo "$name" | sed 's/[^a-zA-Z0-9]/_/g')"
        uci set "haproxy.$backend_name=backend"
        uci set "haproxy.$backend_name.name=$backend_name"
        uci set "haproxy.$backend_name.mode=http"
        uci set "haproxy.$backend_name.balance=roundrobin"
        uci set "haproxy.$backend_name.enabled=1"
        local server_name="${backend_name}_srv"
        uci set "haproxy.$server_name=server"
        uci set "haproxy.$server_name.backend=$backend_name"
        uci set "haproxy.$server_name.name=srv"
        uci set "haproxy.$server_name.address=$server_address"
        uci set "haproxy.$server_name.port=$port"
        uci set "haproxy.$server_name.weight=100"
        uci set "haproxy.$server_name.check=1"
        uci set "haproxy.$server_name.enabled=1"
        # Create HAProxy vhost (route through mitmproxy WAF; the real backend
        # is kept in original_backend so the WAF can be bypassed later)
        local vhost_name=$(echo "$domain" | sed 's/[^a-zA-Z0-9]/_/g')
        uci set "haproxy.$vhost_name=vhost"
        uci set "haproxy.$vhost_name.domain=$domain"
        uci set "haproxy.$vhost_name.backend=mitmproxy_inspector"
        uci set "haproxy.$vhost_name.original_backend=$backend_name"
        uci set "haproxy.$vhost_name.ssl=1"
        uci set "haproxy.$vhost_name.ssl_redirect=1"
        uci set "haproxy.$vhost_name.acme=1"
        uci set "haproxy.$vhost_name.enabled=1"
        uci set "haproxy.$vhost_name.priority=50"
        # Add cert entry for wildcard SSL
        add_haproxy_cert "$domain"
        uci commit haproxy
        # Add mitmproxy route for WAF inspection
        add_mitmproxy_route "$domain" "$server_address" "$port"
        reload_haproxy &
    fi
    uci commit "$UCI_CONFIG"
    json_init
    json_add_boolean "success" 1
    json_add_string "id" "$section_id"
    json_add_string "name" "$name"
    json_add_string "domain" "$domain"
    json_add_int "port" "$port"
    json_add_string "url" "https://$domain"
    json_dump
}
# List files in a site
# Reads JSON {"id": "<uci section>"} on stdin; responds with a "files" array
# of {name: path relative to the site root, size: bytes}.
# NOTE: the original implementation piped find into "while read", which runs
# the loop in a subshell - all json_add_* state was lost and the array was
# always empty. A temp file with input redirection keeps the loop in the
# current shell (BusyBox ash has no process substitution).
method_list_files() {
    local id
    read -r input
    json_load "$input"
    json_get_var id id
    if [ -z "$id" ]; then
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "Missing site id"
        json_dump
        return
    fi
    local name
    name=$(get_uci "$id" name "")
    if [ -z "$name" ]; then
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "Site not found"
        json_dump
        return
    fi
    SITES_ROOT=$(get_uci main sites_root "$SITES_ROOT")
    local site_path="$SITES_ROOT/$name"
    json_init
    json_add_boolean "success" 1
    json_add_array "files"
    if [ -d "$site_path" ]; then
        local list_tmp="/tmp/rpcd_mb_files_$$"
        find "$site_path" -type f 2>/dev/null > "$list_tmp"
        while IFS= read -r file; do
            local rel_path="${file#$site_path/}"
            local size
            size=$(stat -c%s "$file" 2>/dev/null || echo "0")
            json_add_object ""
            json_add_string "name" "$rel_path"
            json_add_int "size" "$size"
            json_close_object
        done < "$list_tmp"
        rm -f "$list_tmp"
    fi
    json_close_array
    json_dump
}
# Get global settings
# Emits the main metablogizer UCI options plus the runtime that was actually
# detected on this system (uhttpd vs nginx container).
method_get_settings() {
    json_init
    json_add_boolean "enabled" "$(get_uci main enabled 0)"
    json_add_string "runtime" "$(get_uci main runtime auto)"
    json_add_string "detected_runtime" "$(detect_runtime)"
    json_add_string "nginx_container" "$(get_uci main nginx_container nginx)"
    json_add_string "sites_root" "$(get_uci main sites_root /srv/metablogizer/sites)"
    json_add_string "gitea_url" "$(get_uci main gitea_url http://localhost:3000)"
    json_dump
}
# Helper: Check DNS resolution for a domain (KISS approach)
# Outputs the resolved IPv4 address (never the DNS server's address) or
# nothing on failure. All candidate answers are filtered to dotted-quad
# IPv4 form: the DoH API may return a CNAME as the first Answer entry, and
# nslookup may list IPv6 addresses, either of which would previously have
# been returned verbatim and broken the public-IP comparison.
check_dns_resolution() {
    local domain="$1"
    local resolved_ip=""
    # BRE matching exactly one dotted-quad IPv4 address
    local ipv4_only='^[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}$'
    # Method 1: Use external DNS API (most reliable, avoids local resolver
    # issues). Scan all Answer records and keep the first A-record value.
    resolved_ip=$(wget -qO- -T 3 "https://dns.google/resolve?name=${domain}&type=A" 2>/dev/null | \
        jsonfilter -e '@.Answer[*].data' 2>/dev/null | \
        grep "$ipv4_only" | head -n1)
    # Method 2: Fallback to nslookup with careful parsing
    if [ -z "$resolved_ip" ] && command -v nslookup >/dev/null 2>&1; then
        # Strip the "Address:" prefix and any "#port" suffix, drop loopback
        # (the resolver itself), keep the last IPv4 answer.
        resolved_ip=$(nslookup "$domain" 2>/dev/null | \
            grep "Address" | \
            sed 's/.*: *//; s/#.*//' | \
            grep -v '^127\.' | \
            grep "$ipv4_only" | tail -1)
    fi
    # Method 3: Fallback to host command
    if [ -z "$resolved_ip" ] && command -v host >/dev/null 2>&1; then
        resolved_ip=$(host -t A "$domain" 2>/dev/null | \
            grep "has address" | head -1 | awk '{print $NF}')
    fi
    echo "$resolved_ip"
}
# Helper: Get public IPv4 address
# Tries icanhazip first, then ipify; outputs an empty string if both fail.
get_public_ipv4() {
    local addr
    addr=$(wget -qO- -T 5 "http://ipv4.icanhazip.com" 2>/dev/null | tr -d '\n')
    if [ -z "$addr" ]; then
        addr=$(wget -qO- -T 5 "http://api.ipify.org" 2>/dev/null | tr -d '\n')
    fi
    echo "$addr"
}
# Helper: Check certificate expiry (BusyBox compatible)
# $1 - domain name. Probes a fixed list of candidate certificate paths
# (HAProxy LXC container, host HAProxy, shared/ACME directories) and uses
# `openssl x509 -checkend` so no date-string parsing is needed.
# Outputs on stdout: "-1" if the cert is already expired, otherwise the
# approximate number of days until expiry (capped at 730 by the search range).
# Returns 1 when no certificate file is found, 0 otherwise.
check_cert_expiry() {
    local domain="$1"
    local cert_file=""
    # Check multiple possible certificate locations
    # 1. HAProxy LXC container certs
    if [ -f "/srv/lxc/haproxy/rootfs/srv/haproxy/certs/${domain}.pem" ]; then
        cert_file="/srv/lxc/haproxy/rootfs/srv/haproxy/certs/${domain}.pem"
    # 2. HAProxy host path (if not containerized)
    elif [ -f "/srv/haproxy/certs/${domain}.pem" ]; then
        cert_file="/srv/haproxy/certs/${domain}.pem"
    # 3. ACME shared certs
    elif [ -f "/usr/share/haproxy/certs/${domain}.pem" ]; then
        cert_file="/usr/share/haproxy/certs/${domain}.pem"
    # 4. ACME ECC certs
    elif [ -f "/etc/acme/${domain}_ecc/${domain}.cer" ]; then
        cert_file="/etc/acme/${domain}_ecc/${domain}.cer"
    # 5. ACME RSA certs
    elif [ -f "/etc/acme/${domain}/${domain}.cer" ]; then
        cert_file="/etc/acme/${domain}/${domain}.cer"
    # 6. ACME fullchain
    elif [ -f "/etc/acme/${domain}_ecc/fullchain.cer" ]; then
        cert_file="/etc/acme/${domain}_ecc/fullchain.cer"
    elif [ -f "/etc/acme/${domain}/fullchain.cer" ]; then
        cert_file="/etc/acme/${domain}/fullchain.cer"
    fi
    if [ -z "$cert_file" ] || [ ! -f "$cert_file" ]; then
        return 1
    fi
    # Use openssl x509 -checkend to determine days until expiry
    # This is BusyBox compatible and doesn't rely on date parsing
    local days=0
    # Check if certificate is already expired
    if ! openssl x509 -checkend 0 -noout -in "$cert_file" >/dev/null 2>&1; then
        echo "-1"
        return 0
    fi
    # Binary search to find approximate days until expiry (0-730 days range)
    # Invariant: cert is still valid after `low` days, not after `high`+1.
    local low=0 high=730 mid seconds
    while [ $low -lt $high ]; do
        # Round up so the search terminates even when high = low + 1
        mid=$(( (low + high + 1) / 2 ))
        seconds=$((mid * 86400))
        if openssl x509 -checkend "$seconds" -noout -in "$cert_file" >/dev/null 2>&1; then
            low=$mid
        else
            high=$((mid - 1))
        fi
    done
    echo "$low"
    return 0
}
# Helper: HTTP health check - returns status code
# $1 - URL to probe, $2 - timeout in seconds (default 5).
# Outputs the last HTTP status code seen (follows redirects), or "0" when
# the request could not be completed at all.
check_http_health() {
    local target="$1"
    local deadline="${2:-5}"
    local code=""
    # First try spider mode: -S prints response headers on stderr, from
    # which the status line is extracted.
    code=$(wget --spider -S -T "$deadline" -t 1 "$target" 2>&1 | \
        grep "HTTP/" | tail -1 | awk '{print $2}')
    # Some servers reject HEAD-style probes; retry with a discarded GET.
    [ -n "$code" ] || code=$(wget -q -O /dev/null -S -T "$deadline" -t 1 "$target" 2>&1 | \
        grep "HTTP/" | tail -1 | awk '{print $2}')
    echo "${code:-0}"
}
# Helper: Check backend (local uhttpd) and frontend (HAProxy) health
# $1 - backend port, $2 - public domain, $3 - ssl flag ("1" for https).
# Outputs "<backend_code>:<frontend_code>", with "0" for any endpoint that
# was not checked or did not respond.
check_site_http_health() {
    local port="$1" domain="$2" ssl="$3"
    local backend_code="" frontend_code=""
    # Backend: the site's dedicated uhttpd instance on localhost
    [ -n "$port" ] && backend_code=$(check_http_health "http://127.0.0.1:${port}/" 3)
    # Frontend: the public route through HAProxy
    if [ -n "$domain" ]; then
        local scheme="http"
        [ "$ssl" = "1" ] && scheme="https"
        frontend_code=$(check_http_health "${scheme}://${domain}/" 5)
    fi
    echo "${backend_code:-0}:${frontend_code:-0}"
}
# Get hosting status for all sites with DNS and cert health
# Resolves the public IP once and passes it to the per-site helper so each
# site's DNS answer can be compared against it.
method_get_hosting_status() {
    SITES_ROOT=$(get_uci main sites_root "$SITES_ROOT")
    local public_ip
    public_ip=$(get_public_ipv4)
    json_init
    json_add_boolean "success" 1
    json_add_string "public_ip" "$public_ip"
    # Report whether the HAProxy container is up
    local ha_state="stopped"
    lxc-info -n haproxy -s 2>/dev/null | grep -q "RUNNING" && ha_state="running"
    json_add_string "haproxy_status" "$ha_state"
    json_add_array "sites"
    config_load "$UCI_CONFIG"
    config_foreach _add_site_health site "$public_ip"
    json_close_array
    json_dump
}
# config_foreach callback: append one site's health record to the open
# "sites" JSON array. $1 - UCI section name, $2 - our public IPv4.
# Fix: the RFC1918 check previously used the pattern "172.2*", which also
# matched public addresses such as 172.2.x.x and 172.200.x.x; the private
# 172.16.0.0/12 range is now matched exactly.
_add_site_health() {
    local section="$1"
    local public_ip="$2"
    local name domain ssl enabled has_content port runtime
    config_get name "$section" name ""
    config_get domain "$section" domain ""
    config_get ssl "$section" ssl "1"
    config_get enabled "$section" enabled "1"
    config_get port "$section" port ""
    config_get runtime "$section" runtime ""
    [ -z "$name" ] && return
    # Content is considered present only when an index.html exists
    has_content="0"
    if [ -d "$SITES_ROOT/$name" ] && [ -f "$SITES_ROOT/$name/index.html" ]; then
        has_content="1"
    fi
    json_add_object
    json_add_string "id" "$section"
    json_add_string "name" "$name"
    json_add_string "domain" "$domain"
    json_add_boolean "enabled" "$enabled"
    json_add_boolean "has_content" "$has_content"
    [ -n "$port" ] && json_add_int "port" "$port"
    json_add_string "runtime" "$runtime"
    # DNS check
    local dns_status="none"
    if [ -n "$domain" ]; then
        local resolved_ip
        resolved_ip=$(check_dns_resolution "$domain")
        if [ -n "$resolved_ip" ]; then
            json_add_string "dns_ip" "$resolved_ip"
            # Classify: private (RFC1918), matching our public IP, or mismatch
            case "$resolved_ip" in
                10.*|172.1[6-9].*|172.2[0-9].*|172.3[01].*|192.168.*)
                    dns_status="private"
                    ;;
                *)
                    if [ "$resolved_ip" = "$public_ip" ]; then
                        dns_status="ok"
                    else
                        dns_status="mismatch"
                    fi
                    ;;
            esac
        else
            dns_status="failed"
        fi
        json_add_string "dns_status" "$dns_status"
        # Certificate check
        if [ "$ssl" = "1" ]; then
            local days_left
            days_left=$(check_cert_expiry "$domain")
            if [ -n "$days_left" ]; then
                if [ "$days_left" -lt 0 ]; then
                    json_add_string "cert_status" "expired"
                elif [ "$days_left" -lt 7 ]; then
                    json_add_string "cert_status" "critical"
                elif [ "$days_left" -lt 30 ]; then
                    json_add_string "cert_status" "warning"
                else
                    json_add_string "cert_status" "ok"
                fi
                json_add_int "cert_days" "$days_left"
            else
                json_add_string "cert_status" "missing"
            fi
        else
            json_add_string "cert_status" "none"
        fi
    else
        json_add_string "dns_status" "none"
        json_add_string "cert_status" "none"
    fi
    # HTTP health check (backend and frontend)
    # Skip frontend check if DNS is not pointing to us (avoids timeouts)
    local check_domain=""
    [ "$dns_status" = "ok" ] && check_domain="$domain"
    if [ -n "$port" ] || [ -n "$check_domain" ]; then
        local http_result backend_code frontend_code
        http_result=$(check_site_http_health "$port" "$check_domain" "$ssl")
        backend_code="${http_result%%:*}"
        frontend_code="${http_result##*:}"
        # Backend status (local uhttpd)
        if [ -n "$port" ]; then
            json_add_int "http_backend" "$backend_code"
            if [ "$backend_code" = "200" ]; then
                json_add_string "backend_status" "ok"
            elif [ "$backend_code" = "0" ]; then
                json_add_string "backend_status" "down"
            else
                json_add_string "backend_status" "error"
            fi
        fi
        # Frontend status (HAProxy)
        if [ -n "$domain" ]; then
            if [ -n "$check_domain" ]; then
                json_add_int "http_frontend" "$frontend_code"
                if [ "$frontend_code" = "200" ]; then
                    json_add_string "frontend_status" "ok"
                elif [ "$frontend_code" = "0" ]; then
                    json_add_string "frontend_status" "down"
                elif [ "$frontend_code" = "503" ]; then
                    json_add_string "frontend_status" "unavailable"
                elif [ "$frontend_code" -ge 500 ] 2>/dev/null; then
                    json_add_string "frontend_status" "error"
                elif [ "$frontend_code" -ge 400 ] 2>/dev/null; then
                    json_add_string "frontend_status" "client_error"
                elif [ "$frontend_code" -ge 300 ] 2>/dev/null; then
                    json_add_string "frontend_status" "redirect"
                else
                    json_add_string "frontend_status" "unknown"
                fi
            else
                # DNS mismatch - skip external check to avoid timeout
                json_add_int "http_frontend" 0
                json_add_string "frontend_status" "dns_mismatch"
            fi
        fi
    fi
    # Publish status
    local publish_status="draft"
    if [ "$enabled" = "1" ] && [ "$has_content" = "1" ]; then
        publish_status="published"
    elif [ "$enabled" = "1" ]; then
        publish_status="pending"
    fi
    json_add_string "publish_status" "$publish_status"
    # URL
    local protocol="http"
    [ "$ssl" = "1" ] && protocol="https"
    json_add_string "url" "${protocol}://${domain}"
    json_close_object
}
# Check health for single site
# Reads JSON {"id": "<uci section>"} on stdin and responds with detailed
# dns / certificate / content / haproxy / http objects for that site.
# Fix: the RFC1918 check previously used the pattern "172.2*", which also
# matched public addresses such as 172.2.x.x; the private 172.16.0.0/12
# range is now matched exactly.
method_check_site_health() {
    local id
    read -r input
    json_load "$input"
    json_get_var id id
    if [ -z "$id" ]; then
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "Missing site id"
        json_dump
        return
    fi
    local name domain ssl port
    name=$(get_uci "$id" name "")
    domain=$(get_uci "$id" domain "")
    ssl=$(get_uci "$id" ssl "1")
    port=$(get_uci "$id" port "")
    if [ -z "$name" ]; then
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "Site not found"
        json_dump
        return
    fi
    SITES_ROOT=$(get_uci main sites_root "$SITES_ROOT")
    # Get public IP (compared against the DNS answer below)
    local public_ip
    public_ip=$(get_public_ipv4)
    json_init
    json_add_boolean "success" 1
    json_add_string "id" "$id"
    json_add_string "name" "$name"
    json_add_string "domain" "$domain"
    json_add_string "public_ip" "$public_ip"
    # DNS check
    json_add_object "dns"
    if [ -n "$domain" ]; then
        local resolved_ip
        resolved_ip=$(check_dns_resolution "$domain")
        if [ -n "$resolved_ip" ]; then
            json_add_string "resolved_ip" "$resolved_ip"
            case "$resolved_ip" in
                10.*|172.1[6-9].*|172.2[0-9].*|172.3[01].*|192.168.*)
                    json_add_string "status" "private"
                    json_add_string "message" "DNS points to private IP"
                    ;;
                *)
                    if [ "$resolved_ip" = "$public_ip" ]; then
                        json_add_string "status" "ok"
                    else
                        json_add_string "status" "mismatch"
                        json_add_string "expected" "$public_ip"
                    fi
                    ;;
            esac
        else
            json_add_string "status" "failed"
            json_add_string "message" "DNS resolution failed"
        fi
    else
        json_add_string "status" "none"
    fi
    json_close_object
    # Certificate check
    json_add_object "certificate"
    if [ -n "$domain" ] && [ "$ssl" = "1" ]; then
        local days_left
        days_left=$(check_cert_expiry "$domain")
        if [ -n "$days_left" ]; then
            json_add_int "days_left" "$days_left"
            if [ "$days_left" -lt 0 ]; then
                json_add_string "status" "expired"
            elif [ "$days_left" -lt 7 ]; then
                json_add_string "status" "critical"
            elif [ "$days_left" -lt 30 ]; then
                json_add_string "status" "warning"
            else
                json_add_string "status" "ok"
            fi
        else
            json_add_string "status" "missing"
        fi
    else
        json_add_string "status" "none"
    fi
    json_close_object
    # Content check
    json_add_object "content"
    if [ -d "$SITES_ROOT/$name" ]; then
        json_add_boolean "exists" 1
        local file_count
        file_count=$(find "$SITES_ROOT/$name" -type f 2>/dev/null | wc -l)
        json_add_int "file_count" "$file_count"
        if [ -f "$SITES_ROOT/$name/index.html" ]; then
            json_add_boolean "has_index" 1
        else
            json_add_boolean "has_index" 0
        fi
    else
        json_add_boolean "exists" 0
    fi
    json_close_object
    # HAProxy status
    json_add_object "haproxy"
    if lxc-info -n haproxy -s 2>/dev/null | grep -q "RUNNING"; then
        json_add_string "status" "running"
    else
        json_add_string "status" "stopped"
    fi
    json_close_object
    # HTTP health check (backend and frontend)
    json_add_object "http"
    if [ -n "$port" ] || [ -n "$domain" ]; then
        local http_result backend_code frontend_code
        http_result=$(check_site_http_health "$port" "$domain" "$ssl")
        backend_code="${http_result%%:*}"
        frontend_code="${http_result##*:}"
        # Backend (local uhttpd)
        if [ -n "$port" ]; then
            json_add_object "backend"
            json_add_int "code" "$backend_code"
            json_add_string "url" "http://127.0.0.1:${port}/"
            if [ "$backend_code" = "200" ]; then
                json_add_string "status" "ok"
            elif [ "$backend_code" = "0" ]; then
                json_add_string "status" "down"
                json_add_string "message" "Connection failed"
            else
                json_add_string "status" "error"
                json_add_string "message" "HTTP $backend_code"
            fi
            json_close_object
        fi
        # Frontend (through HAProxy)
        if [ -n "$domain" ]; then
            local protocol="http"
            [ "$ssl" = "1" ] && protocol="https"
            json_add_object "frontend"
            json_add_int "code" "$frontend_code"
            json_add_string "url" "${protocol}://${domain}/"
            if [ "$frontend_code" = "200" ]; then
                json_add_string "status" "ok"
            elif [ "$frontend_code" = "0" ]; then
                json_add_string "status" "down"
                json_add_string "message" "Connection failed"
            elif [ "$frontend_code" = "503" ]; then
                json_add_string "status" "unavailable"
                json_add_string "message" "Service unavailable (backend down)"
            elif [ "$frontend_code" -ge 500 ] 2>/dev/null; then
                json_add_string "status" "error"
                json_add_string "message" "Server error $frontend_code"
            elif [ "$frontend_code" -ge 400 ] 2>/dev/null; then
                json_add_string "status" "client_error"
                json_add_string "message" "HTTP $frontend_code"
            elif [ "$frontend_code" -ge 300 ] 2>/dev/null; then
                json_add_string "status" "redirect"
                json_add_string "message" "Redirecting ($frontend_code)"
            else
                json_add_string "status" "unknown"
            fi
            json_close_object
        fi
    else
        json_add_string "status" "no_endpoints"
    fi
    json_close_object
    json_dump
}
# Repair site - fix permissions and restart backend
# Reads JSON {"id": "<uci section>"} on stdin and applies a set of
# idempotent repairs: re-apply web-serving permissions, recreate a missing
# index.html placeholder, reload uhttpd when that runtime serves the site,
# and reload HAProxy. Responds with the space-separated list of repairs
# that were performed.
method_repair_site() {
    local id
    read -r input
    json_load "$input"
    json_get_var id id
    if [ -z "$id" ]; then
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "Missing site id"
        json_dump
        return
    fi
    local name domain port runtime
    name=$(get_uci "$id" name "")
    domain=$(get_uci "$id" domain "")
    port=$(get_uci "$id" port "")
    runtime=$(get_uci "$id" runtime "")
    if [ -z "$name" ]; then
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "Site not found"
        json_dump
        return
    fi
    SITES_ROOT=$(get_uci main sites_root "$SITES_ROOT")
    local site_path="$SITES_ROOT/$name"
    # Accumulates a space-separated log of repairs for the response
    local repairs=""
    # 1. Fix permissions (755 dirs / 644 files, see fix_permissions)
    if [ -d "$site_path" ]; then
        fix_permissions "$site_path"
        repairs="$repairs permissions_fixed"
    else
        # Nothing to repair without a site directory - hard error
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "Site directory not found: $site_path"
        json_dump
        return
    fi
    # 2. Ensure index.html exists
    if [ ! -f "$site_path/index.html" ]; then
        # Create minimal index
        cat > "$site_path/index.html" <<EOF
<!DOCTYPE html>
<html>
<head><title>$name</title></head>
<body><h1>$name</h1><p>Site placeholder</p></body>
</html>
EOF
        chmod 644 "$site_path/index.html"
        repairs="$repairs index_created"
    fi
    # 3. Restart uhttpd if using it
    if [ "$runtime" = "uhttpd" ] && [ -n "$port" ]; then
        /etc/init.d/uhttpd reload 2>/dev/null
        repairs="$repairs uhttpd_reloaded"
    fi
    # 4. Reload HAProxy (backgrounded so the RPC reply is not delayed)
    # NOTE(review): this runs even when no HAProxy is configured -
    # presumably reload_haproxy is a no-op in that case; confirm.
    reload_haproxy &
    repairs="$repairs haproxy_reloaded"
    json_init
    json_add_boolean "success" 1
    json_add_string "repairs" "$repairs"
    json_add_string "site_path" "$site_path"
    json_dump
}
# One-click upload and create site
# Accepts JSON on stdin: name, domain, content (base64), is_zip.
# Decodes the payload into SITES_ROOT/<name>, provisions a uhttpd instance
# on the next free port, registers the site in UCI and - when HAProxy is
# available - wires a backend/vhost routed through the mitmproxy WAF.
method_upload_and_create_site() {
    local tmpinput="/tmp/rpcd_mb_upload_create_$$.json"
    cat > "$tmpinput"
    local name domain content is_zip
    name=$(jsonfilter -i "$tmpinput" -e '@.name' 2>/dev/null)
    domain=$(jsonfilter -i "$tmpinput" -e '@.domain' 2>/dev/null)
    content=$(jsonfilter -i "$tmpinput" -e '@.content' 2>/dev/null)
    is_zip=$(jsonfilter -i "$tmpinput" -e '@.is_zip' 2>/dev/null)
    rm -f "$tmpinput"
    if [ -z "$name" ] || [ -z "$domain" ]; then
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "Name and domain are required"
        json_dump
        return
    fi
    # Reject path traversal: name is used verbatim under SITES_ROOT, so a
    # "/", ".." or leading dot would let the caller escape the sites root.
    case "$name" in
        */*|*..*|.*)
            json_init
            json_add_boolean "success" 0
            json_add_string "error" "Invalid site name"
            json_dump
            return
            ;;
    esac
    # Sanitize name for use as a UCI section identifier
    local section_id="site_$(echo "$name" | sed 's/[^a-zA-Z0-9]/_/g')"
    # Check if site already exists
    if uci -q get "$UCI_CONFIG.$section_id" >/dev/null 2>&1; then
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "Site with this name already exists"
        json_dump
        return
    fi
    SITES_ROOT=$(get_uci main sites_root "$SITES_ROOT")
    # 1. Create site directory
    mkdir -p "$SITES_ROOT/$name"
    # 2. Decode and save content (ZIP extracted in place, otherwise a
    #    single file assumed to be index.html)
    umask 022
    if [ "$is_zip" = "1" ] && [ -n "$content" ]; then
        # Handle ZIP upload
        local tmpzip="/tmp/metablog_upload_$$.zip"
        echo "$content" | base64 -d > "$tmpzip" 2>/dev/null
        unzip -o "$tmpzip" -d "$SITES_ROOT/$name" >/dev/null 2>&1
        rm -f "$tmpzip"
    elif [ -n "$content" ]; then
        # Single file - assume index.html
        echo "$content" | base64 -d > "$SITES_ROOT/$name/index.html" 2>/dev/null
    fi
    # 3. Fix permissions
    fix_permissions "$SITES_ROOT/$name"
    # 4. Create default index if none exists
    if [ ! -f "$SITES_ROOT/$name/index.html" ]; then
        cat > "$SITES_ROOT/$name/index.html" <<EOF
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>$name</title>
<style>
body { font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
display: flex; justify-content: center; align-items: center;
min-height: 100vh; margin: 0; background: #f5f5f5; }
.container { text-align: center; padding: 2rem; }
h1 { color: #333; }
</style>
</head>
<body>
<div class="container">
<h1>$name</h1>
<p>Site published with MetaBlogizer</p>
</div>
</body>
</html>
EOF
        chmod 644 "$SITES_ROOT/$name/index.html"
    fi
    # 5. Get next port and create a dedicated uhttpd instance
    local port server_address
    port=$(get_next_port)
    server_address=$(uci -q get network.lan.ipaddr || echo "192.168.255.1")
    uci set "uhttpd.metablog_${section_id}=uhttpd"
    uci set "uhttpd.metablog_${section_id}.listen_http=0.0.0.0:$port"
    uci set "uhttpd.metablog_${section_id}.home=$SITES_ROOT/$name"
    uci set "uhttpd.metablog_${section_id}.index_page=index.html"
    uci set "uhttpd.metablog_${section_id}.error_page=/index.html"
    uci commit uhttpd
    /etc/init.d/uhttpd reload 2>/dev/null
    # 6. Create UCI site config
    uci set "$UCI_CONFIG.$section_id=site"
    uci set "$UCI_CONFIG.$section_id.name=$name"
    uci set "$UCI_CONFIG.$section_id.domain=$domain"
    uci set "$UCI_CONFIG.$section_id.ssl=1"
    uci set "$UCI_CONFIG.$section_id.enabled=1"
    uci set "$UCI_CONFIG.$section_id.port=$port"
    uci set "$UCI_CONFIG.$section_id.runtime=uhttpd"
    # 7. Create HAProxy backend if available
    if haproxy_available; then
        local backend_name="metablog_$(echo "$name" | sed 's/[^a-zA-Z0-9]/_/g')"
        uci set "haproxy.$backend_name=backend"
        uci set "haproxy.$backend_name.name=$backend_name"
        uci set "haproxy.$backend_name.mode=http"
        uci set "haproxy.$backend_name.balance=roundrobin"
        uci set "haproxy.$backend_name.enabled=1"
        local server_name="${backend_name}_srv"
        uci set "haproxy.$server_name=server"
        uci set "haproxy.$server_name.backend=$backend_name"
        uci set "haproxy.$server_name.name=srv"
        uci set "haproxy.$server_name.address=$server_address"
        uci set "haproxy.$server_name.port=$port"
        uci set "haproxy.$server_name.weight=100"
        uci set "haproxy.$server_name.check=1"
        uci set "haproxy.$server_name.enabled=1"
        # Create HAProxy vhost (route through mitmproxy WAF; the real backend
        # is kept in original_backend so the WAF can be bypassed later)
        local vhost_name=$(echo "$domain" | sed 's/[^a-zA-Z0-9]/_/g')
        uci set "haproxy.$vhost_name=vhost"
        uci set "haproxy.$vhost_name.domain=$domain"
        uci set "haproxy.$vhost_name.backend=mitmproxy_inspector"
        uci set "haproxy.$vhost_name.original_backend=$backend_name"
        uci set "haproxy.$vhost_name.ssl=1"
        uci set "haproxy.$vhost_name.ssl_redirect=1"
        uci set "haproxy.$vhost_name.acme=1"
        uci set "haproxy.$vhost_name.enabled=1"
        uci set "haproxy.$vhost_name.priority=50"
        # Add cert entry for wildcard SSL
        add_haproxy_cert "$domain"
        uci commit haproxy
        # Add mitmproxy route for WAF inspection
        add_mitmproxy_route "$domain" "$server_address" "$port"
        reload_haproxy &
    fi
    uci commit "$UCI_CONFIG"
    json_init
    json_add_boolean "success" 1
    json_add_string "id" "$section_id"
    json_add_string "name" "$name"
    json_add_string "domain" "$domain"
    json_add_int "port" "$port"
    json_add_string "url" "https://$domain"
    json_dump
}
# Unpublish/revoke site exposure (remove HAProxy vhost but keep site)
# Reads JSON {"id": "<uci section>"} on stdin. The backend and the site
# content are preserved; only the public vhost and its cert entry go away.
method_unpublish_site() {
    local id site_name site_domain
    read -r input
    json_load "$input"
    json_get_var id id
    if [ -z "$id" ]; then
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "Missing site id"
        json_dump
        return
    fi
    site_name=$(get_uci "$id" name "")
    site_domain=$(get_uci "$id" domain "")
    if [ -z "$site_name" ]; then
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "Site not found"
        json_dump
        return
    fi
    # Drop the public vhost; keep the backend for local access
    if uci -q get haproxy >/dev/null 2>&1; then
        local vhost_id
        vhost_id=$(echo "$site_domain" | sed 's/[^a-zA-Z0-9]/_/g')
        uci delete "haproxy.$vhost_id" 2>/dev/null
        # Remove the matching cert entry if one exists
        uci delete "haproxy.cert_$vhost_id" 2>/dev/null
        uci commit haproxy
        reload_haproxy &
    fi
    # Record the site as no longer emancipated
    uci set "$UCI_CONFIG.$id.emancipated=0"
    uci commit "$UCI_CONFIG"
    json_init
    json_add_boolean "success" 1
    json_add_string "message" "Site unpublished"
    json_dump
}
# Set authentication requirement for a site
# Reads JSON {"id": "<uci section>", "auth_required": <flag>} on stdin.
# The flag is normalized to a strict 0/1 before being written (previously
# an empty or arbitrary value was stored verbatim in UCI), and mirrored
# onto the site's HAProxy vhost when one exists.
method_set_auth_required() {
    local id auth_required
    read -r input
    json_load "$input"
    json_get_var id id
    json_get_var auth_required auth_required
    if [ -z "$id" ]; then
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "Missing site id"
        json_dump
        return
    fi
    # Normalize to 0/1 so UCI never stores an empty or arbitrary value
    case "$auth_required" in
        1|true|on|yes) auth_required=1 ;;
        *) auth_required=0 ;;
    esac
    local name domain
    name=$(get_uci "$id" name "")
    domain=$(get_uci "$id" domain "")
    if [ -z "$name" ]; then
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "Site not found"
        json_dump
        return
    fi
    # Update UCI config
    uci set "$UCI_CONFIG.$id.auth_required=$auth_required"
    uci commit "$UCI_CONFIG"
    # If site has HAProxy vhost, update it
    if uci -q get haproxy >/dev/null 2>&1 && [ -n "$domain" ]; then
        local vhost_id=$(echo "$domain" | sed 's/[^a-zA-Z0-9]/_/g')
        if uci -q get "haproxy.$vhost_id" >/dev/null 2>&1; then
            uci set "haproxy.$vhost_id.auth_required=$auth_required"
            uci commit haproxy
            reload_haproxy &
        fi
    fi
    json_init
    json_add_boolean "success" 1
    json_add_string "auth_required" "$auth_required"
    json_dump
}
# Get exposure status for all sites (cert info, emancipation state)
# Thin wrapper: iterates every configured site and delegates the per-site
# record to _add_site_exposure_status.
method_get_sites_exposure_status() {
    SITES_ROOT=$(get_uci main sites_root "$SITES_ROOT")
    json_init
    json_add_array "sites"
    config_load "$UCI_CONFIG"
    config_foreach _add_site_exposure_status site
    json_close_array
    json_dump
}
# config_foreach callback: append one site's exposure record to the open
# "sites" JSON array. $1 - UCI section name.
# Fix: the backend-running probe previously grepped ":<hexport>" anywhere in
# /proc/net/tcp, which also matched the remote-address column; the match is
# now anchored to the local-address field.
_add_site_exposure_status() {
    local section="$1"
    local name domain ssl enabled emancipated auth_required port
    config_get name "$section" name ""
    config_get domain "$section" domain ""
    config_get ssl "$section" ssl "1"
    config_get enabled "$section" enabled "1"
    config_get emancipated "$section" emancipated "0"
    config_get auth_required "$section" auth_required "0"
    config_get port "$section" port ""
    [ -z "$name" ] && return
    json_add_object
    json_add_string "id" "$section"
    json_add_string "name" "$name"
    json_add_string "domain" "$domain"
    json_add_boolean "enabled" "$enabled"
    json_add_boolean "emancipated" "$emancipated"
    json_add_boolean "auth_required" "$auth_required"
    [ -n "$port" ] && json_add_int "port" "$port"
    # Check if HAProxy vhost exists (vhost section id is derived from domain)
    local vhost_exists=0
    if [ -n "$domain" ]; then
        local vhost_id=$(echo "$domain" | sed 's/[^a-zA-Z0-9]/_/g')
        if uci -q get "haproxy.$vhost_id" >/dev/null 2>&1; then
            vhost_exists=1
        fi
    fi
    json_add_boolean "vhost_exists" "$vhost_exists"
    # Quick certificate check - just check if file exists
    # Full expiry check is expensive, use get_hosting_status for that
    if [ -n "$domain" ] && [ "$ssl" = "1" ]; then
        local cert_file=""
        if [ -f "/srv/lxc/haproxy/rootfs/srv/haproxy/certs/${domain}.pem" ]; then
            cert_file="/srv/lxc/haproxy/rootfs/srv/haproxy/certs/${domain}.pem"
        elif [ -f "/etc/acme/${domain}_ecc/fullchain.cer" ]; then
            cert_file="/etc/acme/${domain}_ecc/fullchain.cer"
        fi
        if [ -n "$cert_file" ]; then
            json_add_string "cert_status" "valid"
        else
            json_add_string "cert_status" "missing"
        fi
    else
        json_add_string "cert_status" "none"
    fi
    # Backend running check: look for a socket whose LOCAL address uses the
    # site's port. /proc/net/tcp rows start with "<sl>: <local> <remote> ...",
    # so anchoring on the first colon-separated field isolates the local port.
    local backend_running="0"
    if [ -n "$port" ]; then
        local hex_port=$(printf '%04X' "$port" 2>/dev/null)
        if grep -q "^ *[0-9]*: [0-9A-F]*:${hex_port} " /proc/net/tcp 2>/dev/null; then
            backend_running="1"
        fi
    fi
    json_add_boolean "backend_running" "$backend_running"
    # Has content
    local has_content="0"
    if [ -d "$SITES_ROOT/$name" ] && [ -f "$SITES_ROOT/$name/index.html" ]; then
        has_content="1"
    fi
    json_add_boolean "has_content" "$has_content"
    json_close_object
}
# Emancipate site - KISS ULTIME MODE (DNS + Vortex + HAProxy + SSL)
# Runs asynchronously to avoid XHR timeout - use emancipate_status to poll
# The heavy lifting is delegated to /usr/sbin/metablogizerctl; this method
# only spawns the job and hands back a job_id for polling.
method_emancipate() {
    local id site_name
    read -r input
    json_load "$input"
    json_get_var id id
    if [ -z "$id" ]; then
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "Missing site id"
        json_dump
        return
    fi
    site_name=$(get_uci "$id" name "")
    if [ -z "$site_name" ]; then
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "Site not found"
        json_dump
        return
    fi
    # Bail out early when the helper binary is absent
    if [ ! -x /usr/sbin/metablogizerctl ]; then
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "metablogizerctl not installed"
        json_dump
        return
    fi
    # Per-job log and status files let emancipate_status poll progress
    local job_id="emancipate_${site_name}_$$"
    local job_dir="/tmp/metablogizer_jobs"
    local log_file="$job_dir/${job_id}.log"
    local state_file="$job_dir/${job_id}.status"
    mkdir -p "$job_dir"
    # Detach the long-running work so the RPC reply returns immediately
    (
        echo "running" > "$state_file"
        if /usr/sbin/metablogizerctl emancipate "$site_name" > "$log_file" 2>&1; then
            echo "success" > "$state_file"
        else
            echo "failed" > "$state_file"
        fi
    ) &
    json_init
    json_add_boolean "success" 1
    json_add_string "job_id" "$job_id"
    json_add_string "status" "running"
    json_add_string "site" "$site_name"
    json_dump
}
# Check emancipate job status
# Reads JSON {"job_id": "..."} on stdin and reports the job's current state
# ("running"/"success"/"failed"), its captured output, and a "complete" flag.
method_emancipate_status() {
    local job_id
    read -r input
    json_load "$input"
    json_get_var job_id job_id
    if [ -z "$job_id" ]; then
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "Missing job_id"
        json_dump
        return
    fi
    local job_dir="/tmp/metablogizer_jobs"
    local log_file="$job_dir/${job_id}.log"
    local state_file="$job_dir/${job_id}.status"
    if [ ! -f "$state_file" ]; then
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "Job not found"
        json_dump
        return
    fi
    local state output=""
    state=$(cat "$state_file")
    [ -f "$log_file" ] && output=$(cat "$log_file")
    json_init
    json_add_boolean "success" 1
    json_add_string "status" "$state"
    json_add_string "output" "$output"
    # Terminal states flag completion; file cleanup is left to caller/cron
    case "$state" in
        success|failed) json_add_boolean "complete" 1 ;;
        *) json_add_boolean "complete" 0 ;;
    esac
    json_dump
}
# Enable Tor hidden service for a site
# Reads JSON {"id": "<uci section>"} on stdin, creates the hidden-service
# entry pointing at the site's backend port and marks the site tor_enabled.
method_enable_tor() {
    local id site_name site_port
    read -r input
    json_load "$input"
    json_get_var id id
    if [ -z "$id" ]; then
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "Missing site id"
        json_dump
        return
    fi
    site_name=$(get_uci "$id" name "")
    site_port=$(get_uci "$id" port "")
    if [ -z "$site_name" ]; then
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "Site not found"
        json_dump
        return
    fi
    # Sites served by nginx have no dedicated uhttpd port; default to 80
    [ -n "$site_port" ] || site_port="80"
    # Create Tor hidden service
    create_tor_hidden_service "$site_name" "$site_port"
    # Mark site as Tor-enabled
    uci set "$UCI_CONFIG.$id.tor_enabled=1"
    uci commit "$UCI_CONFIG"
    json_init
    json_add_boolean "success" 1
    json_add_string "message" "Tor hidden service created. Restart Tor Shield to get .onion address."
    json_add_string "name" "$site_name"
    json_add_int "port" "$site_port"
    json_dump
}
# Disable Tor hidden service for a site
# Reads JSON {"id": "<uci section>"} on stdin, removes the hidden-service
# entry and clears the tor_enabled flag.
method_disable_tor() {
    local id site_name
    read -r input
    json_load "$input"
    json_get_var id id
    if [ -z "$id" ]; then
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "Missing site id"
        json_dump
        return
    fi
    site_name=$(get_uci "$id" name "")
    if [ -z "$site_name" ]; then
        json_init
        json_add_boolean "success" 0
        json_add_string "error" "Site not found"
        json_dump
        return
    fi
    # Remove Tor hidden service
    remove_tor_hidden_service "$site_name"
    # Mark site as Tor-disabled
    uci set "$UCI_CONFIG.$id.tor_enabled=0"
    uci commit "$UCI_CONFIG"
    json_init
    json_add_boolean "success" 1
    json_add_string "message" "Tor hidden service removed"
    json_dump
}
# Get Tor status for all sites
# Reports whether the Tor daemon is reachable, plus a per-site list of
# hidden-service state built by _add_tor_status.
method_get_tor_status() {
    local tor_up=0
    is_tor_ready && tor_up=1
    json_init
    json_add_boolean "tor_running" "$tor_up"
    SITES_ROOT=$(get_uci main sites_root "$SITES_ROOT")
    json_add_array "sites"
    config_load "$UCI_CONFIG"
    config_foreach _add_tor_status site
    json_close_array
    json_dump
}
# Append one site's Tor status as a JSON object to the open "sites" array.
# Invoked via config_foreach; $1 is the UCI section id of a 'site' section.
# Fields: id, name, tor_enabled, onion_ready, onion_address (when known).
_add_tor_status() {
	local section="$1"
	local name port onion_address tor_enabled
	config_get name "$section" name ""
	config_get port "$section" port ""
	# Entries without a name are malformed; skip them silently.
	[ -z "$name" ] && return
	onion_address=""
	tor_enabled=0
	if has_tor_service "$name"; then
		tor_enabled=1
		onion_address=$(get_onion_address "$name")
	fi
	json_add_object
	json_add_string "id" "$section"
	json_add_string "name" "$name"
	json_add_boolean "tor_enabled" "$tor_enabled"
	# Fixed: the original used `[ -n ... ] && A || B`, where B would also run
	# if A failed; an explicit if/else also merges the duplicated test.
	# onion_ready means Tor has already generated the .onion hostname.
	if [ -n "$onion_address" ]; then
		json_add_string "onion_address" "$onion_address"
		json_add_boolean "onion_ready" 1
	else
		json_add_boolean "onion_ready" 0
	fi
	json_close_object
}
# Persist global MetaBlogizer settings.
# Only fields present (non-empty) in the request are written; others keep
# their current UCI values.
method_save_settings() {
	local enabled runtime nginx_container sites_root gitea_url
	local pair key val
	read -r input
	json_load "$input"
	json_get_var enabled enabled
	json_get_var runtime runtime
	json_get_var nginx_container nginx_container
	json_get_var sites_root sites_root
	json_get_var gitea_url gitea_url
	# Create the main section on first save
	uci -q get "$UCI_CONFIG.main" >/dev/null 2>&1 || uci set "$UCI_CONFIG.main=metablogizer"
	# Write each provided option; key is the text before the first '=',
	# value is everything after it (values may themselves contain '=').
	for pair in "enabled=$enabled" "runtime=$runtime" \
		"nginx_container=$nginx_container" "sites_root=$sites_root" \
		"gitea_url=$gitea_url"; do
		key=${pair%%=*}
		val=${pair#*=}
		[ -n "$val" ] && uci set "$UCI_CONFIG.main.$key=$val"
	done
	uci commit "$UCI_CONFIG"
	json_init
	json_add_boolean "success" 1
	json_dump
}
# Discover metablog_* uhttpd vhosts not yet tracked in the metablogizer
# config, so the UI can offer to import them.
# Output: { "discovered": [ { instance, section_id, name, home, port,
#                             has_content } ] }
method_discover_vhosts() {
	local line instance section_id tracked_name home listen port name
	SITES_ROOT=$(get_uci main sites_root "$SITES_ROOT")
	json_init
	json_add_array "discovered"
	# BUGFIX: the original piped into `while read`, which runs the loop body
	# in a subshell (busybox ash), so every json_add_* call was discarded and
	# the "discovered" array came back empty. A `for` loop runs in the current
	# shell; `uci show` section lines contain no whitespace, so word-splitting
	# on them is safe.
	for line in $(uci show uhttpd 2>/dev/null | grep "=uhttpd"); do
		# "uhttpd.metablog_site_x=uhttpd" -> "metablog_site_x"
		instance=${line%%=*}
		instance=${instance#uhttpd.}
		# Only consider instances created by metablogizer
		case "$instance" in
			metablog_*) ;;
			*) continue ;;
		esac
		# metablog_site_xxx -> site_xxx
		section_id="${instance#metablog_}"
		tracked_name=$(get_uci "$section_id" name "")
		# Already tracked: nothing to discover
		[ -n "$tracked_name" ] && continue
		home=$(uci -q get "uhttpd.${instance}.home")
		listen=$(uci -q get "uhttpd.${instance}.listen_http")
		# "0.0.0.0:8901" -> "8901" (strip up to the last ':')
		port="${listen##*:}"
		name=$(basename "$home" 2>/dev/null)
		if [ -n "$name" ] && [ -n "$port" ]; then
			json_add_object
			json_add_string "instance" "$instance"
			json_add_string "section_id" "$section_id"
			json_add_string "name" "$name"
			json_add_string "home" "$home"
			json_add_int "port" "$port"
			json_add_boolean "has_content" "$([ -f "$home/index.html" ] && echo 1 || echo 0)"
			json_close_object
		fi
	done
	json_close_array
	json_dump
}
# Import a discovered metablog_* uhttpd vhost into the metablogizer config
# so it appears in the dashboard.
# Input (stdin JSON): { "instance": <uhttpd section>, "name": <site name>,
#                       "domain": <optional, defaults to <name>.local> }
# Output: { "success": bool, "id", "name", "port" } or an error object.
method_import_vhost() {
	local instance name domain home listen port section_id
	read -r input
	json_load "$input"
	json_get_var instance instance
	json_get_var name name
	json_get_var domain domain
	if [ -z "$instance" ] || [ -z "$name" ]; then
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "Missing instance or name"
		json_dump
		return
	fi
	# Pull home dir and listen port from the live uhttpd instance
	home=$(uci -q get "uhttpd.${instance}.home")
	listen=$(uci -q get "uhttpd.${instance}.listen_http")
	# "0.0.0.0:8901" -> "8901" (strip up to the last ':')
	port="${listen##*:}"
	if [ -z "$home" ] || [ -z "$port" ]; then
		json_init
		json_add_boolean "success" 0
		json_add_string "error" "uhttpd instance not found or invalid"
		json_dump
		return
	fi
	# metablog_site_xxx -> site_xxx
	section_id="${instance#metablog_}"
	uci set "$UCI_CONFIG.$section_id=site"
	uci set "$UCI_CONFIG.$section_id.name=$name"
	uci set "$UCI_CONFIG.$section_id.domain=${domain:-$name.local}"
	uci set "$UCI_CONFIG.$section_id.port=$port"
	uci set "$UCI_CONFIG.$section_id.runtime=uhttpd"
	uci set "$UCI_CONFIG.$section_id.ssl=1"
	uci set "$UCI_CONFIG.$section_id.enabled=1"
	uci commit "$UCI_CONFIG"
	json_init
	json_add_boolean "success" 1
	json_add_string "id" "$section_id"
	json_add_string "name" "$name"
	json_add_int "port" "$port"
	json_dump
}
# Repair tracked sites: ensure every metablog_* uhttpd instance has its
# port and runtime recorded in the metablogizer UCI config.
# Output: { "success": true, "fixed": <number of fields repaired> }
method_sync_config() {
	local fixed=0
	local line instance section_id listen port tracked_name current_port current_runtime
	# BUGFIX: the original piped into `while read`, running the loop in a
	# subshell (busybox ash) — the `fixed` counter was lost and the RPC
	# always reported 0. A `for` loop runs in the current shell; `uci show`
	# section lines contain no whitespace, so word-splitting is safe.
	# (Also dropped an unused `home` lookup.)
	for line in $(uci show uhttpd 2>/dev/null | grep "=uhttpd"); do
		# "uhttpd.metablog_site_x=uhttpd" -> "metablog_site_x"
		instance=${line%%=*}
		instance=${instance#uhttpd.}
		case "$instance" in
			metablog_*) ;;
			*) continue ;;
		esac
		section_id="${instance#metablog_}"
		listen=$(uci -q get "uhttpd.${instance}.listen_http")
		# "0.0.0.0:8901" -> "8901"
		port="${listen##*:}"
		# Only repair sites that metablogizer already tracks
		tracked_name=$(get_uci "$section_id" name "")
		[ -z "$tracked_name" ] && continue
		current_port=$(get_uci "$section_id" port "")
		current_runtime=$(get_uci "$section_id" runtime "")
		if [ -z "$current_port" ] && [ -n "$port" ]; then
			uci set "$UCI_CONFIG.$section_id.port=$port"
			fixed=$((fixed + 1))
		fi
		if [ -z "$current_runtime" ]; then
			uci set "$UCI_CONFIG.$section_id.runtime=uhttpd"
			fixed=$((fixed + 1))
		fi
	done
	uci commit "$UCI_CONFIG"
	json_init
	json_add_boolean "success" 1
	json_add_int "fixed" "$fixed"
	json_dump
}
# Main RPC interface
# rpcd invokes this executable two ways:
#   $0 list           -> print the advertised methods and their signatures
#   $0 call <method>  -> read JSON arguments on stdin, print a JSON result
case "$1" in
	list)
		# Method/signature advertisement consumed by ubus. The delimiter is
		# quoted ('EOF') so the body is emitted literally, with no shell
		# expansion — it must remain valid JSON.
		cat <<'EOF'
{
	"status": {},
	"list_sites": {},
	"get_site": { "id": "string" },
	"create_site": { "name": "string", "domain": "string", "gitea_repo": "string", "ssl": "boolean", "description": "string" },
	"update_site": { "id": "string", "name": "string", "domain": "string", "gitea_repo": "string", "ssl": "boolean", "enabled": "boolean", "description": "string" },
	"delete_site": { "id": "string" },
	"sync_site": { "id": "string" },
	"get_publish_info": { "id": "string" },
	"upload_file": { "id": "string", "filename": "string", "content": "string" },
	"upload_chunk": { "upload_id": "string", "data": "string", "index": 0 },
	"upload_finalize": { "upload_id": "string", "site_id": "string", "filename": "string" },
	"create_site_from_upload": { "upload_id": "string", "name": "string", "domain": "string", "is_zip": "string" },
	"list_files": { "id": "string" },
	"get_settings": {},
	"save_settings": { "enabled": "boolean", "nginx_container": "string", "sites_root": "string" },
	"get_hosting_status": {},
	"check_site_health": { "id": "string" },
	"repair_site": { "id": "string" },
	"enable_tor": { "id": "string" },
	"disable_tor": { "id": "string" },
	"get_tor_status": {},
	"discover_vhosts": {},
	"import_vhost": { "instance": "string", "name": "string", "domain": "string" },
	"sync_config": {},
	"emancipate": { "id": "string" },
	"emancipate_status": { "job_id": "string" },
	"upload_and_create_site": { "name": "string", "domain": "string", "content": "string", "is_zip": "string" },
	"unpublish_site": { "id": "string" },
	"set_auth_required": { "id": "string", "auth_required": "string" },
	"get_sites_exposure_status": {}
}
EOF
		;;
	call)
		# Dispatch to the matching method_* implementation; each reads its
		# JSON arguments from stdin and prints a JSON response on stdout.
		case "$2" in
			status) method_status ;;
			list_sites) method_list_sites ;;
			get_site) method_get_site ;;
			create_site) method_create_site ;;
			update_site) method_update_site ;;
			delete_site) method_delete_site ;;
			sync_site) method_sync_site ;;
			get_publish_info) method_get_publish_info ;;
			upload_file) method_upload_file ;;
			upload_chunk) method_upload_chunk ;;
			upload_finalize) method_upload_finalize ;;
			create_site_from_upload) method_create_site_from_upload ;;
			list_files) method_list_files ;;
			get_settings) method_get_settings ;;
			save_settings) method_save_settings ;;
			get_hosting_status) method_get_hosting_status ;;
			check_site_health) method_check_site_health ;;
			repair_site) method_repair_site ;;
			enable_tor) method_enable_tor ;;
			disable_tor) method_disable_tor ;;
			get_tor_status) method_get_tor_status ;;
			discover_vhosts) method_discover_vhosts ;;
			import_vhost) method_import_vhost ;;
			sync_config) method_sync_config ;;
			emancipate) method_emancipate ;;
			emancipate_status) method_emancipate_status ;;
			upload_and_create_site) method_upload_and_create_site ;;
			unpublish_site) method_unpublish_site ;;
			set_auth_required) method_set_auth_required ;;
			get_sites_exposure_status) method_get_sites_exposure_status ;;
			*) echo '{"error": "unknown method"}' ;;
		esac
		;;
esac