fix(droplet,dpi): Resolve publish hang and broken pipe errors

- dropletctl: Remove pipe to grep that blocked on background children
- metablogizerctl: Background HAProxy generate/reload (~90s with 95 certs)
- dpi-lan-collector: Pre-compute flow counts in single pass instead of
  spawning grep per client (eliminates broken pipe errors)

Publish time reduced from ~2 min to ~35 seconds.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
CyberMind-FR 2026-03-17 07:39:09 +01:00
parent ece237d194
commit ddf480e6ed
3 changed files with 39 additions and 15 deletions

View File

@@ -181,9 +181,10 @@ NFOEOF
log_info "Registered MetaBlog site on port $port" log_info "Registered MetaBlog site on port $port"
# Use metablogizerctl to fully publish (creates uhttpd, HAProxy, mitmproxy routes) # Use metablogizerctl to fully publish (creates uhttpd, HAProxy, mitmproxy routes)
# Note: HAProxy config regeneration runs in background (~90s), publish returns immediately
if command -v metablogizerctl >/dev/null 2>&1; then if command -v metablogizerctl >/dev/null 2>&1; then
log_info "Running metablogizerctl publish..." log_info "Running metablogizerctl publish..."
metablogizerctl publish "$name" 2>&1 | grep -E "^\[" || true metablogizerctl publish "$name" 2>&1
fi fi
fi fi

View File

@@ -507,11 +507,14 @@ cmd_publish() {
uci commit haproxy uci commit haproxy
# Regenerate HAProxy config and reload container # Regenerate HAProxy config and reload container (in background - takes ~90s with many vhosts)
/usr/sbin/haproxyctl generate 2>/dev/null (
/usr/sbin/haproxyctl reload 2>/dev/null /usr/sbin/haproxyctl generate >/dev/null 2>&1
/usr/sbin/haproxyctl reload >/dev/null 2>&1
) &
log_info "Site published!" log_info "Site published!"
log_info "HAProxy config regenerating in background..."
echo "" echo ""
echo "URL: https://$domain" echo "URL: https://$domain"
echo "" echo ""
@@ -555,8 +558,9 @@ cmd_delete() {
uci delete haproxy.metablog_${name} 2>/dev/null uci delete haproxy.metablog_${name} 2>/dev/null
uci delete haproxy.metablog_${name}_srv 2>/dev/null uci delete haproxy.metablog_${name}_srv 2>/dev/null
uci commit haproxy uci commit haproxy
/usr/sbin/haproxyctl generate 2>/dev/null # Background HAProxy regeneration (takes ~90s with many vhosts)
/etc/init.d/haproxy reload 2>/dev/null (/usr/sbin/haproxyctl generate >/dev/null 2>&1 && /etc/init.d/haproxy reload >/dev/null 2>&1) &
log_info "HAProxy config regenerating in background..."
fi fi
# Remove site config # Remove site config
@@ -994,9 +998,9 @@ _emancipate_haproxy() {
uci commit haproxy uci commit haproxy
# Generate HAProxy config # Generate HAProxy config (in background - takes ~90s with many vhosts)
if command -v haproxyctl >/dev/null 2>&1; then if command -v haproxyctl >/dev/null 2>&1; then
haproxyctl generate 2>/dev/null haproxyctl generate >/dev/null 2>&1 &
fi fi
} }

View File

@@ -57,21 +57,39 @@ collect_iface_stats() {
collect_lan_clients() { collect_lan_clients() {
local timestamp=$(date -Iseconds) local timestamp=$(date -Iseconds)
local now=$(date +%s) local now=$(date +%s)
local flow_counts_file="/tmp/dpi_flow_counts_$$"
# Use awk to parse ARP table and generate JSON # Pre-compute flow counts per source IP in a single pass (avoids broken pipes from spawning grep per client)
awk -v lan_if="$LAN_IF" -v ts="$timestamp" -v now="$now" ' awk '{
for (i = 1; i <= NF; i++) {
if ($i ~ /^src=/) {
split($i, a, "=")
ips[a[2]]++
break
}
}
}
END {
for (ip in ips) print ip, ips[ip]
}' /proc/net/nf_conntrack 2>/dev/null > "$flow_counts_file"
# Parse ARP table and look up pre-computed flow counts
awk -v lan_if="$LAN_IF" -v ts="$timestamp" -v now="$now" -v counts_file="$flow_counts_file" '
BEGIN { BEGIN {
# Load flow counts into associative array
while ((getline line < counts_file) > 0) {
split(line, parts)
flow_counts[parts[1]] = parts[2]
}
close(counts_file)
printf "{\"timestamp\":\"%s\",\"clients\":[", ts printf "{\"timestamp\":\"%s\",\"clients\":[", ts
first = 1 first = 1
} }
NR > 1 && $6 == lan_if && $4 != "00:00:00:00:00:00" { NR > 1 && $6 == lan_if && $4 != "00:00:00:00:00:00" {
ip = $1 ip = $1
mac = $4 mac = $4
# Count flows from conntrack flows = (ip in flow_counts) ? flow_counts[ip] : 0
cmd = "grep -c \"src=" ip "\" /proc/net/nf_conntrack 2>/dev/null || echo 0"
cmd | getline flows
close(cmd)
flows = flows + 0
if (first == 0) printf "," if (first == 0) printf ","
printf "{\"ip\":\"%s\",\"mac\":\"%s\",\"flows\":%d,\"last_seen\":%d}", ip, mac, flows, now printf "{\"ip\":\"%s\",\"mac\":\"%s\",\"flows\":%d,\"last_seen\":%d}", ip, mac, flows, now
@@ -82,6 +100,7 @@ collect_lan_clients() {
} }
' /proc/net/arp > "$CLIENTS_FILE.tmp" ' /proc/net/arp > "$CLIENTS_FILE.tmp"
rm -f "$flow_counts_file"
mv "$CLIENTS_FILE.tmp" "$CLIENTS_FILE" mv "$CLIENTS_FILE.tmp" "$CLIENTS_FILE"
} }