#!/bin/sh
# ╔═══════════════════════════════════════════════════════════════════╗
# ║   ⚡ CYBERFEED v0.2 - RSS Aggregator for OpenWrt/SecuBox ⚡       ║
# ║   Cyberpunk Feed Analyzer with Timeline & Audio Preview           ║
# ║   Author: CyberMind.FR | License: MIT                             ║
# ╚═══════════════════════════════════════════════════════════════════╝
#
# Usage: cyberfeed {sync|status|list|add|delete}
# Feeds config format (pipe-separated): NAME|URL|TYPE|CATEGORY

# OpenWrt UCI helpers (config_load / config_get). Guarded so the
# script can still be exercised off-device.
[ -r /lib/functions.sh ] && . /lib/functions.sh

# === CONFIGURATION (defaults; overridden by UCI in load_config) ===
CYBERFEED_DIR="/tmp/cyberfeed"
CACHE_DIR="${CYBERFEED_DIR}/cache"
OUTPUT_DIR="${CYBERFEED_DIR}/output"
CONFIG_FILE="/etc/cyberfeed/feeds.conf"
HISTORY_FILE="/var/lib/cyberfeed/history.json"
MEDIA_DIR="/srv/cyberfeed/media"
MAX_ITEMS=20
CACHE_TTL=300
DOWNLOAD_MEDIA=0
GENERATE_TIMELINE=1

# Load UCI config (section "main" of /etc/config/cyberfeed), keeping
# the defaults above when a UCI option is absent. No-op off-device.
load_config() {
    command -v config_load >/dev/null 2>&1 || return 0
    config_load cyberfeed
    config_get MAX_ITEMS main max_items 20
    config_get CACHE_TTL main cache_ttl 300
    config_get OUTPUT_DIR main output_dir "/tmp/cyberfeed/output"
    config_get HISTORY_FILE main history_file "/var/lib/cyberfeed/history.json"
    config_get MEDIA_DIR main media_dir "/srv/cyberfeed/media"
    config_get DOWNLOAD_MEDIA main download_media 0
    config_get GENERATE_TIMELINE main generate_timeline 1
}

# === HISTORY MANAGEMENT (reserved for dedupe / media tracking) ===

# Create the history file with an empty schema if missing.
init_history() {
    local dir
    dir=$(dirname "$HISTORY_FILE")
    mkdir -p "$dir"
    [ -f "$HISTORY_FILE" ] || echo '{"seen":[],"downloaded":[]}' > "$HISTORY_FILE"
}

# True if $1 appears anywhere in the history file.
# NOTE(review): loose substring match — also hits the "downloaded"
# array; acceptable for dedupe (superset of "seen").
is_seen() {
    local id="$1"
    grep -q "\"$id\"" "$HISTORY_FILE" 2>/dev/null
}

# Append $1 to the "seen" array. Crude string splice (no jq on stock
# OpenWrt). Ids are sanitised so they cannot break the sed expression
# or the JSON, and the empty-array case is handled separately so no
# dangling comma is produced (the old code emitted `["id",]`).
mark_seen() {
    local id
    id=$(printf '%s' "$1" | tr -cd 'A-Za-z0-9._:-')
    [ -n "$id" ] || return 0
    [ -f "$HISTORY_FILE" ] || return 0
    is_seen "$id" && return 0
    if grep -q '"seen":\[\]' "$HISTORY_FILE"; then
        sed -i "s/\"seen\":\[\]/\"seen\":[\"$id\"]/" "$HISTORY_FILE" 2>/dev/null
    else
        sed -i "s/\"seen\":\[/\"seen\":[\"$id\",/" "$HISTORY_FILE" 2>/dev/null
    fi
}

# === RSS FETCHER ===

# Fetch a feed with a TTL-based file cache.
# $1=url $2=name ; prints the XML on stdout, returns 1 on total failure.
# Downloads to a temp file so a failed fetch never clobbers a
# stale-but-usable cached copy (which is served as a fallback).
fetch_feed() {
    local url="$1"
    local name="$2"
    local cache_file="${CACHE_DIR}/${name}.xml"
    local tmp_file="${cache_file}.tmp"
    local file_time now age

    if [ -f "$cache_file" ]; then
        file_time=$(stat -c %Y "$cache_file" 2>/dev/null || echo 0)
        now=$(date +%s)
        age=$((now - file_time))
        if [ "$age" -lt "$CACHE_TTL" ]; then
            cat "$cache_file"
            return 0
        fi
    fi

    if wget -q -T 15 -O "$tmp_file" "$url" 2>/dev/null && [ -s "$tmp_file" ]; then
        mv "$tmp_file" "$cache_file"
        cat "$cache_file"
        return 0
    fi
    rm -f "$tmp_file"

    # Network down: serve the stale cache rather than nothing.
    if [ -s "$cache_file" ]; then
        cat "$cache_file"
        return 0
    fi
    return 1
}

# === ENHANCED RSS PARSER (BusyBox AWK compatible) ===

# Parse RSS 2.0 / Atom XML into a stream of JSON objects, each
# followed by a trailing comma (the caller strips the last one).
# $1=xml $2=source-name $3=category
parse_rss() {
    local xml="$1"
    local source="$2"
    local category="$3"

    printf '%s\n' "$xml" | awk -v source="$source" -v category="$category" -v max="$MAX_ITEMS" '
        # Text content of the first <tag ...>...</tag>; unwraps CDATA.
        function extract_tag(str, tag,    start, rest, content) {
            if (match(str, "<" tag "[^>]*>")) {
                start = RSTART + RLENGTH
                rest = substr(str, start)
                if (match(rest, "</" tag ">")) {
                    content = substr(rest, 1, RSTART - 1)
                    sub(/^[ \t\r\n]*<!\[CDATA\[/, "", content)
                    sub(/\]\]>[ \t\r\n]*$/, "", content)
                    return content
                }
            }
            return ""
        }

        # Value of attr="..." on the first <tag ...> element.
        function extract_attr(str, tag, attr,    tagstr, val) {
            if (match(str, "<" tag "[^>]*>")) {
                tagstr = substr(str, RSTART, RLENGTH)
                if (match(tagstr, attr "=\"[^\"]*\"")) {
                    # skip past `attr="` to the value, trim at closing quote
                    val = substr(tagstr, RSTART + length(attr) + 2)
                    sub(/".*/, "", val)
                    return val
                }
            }
            return ""
        }

        # Prefix themed emoji on keywords. "&" re-inserts the matched
        # text so the original capitalization is preserved (the old
        # code forced everything to lowercase).
        function emojify(text) {
            gsub(/[Hh]ack/, "🔓&", text)
            gsub(/[Bb]reach/, "🔓&", text)
            gsub(/[Ee]xploit/, "🔓&", text)
            gsub(/[Vv]ulnerab/, "🔓&", text)
            gsub(/[Ss]ecur/, "🛡️&", text)
            gsub(/[Pp]rotect/, "🛡️&", text)
            gsub(/[Ff]irewall/, "🛡️&", text)
            gsub(/[Cc]yber/, "⚡&", text)
            gsub(/[Ee]ncrypt/, "🔐&", text)
            gsub(/[Cc]rypto/, "🔐&", text)
            gsub(/[Mm]alware/, "☠️&", text)
            gsub(/[Vv]irus/, "☠️&", text)
            gsub(/[Aa]ttack/, "💀&", text)
            gsub(/[Tt]hreat/, "💀&", text)
            gsub(/[Nn]etwork/, "🌐&", text)
            gsub(/[Ss]erver/, "💾&", text)
            gsub(/[Cc]loud/, "💾&", text)
            gsub(/[Cc]ode/, "💻&", text)
            gsub(/[Ll]inux/, "🐧&", text)
            gsub(/[Gg]ithub/, "🐧&", text)
            gsub(/AI/, "🤖&", text)
            gsub(/[Uu]pdate/, "📡&", text)
            gsub(/[Ll]aunch/, "🚀&", text)
            gsub(/[Rr]elease/, "🚀&", text)
            gsub(/[Pp]odcast/, "🎧&", text)
            gsub(/[Rr]adio/, "🎧&", text)
            gsub(/[Vv]ideo/, "📺&", text)
            return text
        }

        # Escape a string for embedding in a JSON double-quoted value.
        function jsonesc(s) {
            gsub(/\\/, "\\\\", s)
            gsub(/"/, "\\\"", s)
            gsub(/[\r\n\t]/, " ", s)
            return s
        }

        BEGIN {
            # One record per RSS <item> / Atom <entry> (regex RS is
            # supported by BusyBox awk, gawk and mawk).
            RS = "<(item|entry)[^>]*>"
            count = 0
        }

        # Record 1 is the channel/feed header before the first item;
        # parsing it would emit the feed title as a bogus entry.
        NR == 1 { next }

        count < max {
            title = extract_tag($0, "title")

            link = extract_tag($0, "link")
            # Atom links carry the URL in the href attribute
            if (link == "") link = extract_attr($0, "link", "href")

            # Date: RSS, then Atom, then Dublin Core
            date = extract_tag($0, "pubDate")
            if (date == "") date = extract_tag($0, "published")
            if (date == "") date = extract_tag($0, "updated")
            if (date == "") date = extract_tag($0, "dc:date")

            desc = extract_tag($0, "description")
            if (desc == "") desc = extract_tag($0, "summary")
            if (desc == "") desc = extract_tag($0, "content")

            # Enclosure (podcasts), falling back to media:content
            enclosure = extract_attr($0, "enclosure", "url")
            enclosure_type = extract_attr($0, "enclosure", "type")
            if (enclosure == "") {
                enclosure = extract_attr($0, "media:content", "url")
                enclosure_type = extract_attr($0, "media:content", "type")
            }

            duration = extract_tag($0, "itunes:duration")

            guid = extract_tag($0, "guid")
            if (guid == "") guid = link

            if (title == "") next

            title = emojify(title)

            # Strip HTML and truncate BEFORE escaping, so the 280-char
            # cut can never split a JSON escape sequence in half.
            gsub(/<[^>]+>/, "", desc)
            desc = substr(desc, 1, 280)
            desc = emojify(desc)

            title = jsonesc(title)
            desc = jsonesc(desc)
            link = jsonesc(link)
            enclosure = jsonesc(enclosure)
            date = jsonesc(date)
            duration = jsonesc(duration)
            guid = jsonesc(guid)

            # Media type: MIME first, file extension as fallback
            media_type = ""
            if (enclosure_type ~ /audio/) media_type = "audio"
            else if (enclosure_type ~ /video/) media_type = "video"
            else if (enclosure ~ /\.(mp3|m4a|ogg|wav)/) media_type = "audio"
            else if (enclosure ~ /\.(mp4|webm|mkv)/) media_type = "video"

            printf "{\"title\":\"%s\",\"link\":\"%s\",\"date\":\"%s\",\"desc\":\"%s\",\"source\":\"%s\",\"category\":\"%s\",\"enclosure\":\"%s\",\"media_type\":\"%s\",\"duration\":\"%s\",\"guid\":\"%s\"},",
                title, link, date, desc, source, category, enclosure, media_type, duration, guid
            count++
        }
    '
}

# === TIMELINE HTML GENERATOR (Amber & Green CRT Colors) ===

# Write the chronological timeline page. The page loads feeds.json
# client-side; DOM is built with textContent so feed data cannot
# inject markup.
generate_timeline() {
    local json_file="$1"
    local output_file="${OUTPUT_DIR}/timeline.html"

    cat > "$output_file" << 'TIMELINEHTML'
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>⚡ CYBERFEED TIMELINE ⚡</title>
<style>
body{background:#000;color:#33ff33;font-family:"Courier New",monospace;margin:0;padding:1em}
a{color:#ffb000;text-decoration:none}
a:hover{text-decoration:underline}
h1{color:#ffb000;text-align:center;text-shadow:0 0 8px #ffb000}
.sub{text-align:center;letter-spacing:.2em;margin-bottom:1em}
.entry{border-left:2px solid #33ff33;margin:0 0 1em 1em;padding:0 0 0 1em}
.date{color:#888;font-size:.8em}
.src{color:#ffb000;font-size:.8em}
.desc{color:#9f9;font-size:.9em}
audio{width:100%;margin-top:.3em}
footer{border-top:1px solid #33ff33;margin-top:1em;padding-top:.5em;text-align:center;color:#888}
</style>
</head>
<body>
<h1>⚡ TIMELINE ⚡</h1>
<div class="sub">[ NEURAL FEED MATRIX ]</div>
<div id="timeline">LOADING DATA STREAM...</div>
<footer><span id="count">--</span> ITEMS | <span id="sync">SYNCING...</span> | CYBERFEED v0.2</footer>
<script>
fetch('feeds.json')
  .then(function (r) { return r.json(); })
  .then(function (items) {
    items.sort(function (a, b) { return new Date(b.date) - new Date(a.date); });
    var tl = document.getElementById('timeline');
    tl.textContent = '';
    items.forEach(function (it) {
      var e = document.createElement('div'); e.className = 'entry';
      var d = document.createElement('div'); d.className = 'date';
      d.textContent = it.date || ''; e.appendChild(d);
      var a = document.createElement('a');
      a.href = it.link; a.textContent = it.title; e.appendChild(a);
      var s = document.createElement('div'); s.className = 'src';
      s.textContent = '[' + it.source + ' / ' + it.category + ']'; e.appendChild(s);
      if (it.desc) {
        var p = document.createElement('div'); p.className = 'desc';
        p.textContent = it.desc; e.appendChild(p);
      }
      if (it.media_type === 'audio' && it.enclosure) {
        var au = document.createElement('audio');
        au.controls = true; au.preload = 'none'; au.src = it.enclosure;
        e.appendChild(au);
      }
      tl.appendChild(e);
    });
    document.getElementById('count').textContent = items.length;
    document.getElementById('sync').textContent = new Date().toLocaleTimeString();
  })
  .catch(function () {
    document.getElementById('timeline').textContent = 'DATA STREAM OFFLINE';
  });
</script>
</body>
</html>
TIMELINEHTML
}

# === MAIN HTML GENERATOR (with audio player) ===

# Write the main dashboard page (card grid, audio preview).
generate_html() {
    local json_file="$1"
    local output_file="${OUTPUT_DIR}/index.html"

    cat > "$output_file" << 'HTMLEOF'
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>⚡ CYBERFEED ⚡ Neural RSS Matrix</title>
<style>
body{background:#050505;color:#33ff33;font-family:"Courier New",monospace;margin:0;padding:1em}
a{color:#ffb000;text-decoration:none}
a:hover{text-decoration:underline}
header{text-align:center;border-bottom:1px solid #33ff33;padding-bottom:.5em}
h1{color:#ffb000;text-shadow:0 0 10px #ffb000;margin:.2em 0}
.sub{letter-spacing:.3em}
.stats{text-align:center;color:#888;margin:.5em 0}
.stats b{color:#33ff33}
#grid{display:grid;grid-template-columns:repeat(auto-fill,minmax(320px,1fr));gap:1em;margin-top:1em}
.card{border:1px solid #33ff33;padding:.8em;background:#0a0f0a}
.card .meta{color:#888;font-size:.8em}
.card .desc{color:#9f9;font-size:.9em}
audio{width:100%;margin-top:.4em}
#empty{text-align:center;margin-top:3em;color:#888}
#empty .orb{font-size:3em}
</style>
</head>
<body>
<header>
<h1>⚡ CYBERFEED ⚡</h1>
<div class="sub">NEURAL RSS MATRIX INTERFACE</div>
<div class="stats">FEEDS: <b id="feeds">--</b> SYNC: <b id="sync">--:--:--</b> STATUS: <b>ONLINE</b></div>
</header>
<div id="empty"><div class="orb">🔮</div>Awaiting Neural Feed Connection...</div>
<div id="grid"></div>
<script>
fetch('feeds.json')
  .then(function (r) { return r.json(); })
  .then(function (items) {
    if (!items.length) { return; }
    document.getElementById('empty').style.display = 'none';
    var grid = document.getElementById('grid');
    var sources = {};
    items.forEach(function (it) {
      sources[it.source] = true;
      var card = document.createElement('div'); card.className = 'card';
      var a = document.createElement('a');
      a.href = it.link; a.textContent = it.title; card.appendChild(a);
      var meta = document.createElement('div'); meta.className = 'meta';
      meta.textContent = '[' + it.source + '] ' + (it.date || '');
      card.appendChild(meta);
      if (it.desc) {
        var p = document.createElement('div'); p.className = 'desc';
        p.textContent = it.desc; card.appendChild(p);
      }
      if (it.media_type === 'audio' && it.enclosure) {
        var au = document.createElement('audio');
        au.controls = true; au.preload = 'none'; au.src = it.enclosure;
        card.appendChild(au);
      }
      grid.appendChild(card);
    });
    document.getElementById('feeds').textContent = Object.keys(sources).length;
    document.getElementById('sync').textContent = new Date().toLocaleTimeString();
  })
  .catch(function () {});
</script>
</body>
</html>
HTMLEOF
}

# === HELPERS ===

# True if a feed named $1 exists in CONFIG_FILE (exact first-field
# match via awk — no regex metacharacter injection, unlike grep).
feed_exists() {
    [ -f "$CONFIG_FILE" ] || return 1
    awk -F'|' -v n="$1" '$1 == n { found = 1 } END { exit found ? 0 : 1 }' "$CONFIG_FILE"
}

# === STATUS ===

# Print service status as JSON.
get_status() {
    local enabled feed_count=0 item_count=0 last_sync=0
    enabled=$(uci -q get cyberfeed.main.enabled 2>/dev/null) || enabled=0
    [ -n "$enabled" ] || enabled=0

    if [ -f "${OUTPUT_DIR}/feeds.json" ]; then
        # NB: no "|| echo 0" — `grep -c` prints "0" AND exits 1 on
        # zero matches, so the old code produced the value "0\n0".
        item_count=$(grep -c '"title"' "${OUTPUT_DIR}/feeds.json" 2>/dev/null)
        [ -n "$item_count" ] || item_count=0
        last_sync=$(stat -c %Y "${OUTPUT_DIR}/feeds.json" 2>/dev/null) || last_sync=0
        [ -n "$last_sync" ] || last_sync=0
    fi

    if [ -f "$CONFIG_FILE" ]; then
        feed_count=$(grep -v "^#" "$CONFIG_FILE" 2>/dev/null | grep -c "|")
        [ -n "$feed_count" ] || feed_count=0
    fi

    cat << EOF
{
  "enabled": $enabled,
  "feed_count": $feed_count,
  "item_count": $item_count,
  "last_sync": $last_sync,
  "has_timeline": $([ -f "${OUTPUT_DIR}/timeline.html" ] && echo "true" || echo "false")
}
EOF
}

# === SYNC FEEDS ===

# Fetch every configured feed, aggregate to feeds.json, regenerate the
# HTML pages, and publish via symlinks under /www/cyberfeed.
sync_feeds() {
    load_config
    init_history
    mkdir -p "$CACHE_DIR" "$OUTPUT_DIR"

    if [ ! -f "$CONFIG_FILE" ]; then
        echo '[]' > "${OUTPUT_DIR}/feeds.json"
        generate_html "${OUTPUT_DIR}/feeds.json"
        return 0
    fi

    local json_items="["
    local feed_count=0
    local name url type category raw_xml parsed

    # `|| [ -n "$name" ]` keeps the last line even without a trailing newline
    while IFS='|' read -r name url type category || [ -n "$name" ]; do
        case "$name" in ''|\#*) continue ;; esac
        [ -z "$category" ] && category="custom"

        echo "📡 Fetching: $name" >&2
        raw_xml=$(fetch_feed "$url" "$name")
        if [ -n "$raw_xml" ]; then
            parsed=$(parse_rss "$raw_xml" "$name" "$category")
            if [ -n "$parsed" ]; then
                json_items="${json_items}${parsed}"
                feed_count=$((feed_count + 1))
            fi
        fi
    done < "$CONFIG_FILE"

    # Strip the trailing comma left by parse_rss, close the array
    json_items=$(printf '%s' "$json_items" | sed 's/,$//')
    json_items="${json_items}]"
    printf '%s\n' "$json_items" > "${OUTPUT_DIR}/feeds.json"

    generate_html "${OUTPUT_DIR}/feeds.json"
    if [ "$GENERATE_TIMELINE" = "1" ]; then
        generate_timeline "${OUTPUT_DIR}/feeds.json"
    fi

    # Publish under the web root (ln -sf is idempotent)
    mkdir -p /www/cyberfeed 2>/dev/null
    ln -sf "${OUTPUT_DIR}/index.html" /www/cyberfeed/index.html 2>/dev/null
    ln -sf "${OUTPUT_DIR}/feeds.json" /www/cyberfeed/feeds.json 2>/dev/null
    ln -sf "${OUTPUT_DIR}/timeline.html" /www/cyberfeed/timeline.html 2>/dev/null

    echo ""
    echo "╔═══════════════════════════════════════════════════════════════╗"
    echo "║          ⚡ CYBERFEED v0.2 SYNC COMPLETE ⚡                   ║"
    echo "╠═══════════════════════════════════════════════════════════════╣"
    printf "║ 📊 Feeds processed: %-36s ║\n" "$feed_count"
    echo "║ 📁 Output: /www/cyberfeed/                                    ║"
    echo "║ 📅 Timeline: /www/cyberfeed/timeline.html                     ║"
    echo "╚═══════════════════════════════════════════════════════════════╝"
}

# === LIST FEEDS ===

# Print the configured feeds as a JSON array.
list_feeds() {
    if [ ! -f "$CONFIG_FILE" ]; then
        echo "[]"
        return
    fi

    echo "["
    local first=1
    local name url type category
    while IFS='|' read -r name url type category || [ -n "$name" ]; do
        case "$name" in ''|\#*) continue ;; esac
        [ "$first" = "1" ] || echo ","
        first=0
        printf '{"name":"%s","url":"%s","type":"%s","category":"%s"}' \
            "$name" "$url" "${type:-rss}" "${category:-custom}"
    done < "$CONFIG_FILE"
    echo "]"
}

# === ADD FEED ===

# Add a feed line "NAME|URL|TYPE|CATEGORY". $1=name $2=url [$3=type] [$4=category]
add_feed() {
    local name="$1"
    local url="$2"
    local type="${3:-rss}"
    local category="${4:-custom}"

    if [ -z "$name" ] || [ -z "$url" ]; then
        echo '{"success":false,"error":"Name and URL required"}'
        return 1
    fi

    # The config format is pipe-separated; a "|" in any field would
    # corrupt it, so reject such input outright.
    case "${name}${url}${type}${category}" in
        *'|'*)
            echo '{"success":false,"error":"Fields may not contain |"}'
            return 1
            ;;
    esac

    if feed_exists "$name"; then
        echo '{"success":false,"error":"Feed already exists"}'
        return 1
    fi

    mkdir -p "$(dirname "$CONFIG_FILE")" 2>/dev/null
    echo "${name}|${url}|${type}|${category}" >> "$CONFIG_FILE"
    echo '{"success":true}'
}

# === DELETE FEED ===

# Remove feed $1 from the config and drop its cached XML.
delete_feed() {
    local name="$1"

    if [ -z "$name" ]; then
        echo '{"success":false,"error":"Name required"}'
        return 1
    fi

    if feed_exists "$name"; then
        # awk exact-match rewrite: immune to regex/sed-delimiter
        # injection through $name (the old sed address broke on "/").
        awk -F'|' -v n="$name" '$1 != n' "$CONFIG_FILE" > "${CONFIG_FILE}.tmp" &&
            mv "${CONFIG_FILE}.tmp" "$CONFIG_FILE"
        rm -f "${CACHE_DIR}/${name}.xml"
        echo '{"success":true}'
    else
        echo '{"success":false,"error":"Feed not found"}'
        return 1
    fi
}

# === MAIN ===
case "$1" in
    sync)
        sync_feeds
        ;;
    status)
        get_status
        ;;
    list)
        list_feeds
        ;;
    add)
        add_feed "$2" "$3" "$4" "$5"
        ;;
    delete)
        delete_feed "$2"
        ;;
    *)
        echo "Usage: $0 {sync|status|list|add|delete}"
        echo ""
        echo "CyberFeed v0.2 - Enhanced RSS Aggregator"
        echo ""
        echo "Commands:"
        echo "  sync              Fetch feeds, generate HTML + Timeline"
        echo "  status            Show service status (JSON)"
        echo "  list              List configured feeds (JSON)"
        echo "  add NAME URL [TYPE] [CATEGORY]"
        echo "  delete NAME       Remove a feed"
        echo ""
        echo "Output:"
        echo "  /www/cyberfeed/index.html    - Main dashboard"
        echo "  /www/cyberfeed/timeline.html - Chronological timeline"
        echo "  /www/cyberfeed/feeds.json    - Raw JSON data"
        ;;
esac