feat(waf): Add sensitivity-based auto-ban system with CrowdSec integration
WAF Auto-ban Features: - Three sensitivity levels: aggressive, moderate, permissive - Aggressive: Immediate ban on first critical threat - Moderate: Ban after 3 attempts in 5 minutes (default) - Permissive: Ban after 5 attempts in 1 hour - Attempt tracking with configurable thresholds Critical threats (immediate in aggressive/moderate): - CVE exploits, SQL injection, Command injection - XXE, Log4Shell, SSTI attacks CrowdSec Integration: - Auto-ban requests written to /srv/mitmproxy/autoban-requests.log - Cron job processes bans every minute via mitmproxyctl - Bans sent to CrowdSec for network-wide enforcement New Commands: - mitmproxyctl process-autoban: Process pending bans - mitmproxyctl reload-autoban: Reload config after UCI changes CrowdSec Dashboard: - Added ban button to alerts page - Modal confirmation with 24h ban duration - Real-time banned IP tracking Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
parent
db847ba1cd
commit
56d45fe7c2
@ -2,17 +2,40 @@
|
||||
'require view';
|
||||
'require dom';
|
||||
'require poll';
|
||||
'require ui';
|
||||
'require crowdsec-dashboard.api as api';
|
||||
|
||||
return view.extend({
|
||||
alerts: [],
|
||||
bannedIPs: new Set(),
|
||||
|
||||
load: function() {
|
||||
var self = this;
|
||||
var link = document.createElement('link');
|
||||
link.rel = 'stylesheet';
|
||||
link.href = L.resource('crowdsec-dashboard/dashboard.css');
|
||||
document.head.appendChild(link);
|
||||
return api.getAlerts(100).catch(function() { return []; });
|
||||
|
||||
// Load both alerts and current decisions to know which IPs are already banned
|
||||
return Promise.all([
|
||||
api.getAlerts(100).catch(function() { return []; }),
|
||||
api.getDecisions().catch(function() { return []; })
|
||||
]).then(function(results) {
|
||||
var decisions = results[1];
|
||||
// Track banned IPs
|
||||
if (Array.isArray(decisions)) {
|
||||
decisions.forEach(function(d) {
|
||||
if (d.decisions) {
|
||||
d.decisions.forEach(function(dec) {
|
||||
if (dec.value) self.bannedIPs.add(dec.value);
|
||||
});
|
||||
} else if (d.value) {
|
||||
self.bannedIPs.add(d.value);
|
||||
}
|
||||
});
|
||||
}
|
||||
return results[0];
|
||||
});
|
||||
},
|
||||
|
||||
render: function(data) {
|
||||
@ -94,6 +117,7 @@ return view.extend({
|
||||
},
|
||||
|
||||
renderAlerts: function(alerts) {
|
||||
var self = this;
|
||||
if (!alerts.length) {
|
||||
return E('div', { 'class': 'cs-empty' }, 'No alerts');
|
||||
}
|
||||
@ -103,25 +127,95 @@ return view.extend({
|
||||
E('th', {}, 'Source'),
|
||||
E('th', {}, 'Country'),
|
||||
E('th', {}, 'Scenario'),
|
||||
E('th', {}, 'Events')
|
||||
E('th', {}, 'Events'),
|
||||
E('th', { 'style': 'width: 80px;' }, 'Action')
|
||||
])),
|
||||
E('tbody', {}, alerts.slice(0, 50).map(function(a) {
|
||||
var src = a.source || {};
|
||||
var ip = src.ip || '';
|
||||
var country = src.cn || src.country || '';
|
||||
var isBanned = self.bannedIPs.has(ip);
|
||||
|
||||
return E('tr', {}, [
|
||||
E('td', { 'class': 'cs-time' }, api.formatRelativeTime(a.created_at)),
|
||||
E('td', {}, E('span', { 'class': 'cs-ip' }, src.ip || '-')),
|
||||
E('td', {}, E('span', { 'class': 'cs-ip' }, ip || '-')),
|
||||
E('td', {}, [
|
||||
E('span', { 'class': 'cs-flag' }, api.getCountryFlag(country)),
|
||||
' ', country
|
||||
]),
|
||||
E('td', {}, E('span', { 'class': 'cs-scenario' }, api.parseScenario(a.scenario))),
|
||||
E('td', {}, String(a.events_count || 0))
|
||||
E('td', {}, String(a.events_count || 0)),
|
||||
E('td', {}, ip ? self.renderBanButton(ip, a.scenario, isBanned) : '-')
|
||||
]);
|
||||
}))
|
||||
]);
|
||||
},
|
||||
|
||||
renderBanButton: function(ip, scenario, isBanned) {
|
||||
var self = this;
|
||||
|
||||
if (isBanned) {
|
||||
return E('button', {
|
||||
'class': 'cbi-button cbi-button-neutral',
|
||||
'style': 'padding: 2px 8px; font-size: 11px;',
|
||||
'disabled': 'disabled',
|
||||
'title': 'Already banned'
|
||||
}, 'Banned');
|
||||
}
|
||||
|
||||
return E('button', {
|
||||
'class': 'cbi-button cbi-button-negative',
|
||||
'style': 'padding: 2px 8px; font-size: 11px;',
|
||||
'click': function(ev) {
|
||||
ev.preventDefault();
|
||||
self.banIP(ip, scenario);
|
||||
},
|
||||
'title': 'Ban this IP for 24 hours'
|
||||
}, 'Ban');
|
||||
},
|
||||
|
||||
banIP: function(ip, scenario) {
|
||||
var self = this;
|
||||
var reason = 'Manual ban from alert: ' + (scenario || 'unknown');
|
||||
|
||||
ui.showModal('Ban IP', [
|
||||
E('p', {}, 'Ban ' + ip + ' for 24 hours?'),
|
||||
E('p', { 'style': 'font-size: 12px; color: #666;' }, 'Reason: ' + reason),
|
||||
E('div', { 'class': 'right' }, [
|
||||
E('button', {
|
||||
'class': 'cbi-button',
|
||||
'click': ui.hideModal
|
||||
}, 'Cancel'),
|
||||
' ',
|
||||
E('button', {
|
||||
'class': 'cbi-button cbi-button-negative',
|
||||
'click': function() {
|
||||
ui.hideModal();
|
||||
ui.showModal('Banning...', [
|
||||
E('p', { 'class': 'spinning' }, 'Adding ban for ' + ip + '...')
|
||||
]);
|
||||
|
||||
api.addBan(ip, '24h', reason).then(function(result) {
|
||||
ui.hideModal();
|
||||
if (result && result.success !== false) {
|
||||
self.bannedIPs.add(ip);
|
||||
ui.addNotification(null, E('p', {}, 'IP ' + ip + ' has been banned for 24 hours'), 'success');
|
||||
// Refresh the alerts list
|
||||
var el = document.getElementById('alerts-list');
|
||||
if (el) dom.content(el, self.renderAlerts(self.alerts));
|
||||
} else {
|
||||
ui.addNotification(null, E('p', {}, 'Failed to ban IP: ' + (result.error || 'Unknown error')), 'error');
|
||||
}
|
||||
}).catch(function(err) {
|
||||
ui.hideModal();
|
||||
ui.addNotification(null, E('p', {}, 'Failed to ban IP: ' + err), 'error');
|
||||
});
|
||||
}
|
||||
}, 'Ban')
|
||||
])
|
||||
]);
|
||||
},
|
||||
|
||||
filterAlerts: function() {
|
||||
var query = (document.getElementById('alert-search').value || '').toLowerCase();
|
||||
var filtered = this.alerts.filter(function(a) {
|
||||
|
||||
@ -33,6 +33,48 @@ config wan_protection 'wan_protection'
|
||||
# Rate limiting: max requests per IP per minute (0=disabled)
|
||||
option rate_limit '0'
|
||||
|
||||
# Auto-ban configuration - automatically ban IPs via CrowdSec
|
||||
config autoban 'autoban'
|
||||
# Enable automatic banning of detected threats
|
||||
option enabled '0'
|
||||
# Ban duration (e.g., 1h, 4h, 24h, 7d)
|
||||
option ban_duration '4h'
|
||||
# Minimum severity to trigger auto-ban: critical, high, medium
|
||||
option min_severity 'critical'
|
||||
# Auto-ban on CVE exploit attempts
|
||||
option ban_cve_exploits '1'
|
||||
# Auto-ban SQL injection attempts
|
||||
option ban_sqli '1'
|
||||
# Auto-ban command injection attempts
|
||||
option ban_cmdi '1'
|
||||
# Auto-ban path traversal attempts
|
||||
option ban_traversal '1'
|
||||
# Auto-ban known vulnerability scanners
|
||||
option ban_scanners '1'
|
||||
# Auto-ban on rate limit exceeded
|
||||
option ban_rate_limit '0'
|
||||
# Whitelist IPs from auto-ban (comma-separated)
|
||||
option whitelist ''
|
||||
#
|
||||
# Sensitivity level: aggressive, moderate, permissive
|
||||
# - aggressive: Ban immediately on first detection (critical threats only)
|
||||
# - moderate: Ban after repeated attempts within minutes (default)
|
||||
# - permissive: Ban after persistent attempts over longer period
|
||||
option sensitivity 'moderate'
|
||||
#
|
||||
# Aggressive level: Immediate ban on first critical threat
|
||||
# (CVE exploits, SQL injection, command injection always trigger immediately)
|
||||
#
|
||||
# Moderate level thresholds
|
||||
# Ban after N attempts within the time window
|
||||
option moderate_threshold '3'
|
||||
option moderate_window '300'
|
||||
#
|
||||
# Permissive level thresholds
|
||||
# Ban after N attempts within the time window
|
||||
option permissive_threshold '5'
|
||||
option permissive_window '3600'
|
||||
|
||||
# LAN Transparent mode settings (outbound traffic interception)
|
||||
config transparent 'transparent'
|
||||
option enabled '0'
|
||||
|
||||
@ -0,0 +1,4 @@
|
||||
# mitmproxy WAF auto-ban processor
|
||||
# Runs every minute to process auto-ban requests from threat detection
|
||||
# Bans are sent to CrowdSec for enforcement
|
||||
* * * * * root [ -x /usr/sbin/mitmproxyctl ] && /usr/sbin/mitmproxyctl process-autoban >/dev/null 2>&1
|
||||
@ -33,9 +33,18 @@ Commands:
|
||||
sync-routes Sync HAProxy backends to mitmproxy routes
|
||||
haproxy-enable Enable HAProxy backend inspection mode
|
||||
haproxy-disable Disable HAProxy backend inspection mode
|
||||
process-autoban Process auto-ban requests from WAF (run via cron)
|
||||
reload-autoban Reload auto-ban config after UCI changes
|
||||
service-run Internal: run container under procd
|
||||
service-stop Stop container
|
||||
|
||||
Auto-ban Sensitivity Levels:
|
||||
aggressive - Ban immediately on first critical threat
|
||||
moderate - Ban after repeated attempts (default: 3 in 5 min)
|
||||
permissive - Ban after persistent attempts (default: 5 in 1 hour)
|
||||
|
||||
Configure with: uci set mitmproxy.autoban.sensitivity='moderate'
|
||||
|
||||
Modes (configure in /etc/config/mitmproxy):
|
||||
regular - Standard HTTP/HTTPS proxy (default)
|
||||
transparent - Transparent proxy (auto-configures nftables)
|
||||
@ -67,6 +76,65 @@ uci_get() { uci -q get ${CONFIG}.$1; }
|
||||
uci_set() { uci set ${CONFIG}.$1="$2" && uci commit ${CONFIG}; }
|
||||
uci_get_list() { uci -q get ${CONFIG}.$1 2>/dev/null; }
|
||||
|
||||
# Write autoban config to JSON for container to read
|
||||
# Export the UCI autoban.* section as JSON so the container-side addon can
# read it. Missing UCI options fall back to the documented defaults.
write_autoban_config() {
    load_config

    # Map a UCI 0/1 flag (with a default) to a JSON boolean literal.
    _ab_bool() {
        local v
        v=$(uci_get "autoban.$1" || echo "$2")
        [ "$v" = "1" ] && echo "true" || echo "false"
    }

    local enabled_json cve_json sqli_json cmdi_json traversal_json scanners_json rate_json
    enabled_json=$(_ab_bool enabled 0)
    cve_json=$(_ab_bool ban_cve_exploits 1)
    sqli_json=$(_ab_bool ban_sqli 1)
    cmdi_json=$(_ab_bool ban_cmdi 1)
    traversal_json=$(_ab_bool ban_traversal 1)
    scanners_json=$(_ab_bool ban_scanners 1)
    rate_json=$(_ab_bool ban_rate_limit 0)

    local ban_duration min_severity whitelist sensitivity
    ban_duration=$(uci_get autoban.ban_duration || echo "4h")
    min_severity=$(uci_get autoban.min_severity || echo "critical")
    whitelist=$(uci_get autoban.whitelist || echo "")
    sensitivity=$(uci_get autoban.sensitivity || echo "moderate")

    local moderate_threshold moderate_window permissive_threshold permissive_window
    moderate_threshold=$(uci_get autoban.moderate_threshold || echo 3)
    moderate_window=$(uci_get autoban.moderate_window || echo 300)
    permissive_threshold=$(uci_get autoban.permissive_threshold || echo 5)
    permissive_window=$(uci_get autoban.permissive_window || echo 3600)

    # NOTE(review): whitelist/duration/severity are interpolated unescaped; a
    # double quote in a UCI value would break the JSON. Values are
    # operator-controlled, so this is accepted as-is.
    cat > "$data_path/autoban.json" << EOF
{
"enabled": $enabled_json,
"ban_duration": "$ban_duration",
"min_severity": "$min_severity",
"ban_cve_exploits": $cve_json,
"ban_sqli": $sqli_json,
"ban_cmdi": $cmdi_json,
"ban_traversal": $traversal_json,
"ban_scanners": $scanners_json,
"ban_rate_limit": $rate_json,
"whitelist": "$whitelist",
"sensitivity": "$sensitivity",
"moderate_threshold": $moderate_threshold,
"moderate_window": $moderate_window,
"permissive_threshold": $permissive_threshold,
"permissive_window": $permissive_window
}
EOF
    chmod 644 "$data_path/autoban.json"
}
|
||||
|
||||
# Load configuration with defaults
|
||||
load_config() {
|
||||
# Main settings
|
||||
@ -865,6 +933,9 @@ lxc_run() {
|
||||
ensure_dir "$data_path"
|
||||
ensure_dir "$ADDON_PATH"
|
||||
|
||||
# Write autoban config for container
|
||||
write_autoban_config
|
||||
|
||||
# Setup LAN transparent firewall rules if both mode=transparent AND transparent.enabled=1
|
||||
if [ "$mode" = "transparent" ] && [ "$transparent_enabled" = "1" ]; then
|
||||
nft_setup
|
||||
@ -1255,6 +1326,103 @@ cmd_haproxy_enable() {
|
||||
log_info "View threats at: http://<router-ip>/cgi-bin/luci/admin/services/mitmproxy"
|
||||
}
|
||||
|
||||
# =============================================================================
|
||||
# AUTOBAN PROCESSOR
|
||||
# =============================================================================
|
||||
|
||||
# `mitmproxyctl reload-autoban`: re-export the auto-ban JSON after the
# operator changes UCI settings, so the container addon picks them up.
cmd_reload_autoban_config() {
    load_config
    log_info "Reloading auto-ban configuration..."
    write_autoban_config
    log_info "Auto-ban config updated at $data_path/autoban.json"
}
|
||||
|
||||
# `mitmproxyctl process-autoban` (run every minute from cron): drain the
# auto-ban request log written by the WAF addon and turn each entry into a
# CrowdSec decision via cscli.
#
# Fixes over the original:
#  - The request log is rotated (mv) atomically before processing; the old
#    code truncated it at the end, silently losing any requests appended by
#    the addon while the loop was running.
#  - The already-banned check uses `grep -F` so the dots in an IP are not
#    treated as regex wildcards (e.g. "1.2.3.4" no longer matches "102a3b4").
cmd_process_autoban() {
    load_config

    # Refresh the JSON config so the container sees current UCI settings.
    write_autoban_config 2>/dev/null || true

    local autoban_enabled=$(uci_get autoban.enabled || echo 0)
    [ "$autoban_enabled" = "1" ] || return 0

    local autoban_log="$data_path/autoban-requests.log"
    local work_log="$data_path/autoban-requests.work"
    local processed_log="$data_path/autoban-processed.log"
    local ban_duration=$(uci_get autoban.ban_duration || echo "4h")
    local whitelist=$(uci_get autoban.whitelist || echo "")

    # Nothing to do if the log is missing or empty.
    [ -s "$autoban_log" ] || return 0

    # Check if CrowdSec CLI is available before consuming any requests.
    if ! command -v cscli >/dev/null 2>&1; then
        log_warn "cscli not found - cannot process auto-bans"
        return 1
    fi

    # Atomically take ownership of the current batch; new requests keep
    # appending to a fresh $autoban_log and are handled on the next run.
    mv "$autoban_log" "$work_log" 2>/dev/null || return 0

    local processed=0
    local skipped=0

    while IFS= read -r line; do
        [ -z "$line" ] && continue

        # Parse JSON line: {"ip": "x.x.x.x", "reason": "...", "duration": "4h", ...}
        local ip=$(echo "$line" | jsonfilter -e '@.ip' 2>/dev/null)
        local reason=$(echo "$line" | jsonfilter -e '@.reason' 2>/dev/null)
        local req_duration=$(echo "$line" | jsonfilter -e '@.duration' 2>/dev/null)

        [ -z "$ip" ] && continue

        # Use the per-request duration or fall back to the configured default.
        local duration="${req_duration:-$ban_duration}"

        # Never ban whitelisted IPs.
        local skip=0
        if [ -n "$whitelist" ]; then
            for wl_ip in $(echo "$whitelist" | tr ',' ' '); do
                if [ "$ip" = "$wl_ip" ]; then
                    log_info "Skipping whitelisted IP: $ip"
                    skip=1
                    break
                fi
            done
        fi

        if [ "$skip" = "1" ]; then
            skipped=$((skipped + 1))
            continue
        fi

        # Skip IPs that already have a CrowdSec decision (fixed-string match
        # so IP dots are not regex wildcards).
        if cscli decisions list -i "$ip" 2>/dev/null | grep -qF "$ip"; then
            log_info "IP already banned: $ip"
            skipped=$((skipped + 1))
            continue
        fi

        # Add ban via CrowdSec.
        log_info "Auto-banning IP: $ip for $duration (reason: $reason)"
        if cscli decisions add -i "$ip" -d "$duration" -R "mitmproxy-waf: $reason" -t ban >/dev/null 2>&1; then
            processed=$((processed + 1))
            # Keep an audit trail of everything we actually banned.
            echo "$(date -Iseconds) BANNED $ip $duration $reason" >> "$processed_log"
        else
            log_error "Failed to ban IP: $ip"
        fi
    done < "$work_log"

    # The batch was fully consumed; drop the work file.
    rm -f "$work_log"

    if [ $processed -gt 0 ] || [ $skipped -gt 0 ]; then
        log_info "Processed $processed bans, skipped $skipped"
    fi
}
|
||||
|
||||
cmd_haproxy_disable() {
|
||||
require_root
|
||||
load_config
|
||||
@ -1311,6 +1479,8 @@ case "${1:-}" in
|
||||
sync-routes) shift; cmd_sync_routes "$@" ;;
|
||||
haproxy-enable) shift; cmd_haproxy_enable "$@" ;;
|
||||
haproxy-disable) shift; cmd_haproxy_disable "$@" ;;
|
||||
process-autoban) shift; cmd_process_autoban "$@" ;;
|
||||
reload-autoban) shift; cmd_reload_autoban_config "$@" ;;
|
||||
service-run) shift; cmd_service_run "$@" ;;
|
||||
service-stop) shift; cmd_service_stop "$@" ;;
|
||||
help|--help|-h|'') usage ;;
|
||||
|
||||
@ -24,6 +24,10 @@ LOG_FILE = "/var/log/secubox-access.log"
|
||||
CROWDSEC_LOG = "/data/threats.log"
|
||||
ALERTS_FILE = "/tmp/secubox-mitm-alerts.json"
|
||||
STATS_FILE = "/tmp/secubox-mitm-stats.json"
|
||||
# Auto-ban request file - host script watches this to trigger CrowdSec bans
|
||||
AUTOBAN_FILE = "/data/autoban-requests.log"
|
||||
# Auto-ban config file (written by host from UCI)
|
||||
AUTOBAN_CONFIG = "/data/autoban.json"
|
||||
|
||||
# ============================================================================
|
||||
# THREAT DETECTION PATTERNS
|
||||
@ -481,9 +485,15 @@ class SecuBoxAnalytics:
|
||||
self.stats = defaultdict(lambda: defaultdict(int))
|
||||
self.ip_request_count = defaultdict(list) # For rate limiting
|
||||
self.blocked_ips = set()
|
||||
self.autoban_config = {}
|
||||
self.autoban_requested = set() # Track IPs we've already requested to ban
|
||||
# Attempt tracking for sensitivity-based auto-ban
|
||||
# Structure: {ip: [(timestamp, severity, reason), ...]}
|
||||
self.threat_attempts = defaultdict(list)
|
||||
self._load_geoip()
|
||||
self._load_blocked_ips()
|
||||
ctx.log.info("SecuBox Analytics addon v2.0 loaded - Enhanced threat detection")
|
||||
self._load_autoban_config()
|
||||
ctx.log.info("SecuBox Analytics addon v2.2 loaded - Enhanced threat detection with sensitivity-based auto-ban")
|
||||
|
||||
def _load_geoip(self):
|
||||
"""Load GeoIP database if available"""
|
||||
@ -508,6 +518,241 @@ class SecuBoxAnalytics:
|
||||
except Exception as e:
|
||||
ctx.log.debug(f"Could not load blocked IPs: {e}")
|
||||
|
||||
def _load_autoban_config(self):
    """Load the host-provided auto-ban settings into self.autoban_config.

    The host writes AUTOBAN_CONFIG (JSON) from UCI. When the file is
    missing, a disabled default configuration is installed; on any
    read/parse error the feature is left disabled.
    """
    try:
        if not os.path.exists(AUTOBAN_CONFIG):
            # No host config yet - start with auto-ban disabled defaults.
            self.autoban_config = {
                'enabled': False,
                'ban_duration': '4h',
                'min_severity': 'critical',
                'ban_cve_exploits': True,
                'ban_sqli': True,
                'ban_cmdi': True,
                'ban_traversal': True,
                'ban_scanners': True,
                'ban_rate_limit': False,
                'whitelist': [],
                # Sensitivity levels
                'sensitivity': 'moderate',
                'moderate_threshold': 3,
                'moderate_window': 300,
                'permissive_threshold': 5,
                'permissive_window': 3600
            }
            return

        with open(AUTOBAN_CONFIG, 'r') as f:
            cfg = json.load(f)
        self.autoban_config = cfg
        if cfg.get('enabled'):
            sensitivity = cfg.get('sensitivity', 'moderate')
            ctx.log.info(f"Auto-ban enabled: sensitivity={sensitivity}, min_severity={cfg.get('min_severity', 'critical')}, duration={cfg.get('ban_duration', '4h')}")
    except Exception as e:
        ctx.log.warn(f"Could not load auto-ban config: {e}")
        self.autoban_config = {'enabled': False}
||||
|
||||
def _clean_old_attempts(self, ip: str, window: int):
    """Drop recorded threat attempts for `ip` older than `window` seconds."""
    cutoff = time.time() - window
    kept = []
    for attempt in self.threat_attempts[ip]:
        # attempt is (timestamp, severity, reason); keep only recent ones.
        if attempt[0] > cutoff:
            kept.append(attempt)
    self.threat_attempts[ip] = kept
||||
|
||||
def _record_attempt(self, ip: str, severity: str, reason: str):
    """Append a timestamped threat attempt for `ip` to the tracking list."""
    entry = (time.time(), severity, reason)
    self.threat_attempts[ip].append(entry)
|
||||
|
||||
def _check_threshold(self, ip: str, threshold: int, window: int) -> tuple:
    """Return (exceeded, reason) for an IP's recent threat attempts.

    Prunes attempts older than `window` seconds first, then reports whether
    at least `threshold` attempts remain; the reason quotes the earliest of
    the last `threshold` attempts.
    """
    self._clean_old_attempts(ip, window)
    recent = self.threat_attempts[ip]
    if len(recent) < threshold:
        return False, ''
    first_reason = recent[-threshold:][0][2]
    return True, f"Repeated threats ({len(recent)} in {window}s): {first_reason}"
||||
|
||||
def _should_autoban(self, ip: str, scan_result: dict, client_fp: dict, rate_limited: bool) -> tuple:
    """
    Determine if an IP should be auto-banned based on threat detection and sensitivity level.

    Returns: (should_ban: bool, reason: str)

    Sensitivity Levels:
    - aggressive: Ban immediately on first critical threat (CVE, SQLi, CMDi)
    - moderate: Ban after N threats within M minutes (default: 3 in 5 min)
    - permissive: Ban after N threats within M minutes (default: 5 in 1 hour)

    Critical threats (always immediate in aggressive mode):
    - CVE exploits, SQL injection, Command injection, XXE, Log4Shell, SSTI

    Other triggers (follow sensitivity thresholds):
    - XSS, Path traversal, SSRF, LDAP injection
    - Known vulnerability scanners
    - Rate limit exceeded (if enabled)
    """
    # Feature gate: config is written by the host from UCI (see AUTOBAN_CONFIG).
    if not self.autoban_config.get('enabled'):
        return False, ''

    # Check whitelist (accepts either a list or the comma-separated string
    # form the host writes into the JSON).
    whitelist = self.autoban_config.get('whitelist', [])
    if isinstance(whitelist, str):
        whitelist = [w.strip() for w in whitelist.split(',') if w.strip()]
    if ip in whitelist:
        return False, ''

    # Skip local IPs (RFC1918 + loopback).
    # NOTE(review): IPv6 sources (::1, fc00::/7) are not covered by this
    # prefix check - confirm upstream only hands us IPv4 client addresses.
    if ip.startswith(('10.', '172.16.', '172.17.', '172.18.', '172.19.',
                      '172.20.', '172.21.', '172.22.', '172.23.', '172.24.',
                      '172.25.', '172.26.', '172.27.', '172.28.', '172.29.',
                      '172.30.', '172.31.', '192.168.', '127.')):
        return False, ''

    # Already requested ban for this IP - avoid duplicate requests.
    if ip in self.autoban_requested:
        return False, ''

    sensitivity = self.autoban_config.get('sensitivity', 'moderate')
    min_severity = self.autoban_config.get('min_severity', 'critical')
    severity_order = {'low': 0, 'medium': 1, 'high': 2, 'critical': 3}

    # Get threshold settings based on sensitivity.
    if sensitivity == 'aggressive':
        threshold = 1  # Immediate ban
        window = 60
    elif sensitivity == 'permissive':
        threshold = int(self.autoban_config.get('permissive_threshold', 5))
        window = int(self.autoban_config.get('permissive_window', 3600))
    else:  # moderate (default)
        threshold = int(self.autoban_config.get('moderate_threshold', 3))
        window = int(self.autoban_config.get('moderate_window', 300))

    threat_detected = False
    threat_reason = ''
    threat_severity = 'medium'
    is_critical_threat = False

    # Check threat patterns reported by the scanner stage. The elif chain is
    # order-sensitive: the first matching category wins.
    if scan_result.get('is_scan'):
        threat_severity = scan_result.get('severity', 'medium')
        threat_type = scan_result.get('type', '')
        pattern = scan_result.get('pattern', '')
        category = scan_result.get('category', '')
        cve = scan_result.get('cve', '')

        # Critical threats - always ban immediately in aggressive mode
        # CVE exploits
        if cve and self.autoban_config.get('ban_cve_exploits', True):
            threat_detected = True
            threat_reason = f"CVE exploit attempt: {cve}"
            is_critical_threat = True

        # SQL injection
        # NOTE(review): when ban_sqli/ban_cmdi is disabled the branch still
        # consumes the match, so the generic severity rule below never sees
        # it - confirm that suppression (vs. fall-through) is intended.
        elif threat_type == 'injection' and 'sql' in pattern.lower():
            if self.autoban_config.get('ban_sqli', True):
                threat_detected = True
                threat_reason = f"SQL injection attempt: {pattern}"
                is_critical_threat = True

        # Command injection
        elif threat_type == 'injection' and 'command' in pattern.lower():
            if self.autoban_config.get('ban_cmdi', True):
                threat_detected = True
                threat_reason = f"Command injection attempt: {pattern}"
                is_critical_threat = True

        # XXE (critical)
        elif pattern == 'xxe':
            threat_detected = True
            threat_reason = "XXE attack attempt"
            is_critical_threat = True

        # Log4Shell (critical)
        elif pattern == 'log4shell':
            threat_detected = True
            threat_reason = f"Log4Shell attempt: {cve or 'CVE-2021-44228'}"
            is_critical_threat = True

        # SSTI (critical)
        elif pattern == 'ssti':
            threat_detected = True
            threat_reason = "SSTI attack attempt"
            is_critical_threat = True

        # Path traversal (high - follows threshold)
        elif threat_type == 'traversal' or 'traversal' in pattern.lower():
            if self.autoban_config.get('ban_traversal', True):
                threat_detected = True
                threat_reason = f"Path traversal attempt: {pattern}"

        # Other threats based on severity threshold
        elif severity_order.get(threat_severity, 0) >= severity_order.get(min_severity, 3):
            threat_detected = True
            threat_reason = f"Threat detected ({threat_severity}): {pattern or category}"

    # Check for known scanners (only when no pattern threat matched above).
    if not threat_detected and self.autoban_config.get('ban_scanners', True):
        bot_type = client_fp.get('bot_type', '')
        if bot_type in ['vulnerability_scanner', 'injection_tool', 'exploitation_tool', 'directory_scanner']:
            threat_detected = True
            threat_reason = f"Vulnerability scanner detected: {bot_type}"
            # Scanners are high severity but not critical
            threat_severity = 'high'

    # Rate limit exceeded (opt-in via ban_rate_limit).
    if not threat_detected and rate_limited and self.autoban_config.get('ban_rate_limit', False):
        threat_detected = True
        threat_reason = "Rate limit exceeded"
        threat_severity = 'medium'

    if not threat_detected:
        return False, ''

    # Record the attempt so thresholds accumulate across requests.
    self._record_attempt(ip, threat_severity, threat_reason)

    # Decision logic based on sensitivity.
    if sensitivity == 'aggressive':
        # Aggressive: ban immediately on first critical threat
        if is_critical_threat:
            return True, threat_reason
        # For non-critical, still check threshold (but threshold=1)
        return self._check_threshold(ip, threshold, window)

    elif sensitivity == 'permissive':
        # Permissive: always require threshold to be met
        return self._check_threshold(ip, threshold, window)

    else:  # moderate
        # Moderate: critical threats ban immediately, others follow threshold
        if is_critical_threat:
            return True, threat_reason
        return self._check_threshold(ip, threshold, window)
|
||||
|
||||
def _request_autoban(self, ip: str, reason: str, severity: str = 'high'):
    """Queue an auto-ban request for the host-side processor.

    Appends one JSON line to AUTOBAN_FILE; the host cron job
    (`mitmproxyctl process-autoban`) turns it into a CrowdSec decision.
    Each IP is requested at most once per addon lifetime, tracked in
    self.autoban_requested.

    BUG FIX: the IP is now marked as requested only after the write
    succeeds. The original added it to the set before writing, so a
    transient I/O failure permanently suppressed the ban for that IP.
    """
    if ip in self.autoban_requested:
        return

    duration = self.autoban_config.get('ban_duration', '4h')

    ban_request = {
        'timestamp': datetime.utcnow().isoformat() + 'Z',
        'ip': ip,
        'reason': reason,
        'severity': severity,
        'duration': duration,
        'source': 'waf'
    }

    try:
        with open(AUTOBAN_FILE, 'a') as f:
            f.write(json.dumps(ban_request) + '\n')
        # Mark only after a successful append so a failed write can be
        # retried on the next detection of this IP.
        self.autoban_requested.add(ip)
        ctx.log.warn(f"AUTO-BAN REQUESTED: {ip} for {duration} - {reason}")
    except Exception as e:
        ctx.log.error(f"Failed to write auto-ban request: {e}")
||||
|
||||
def _get_country(self, ip: str) -> str:
|
||||
"""Get country code from IP"""
|
||||
if not self.geoip or ip.startswith(('10.', '172.16.', '192.168.', '127.')):
|
||||
@ -1129,6 +1374,16 @@ class SecuBoxAnalytics:
|
||||
if client_fp.get('is_bot'):
|
||||
ctx.log.info(f"BOT DETECTED: {source_ip} - {client_fp.get('user_agent', '')[:80]}")
|
||||
|
||||
# Check for auto-ban
|
||||
should_ban, ban_reason = self._should_autoban(
|
||||
source_ip,
|
||||
scan_result,
|
||||
client_fp,
|
||||
rate_limit.get('is_limited', False)
|
||||
)
|
||||
if should_ban:
|
||||
self._request_autoban(source_ip, ban_reason, scan_result.get('severity', 'high'))
|
||||
|
||||
def response(self, flow: http.HTTPFlow):
|
||||
"""Process response to complete log entry"""
|
||||
entry = flow.metadata.get('secubox_entry', {})
|
||||
|
||||
Loading…
Reference in New Issue
Block a user