Fix 12 code review issues (4 MEDIUM + 8 LOW)

MEDIUM:
- M1: Whitelist direct IP/CIDR additions now persist to direct.txt
- M2: get_map_id() uses 5s TTL cache (single bpftool call for all maps)
- M3: IPv6 extension header parsing in xdp_ddos.c (hop-by-hop/routing/frag/dst)
- M4: Shell injection prevention - sanitize_input() + sys.argv[] for all Python calls

LOW:
- L1: Remove redundant self.running (uses _stop_event only)
- L2: Remove unused config values (rate_limit_after, cooldown_multiplier, retrain_interval)
- L3: Thread poll intervals reloaded on SIGHUP
- L4: batch_map_operation counts only successfully written entries
- L5: Clarify unique_ips_approx comment (per-packet counter)
- L6: Document LRU_HASH multi-CPU race condition as acceptable
- L7: Download Cloudflare IPv6 ranges in whitelist preset
- L8: Fix file handle leak in xdp_country.py list_countries()

Also: SIGHUP now preserves EWMA/violation state, daemon skips whitelisted
IPs in EWMA/AI escalation, deep copy for default config, IHL validation.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
kaffa
2026-02-07 09:23:41 +09:00
parent dbfcb62cdf
commit 667c6eac81
7 changed files with 218 additions and 67 deletions

View File

@@ -61,7 +61,7 @@ struct traffic_features {
__u64 udp_count;
__u64 icmp_count;
__u64 other_proto_count;
__u64 unique_ips_approx; // approximate via counter
__u64 unique_ips_approx; // per-packet counter (not truly unique, used as relative indicator)
__u64 small_pkt_count; // packets < 100 bytes
__u64 large_pkt_count; // packets > 1400 bytes
};
@@ -218,6 +218,9 @@ static __always_inline int check_blocked_v6(struct in6_addr *ip, __u64 now) {
}
// Rate check for IPv4: returns 1 if rate exceeded
// Note: LRU_HASH entries carry no per-entry lock, so concurrent read-modify-write
// of the per-IP counters across CPUs can lose updates, making counts slightly
// inaccurate under high concurrency. This is acceptable for rate limiting, where
// approximate enforcement is sufficient.
static __always_inline int rate_check_v4(__u32 ip, __u64 now, __u64 pkt_len) {
__u32 cfg_key = 0;
struct rate_cfg *cfg = bpf_map_lookup_elem(&rate_config, &cfg_key);
@@ -349,6 +352,12 @@ int xdp_ddos(struct xdp_md *ctx) {
__u8 proto = iph->protocol;
__u8 tcp_flags = 0;
// Validate IHL (minimum 5 = 20 bytes)
if (iph->ihl < 5) {
inc_stat(4);
return XDP_PASS;
}
// Extract TCP flags if applicable
if (proto == IPPROTO_TCP) {
struct tcphdr *tcph = l3_hdr + (iph->ihl * 4);
@@ -393,9 +402,35 @@ int xdp_ddos(struct xdp_md *ctx) {
struct in6_addr saddr = ip6h->saddr;
__u8 proto = ip6h->nexthdr;
__u8 tcp_flags = 0;
void *next_hdr = (void *)(ip6h + 1);
// Skip known IPv6 extension headers (up to 4 to stay within verifier limits)
#pragma unroll
for (int i = 0; i < 4; i++) {
if (proto != IPPROTO_HOPOPTS && proto != IPPROTO_ROUTING &&
proto != IPPROTO_DSTOPTS && proto != IPPROTO_FRAGMENT)
break;
if (proto == IPPROTO_FRAGMENT) {
// Fragment header is fixed 8 bytes
if (next_hdr + 8 > data_end)
break;
proto = *(__u8 *)next_hdr;
next_hdr += 8;
} else {
// Other extension headers: length in 2nd byte (units of 8 octets, +8)
if (next_hdr + 2 > data_end)
break;
__u8 ext_len = *((__u8 *)next_hdr + 1);
__u32 hdr_len = (((__u32)ext_len) + 1) * 8;
if (next_hdr + hdr_len > data_end)
break;
proto = *(__u8 *)next_hdr;
next_hdr += hdr_len;
}
}
if (proto == IPPROTO_TCP) {
struct tcphdr *tcph = (void *)(ip6h + 1);
struct tcphdr *tcph = next_hdr;
if ((void *)(tcph + 1) <= data_end) {
tcp_flags = ((__u8 *)tcph)[13];
}