// XDP SYN/SYN-ACK retransmission shield (export metadata: 588 lines, 17 KiB, C)

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#define RETRANS_WAIT_NS 1000000000ULL // 1s
#define THRESHOLD_SYN 1500 // 1500 SYNs
#define THRESHOLD_SYNACK 10000 // 10k SYNACKs
#define THRESHOLD_FRAG 10000 // 10k fragments
#define WHITELIST_TIMEOUT 900000000000ULL // 15 minutes whitelist
#define CLEANUP_INTERVAL 10000000000ULL // 10 seconds (comment previously said 5 s; value is 10 s — confirm intended interval)
#define STAT_SYN_COUNT 0
#define STAT_SYNACK_COUNT 1
#define STAT_FRAG_COUNT 4
#define STAT_LAST_RESET 2
#define STAT_LAST_CLEANUP 3
#define ATTACK_SYN 0
#define ATTACK_SYNACK 1
#define ATTACK_FRAG 2
// 5-tuple key identifying one IPv4 TCP/UDP flow. Addresses and ports are
// stored in network byte order, exactly as read from the packet headers.
// NOTE(review): this field order leaves compiler-inserted padding holes
// (u16 followed by u32). Designated initializers in this file zero the
// whole object, but any userspace consumer must use the identical layout
// and also zero the padding — confirm.
struct conn_key_v4
{
__u32 src_ip;
__u16 src_port;
__u32 dst_ip;
__u16 dst_port;
__u8 proto; // IPPROTO_* value
};
// 5-tuple key identifying one IPv6 TCP/UDP flow; same conventions and the
// same padding caveat as conn_key_v4 (network byte order throughout).
struct conn_key_v6
{
struct in6_addr src_ip;
__u16 src_port;
struct in6_addr dst_ip;
__u16 dst_port;
__u8 proto; // IPPROTO_* value
};
// Per-flow state used during attack mode: when the first (dropped)
// SYN / SYN-ACK of this flow was observed.
struct conn_entry
{
__u64 timestamp; // bpf_ktime_get_ns() at first sighting
__u32 flags; // For future extensions
};
// Tracks the first dropped SYN/SYN-ACK per IPv4 flow during attack mode so
// a retransmission after RETRANS_WAIT_NS can be recognized. LRU eviction
// bounds memory under flood.
struct
{
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__uint(max_entries, 1000000);
__type(key, struct conn_key_v4);
__type(value, struct conn_entry);
} first_seen_v4 SEC(".maps");
// IPv6 counterpart of first_seen_v4 (see above).
struct
{
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__uint(max_entries, 1000000);
__type(key, struct conn_key_v6);
__type(value, struct conn_entry);
} first_seen_v6 SEC(".maps");
// Whitelist with LRU eviction
// IPv4 flow whitelist. Value is an expiry timestamp (now + WHITELIST_TIMEOUT).
// NOTE(review): is_whitelisted_v4() only checks presence, never the expiry —
// presumably expiry is enforced by userspace or left to LRU eviction; confirm.
struct
{
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__uint(max_entries, 500000);
__type(key, struct conn_key_v4);
__type(value, __u64);
} whitelist_v4 SEC(".maps");
// IPv6 counterpart of whitelist_v4; same expiry caveat applies.
struct
{
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__uint(max_entries, 500000);
__type(key, struct conn_key_v6);
__type(value, __u64);
} whitelist_v6 SEC(".maps");
// Per-CPU statistics array, indexed by the STAT_* defines:
//   0 = SYN counter, 1 = SYN-ACK counter, 2 = last-reset timestamp,
//   3 = last-cleanup timestamp, 4 = fragment counter.
// Fix: max_entries was 4, which made slot STAT_FRAG_COUNT (key 4)
// unreachable — every lookup/update of the fragment stat silently failed
// (array maps reject out-of-range keys). Sized to 5 so all declared
// stat keys exist.
struct
{
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 5);
__type(key, __u32);
__type(value, __u64);
} stats SEC(".maps");
// Attack-mode state, indexed by ATTACK_* (0..2). Value is the ns timestamp
// when the mode was (re)armed, or 0 when inactive; a mode is treated as
// active for 60 s after its timestamp (see check_attack_mode).
struct
{
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 3);
__type(key, __u32);
__type(value, __u64);
} attack_mode SEC(".maps");
// Persistent cursor for the (currently stub) incremental cleanup pass.
struct cleanup_state
{
__u32 bucket_v4; // NOTE(review): never advanced anywhere in this file
__u32 bucket_v6; // NOTE(review): never advanced anywhere in this file
__u64 last_cleanup; // last time the cleanup tick fired (ns)
};
// Single-slot map holding the cleanup cursor (key 0).
struct
{
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, struct cleanup_state);
} cleanup_state_map SEC(".maps");
// Fragment counter for one fixed time window.
struct frag_window_counter
{
__u64 window_id; // now / 350ms — identifies the current window
__u32 count; // fragments seen in that window
};
// Windowed fragment counters. Only key 0 (global) is used in this file.
struct
{
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__uint(max_entries, 1024);
__type(key, __u32); // e.g. 0 for global, or dst_ip for per-destination counting
__type(value, struct frag_window_counter);
} frag_window_map SEC(".maps");
// Record one event or a timestamp in the per-CPU stats array.
// Timestamp slots (STAT_LAST_RESET / STAT_LAST_CLEANUP) are overwritten
// with `now`; all other slots are treated as counters and incremented
// atomically. Missing slots (out-of-range key) are silently ignored.
static __inline void update_stats(__u32 stat_type, __u64 now)
{
    __u64 *slot = bpf_map_lookup_elem(&stats, &stat_type);
    if (!slot)
        return;
    int is_timestamp_slot =
        (stat_type == STAT_LAST_RESET) || (stat_type == STAT_LAST_CLEANUP);
    if (is_timestamp_slot)
        *slot = now;
    else
        __sync_fetch_and_add(slot, 1);
}
// Decide whether `attack_type` is currently in attack mode.
// Returns 1 while the mode is active (armed within the last 60 s, or the
// per-second counter is above its threshold right now), 0 otherwise.
// While active and still over threshold, the 60 s window is re-armed;
// an expired window is cleared back to 0 before re-evaluating.
static __inline int check_attack_mode(__u32 attack_type, __u64 now)
{
    __u32 stat_key;
    __u32 threshold;
    switch (attack_type)
    {
    case ATTACK_SYN:
        stat_key = STAT_SYN_COUNT;
        threshold = THRESHOLD_SYN;
        break;
    case ATTACK_SYNACK:
        stat_key = STAT_SYNACK_COUNT;
        threshold = THRESHOLD_SYNACK;
        break;
    default: // ATTACK_FRAG
        stat_key = STAT_FRAG_COUNT;
        threshold = THRESHOLD_FRAG;
        break;
    }
    __u64 *attack_start = bpf_map_lookup_elem(&attack_mode, &attack_type);
    __u64 *counter = bpf_map_lookup_elem(&stats, &stat_key);
    int over_threshold = counter && *counter > threshold;
    if (attack_start && *attack_start > 0)
    {
        if ((now - *attack_start) < 60000000000ULL)
        {
            if (over_threshold)
                bpf_map_update_elem(&attack_mode, &attack_type, &now, BPF_ANY);
            return 1;
        }
        // 60 s window elapsed: disarm, then fall through to re-check.
        __u64 zero = 0;
        bpf_map_update_elem(&attack_mode, &attack_type, &zero, BPF_ANY);
    }
    if (over_threshold)
    {
        bpf_map_update_elem(&attack_mode, &attack_type, &now, BPF_ANY);
        return 1;
    }
    return 0;
}
// Zero the SYN / SYN-ACK / fragment counters once per second, stamping
// STAT_LAST_RESET with `now`. If the last-reset slot is missing the reset
// runs unconditionally.
static __inline void reset_stats_if_needed(__u64 now)
{
    __u32 reset_key = STAT_LAST_RESET;
    __u64 *last_reset = bpf_map_lookup_elem(&stats, &reset_key);
    if (last_reset && (now - *last_reset) < 1000000000ULL)
        return; // Current one-second window still open.
    __u64 zero = 0;
    __u32 slot;
    slot = STAT_SYN_COUNT;
    bpf_map_update_elem(&stats, &slot, &zero, BPF_ANY);
    slot = STAT_SYNACK_COUNT;
    bpf_map_update_elem(&stats, &slot, &zero, BPF_ANY);
    slot = STAT_FRAG_COUNT;
    bpf_map_update_elem(&stats, &slot, &zero, BPF_ANY);
    update_stats(STAT_LAST_RESET, now);
}
// Whitelist membership test for an IPv4 flow. Presence in the map counts
// as whitelisted; the stored expiry value is not examined here.
static __inline int is_whitelisted_v4(struct conn_key_v4 *key)
{
    return bpf_map_lookup_elem(&whitelist_v4, key) ? 1 : 0;
}
// Whitelist membership test for an IPv6 flow. Presence in the map counts
// as whitelisted; the stored expiry value is not examined here.
static __inline int is_whitelisted_v6(struct conn_key_v6 *key)
{
    return bpf_map_lookup_elem(&whitelist_v6, key) ? 1 : 0;
}
// Drop an IPv4 flow from the whitelist (no-op if absent).
static __inline void remove_from_whitelist_v4(struct conn_key_v4 *key)
{
bpf_map_delete_elem(&whitelist_v4, key);
}
// Drop an IPv6 flow from the whitelist (no-op if absent).
static __inline void remove_from_whitelist_v6(struct conn_key_v6 *key)
{
bpf_map_delete_elem(&whitelist_v6, key);
}
// Insert (or overwrite) an IPv4 flow in the whitelist with a fresh expiry
// of now + WHITELIST_TIMEOUT.
static __inline void add_to_whitelist_v4(struct conn_key_v4 *key, __u64 now)
{
__u64 expiry = now + WHITELIST_TIMEOUT;
bpf_map_update_elem(&whitelist_v4, key, &expiry, BPF_ANY);
}
// Insert (or overwrite) an IPv6 flow in the whitelist with a fresh expiry
// of now + WHITELIST_TIMEOUT.
static __inline void add_to_whitelist_v6(struct conn_key_v6 *key, __u64 now)
{
__u64 expiry = now + WHITELIST_TIMEOUT;
bpf_map_update_elem(&whitelist_v6, key, &expiry, BPF_ANY);
}
// Extend the expiry of an existing IPv4 whitelist entry; never creates one.
static __inline void renew_whitelist_v4(struct conn_key_v4 *key, __u64 now)
{
    if (!bpf_map_lookup_elem(&whitelist_v4, key))
        return; // Not whitelisted — nothing to renew.
    __u64 fresh_expiry = now + WHITELIST_TIMEOUT;
    bpf_map_update_elem(&whitelist_v4, key, &fresh_expiry, BPF_ANY);
}
// Extend the expiry of an existing IPv6 whitelist entry; never creates one.
static __inline void renew_whitelist_v6(struct conn_key_v6 *key, __u64 now)
{
    if (!bpf_map_lookup_elem(&whitelist_v6, key))
        return; // Not whitelisted — nothing to renew.
    __u64 fresh_expiry = now + WHITELIST_TIMEOUT;
    bpf_map_update_elem(&whitelist_v6, key, &fresh_expiry, BPF_ANY);
}
// Rate-limited cleanup tick, driven from the packet path.
// NOTE(review): this is currently a stub — it only creates the state entry
// on first use and advances last_cleanup every CLEANUP_INTERVAL. The
// bucket_v4/bucket_v6 cursors are never advanced and no map entries are
// removed here; stale entries are handled by LRU eviction instead.
// Either complete this or remove it deliberately.
static __inline void incremental_cleanup(__u64 now)
{
__u32 state_key = 0;
struct cleanup_state *state = bpf_map_lookup_elem(&cleanup_state_map, &state_key);
if (!state)
{
// First invocation: seed the cursor and bail out.
struct cleanup_state init_state = {0, 0, now};
bpf_map_update_elem(&cleanup_state_map, &state_key, &init_state, BPF_ANY);
return;
}
if ((now - state->last_cleanup) < CLEANUP_INTERVAL)
{
return;
}
state->last_cleanup = now;
bpf_map_update_elem(&cleanup_state_map, &state_key, state, BPF_ANY);
}
// Handle one IPv4 TCP packet (caller guarantees iph->protocol == IPPROTO_TCP
// and that the IP header itself is within bounds). Returns an XDP verdict.
// Strategy: outside attack mode every handshake packet is whitelisted
// immediately; during attack mode the first SYN (or SYN-ACK) is dropped and
// the flow is only accepted once the packet is retransmitted more than
// RETRANS_WAIT_NS later — a crude proof of a real TCP stack.
static __inline int process_tcp_v4(struct iphdr *iph, void *data_end, __u64 now)
{
// ihl is in 32-bit words; assumes ihl >= 5 — a smaller (malformed) value
// would make tcph overlap the IP header. NOTE(review): consider validating.
struct tcphdr *tcph = (void *)iph + iph->ihl * 4;
if ((void *)(tcph + 1) > data_end)
return XDP_PASS;
// Key fields stay in network byte order on both store and lookup.
struct conn_key_v4 key = {
.src_ip = iph->saddr,
.dst_ip = iph->daddr,
.proto = IPPROTO_TCP,
.src_port = tcph->source,
.dst_port = tcph->dest,
};
// --- Inbound SYN (connection attempt) ---
if (tcph->syn && !tcph->ack)
{
update_stats(STAT_SYN_COUNT, now);
reset_stats_if_needed(now);
if (!check_attack_mode(ATTACK_SYN, now))
{
// No SYN flood in progress: trust the flow right away.
add_to_whitelist_v4(&key, now);
return XDP_PASS;
}
// Attack mode: drop the first SYN and remember when it was seen.
struct conn_entry *entry = bpf_map_lookup_elem(&first_seen_v4, &key);
if (!entry)
{
struct conn_entry new_entry = {.timestamp = now, .flags = 0};
bpf_map_update_elem(&first_seen_v4, &key, &new_entry, BPF_ANY);
return XDP_DROP;
}
// A retransmission after the wait period passes the test.
if ((now - entry->timestamp) > RETRANS_WAIT_NS)
{
bpf_map_delete_elem(&first_seen_v4, &key);
add_to_whitelist_v4(&key, now);
return XDP_PASS;
}
return XDP_DROP;
}
// --- Already-whitelisted flow ---
if (is_whitelisted_v4(&key))
{
if (tcph->rst || tcph->fin)
{
// Connection teardown: remove both directions from the whitelist.
remove_from_whitelist_v4(&key);
struct conn_key_v4 rev_key = {
.src_ip = key.dst_ip, .dst_ip = key.src_ip, .src_port = key.dst_port, .dst_port = key.src_port, .proto = key.proto};
remove_from_whitelist_v4(&rev_key);
return XDP_PASS; // Important: pass it through so the backend side also closes the connection.
}
renew_whitelist_v4(&key, now);
return XDP_PASS; // Let all other flags through for whitelisted flows.
}
// --- SYN-ACK retransmission check, so no invalid traffic reaches the client ---
if (tcph->syn && tcph->ack)
{
update_stats(STAT_SYNACK_COUNT, now);
reset_stats_if_needed(now);
if (!check_attack_mode(ATTACK_SYNACK, now))
{
// Not under SYN-ACK attack: whitelist both directions of the flow.
add_to_whitelist_v4(&key, now);
struct conn_key_v4 rev_key = {
.src_ip = key.dst_ip, .dst_ip = key.src_ip, .src_port = key.dst_port, .dst_port = key.src_port, .proto = key.proto};
add_to_whitelist_v4(&rev_key, now);
return XDP_PASS;
}
struct conn_entry *entry = bpf_map_lookup_elem(&first_seen_v4, &key);
if (!entry)
{
struct conn_entry new_entry = {.timestamp = now, .flags = 0};
bpf_map_update_elem(&first_seen_v4, &key, &new_entry, BPF_ANY);
return XDP_DROP;
}
if ((now - entry->timestamp) > RETRANS_WAIT_NS)
{
bpf_map_delete_elem(&first_seen_v4, &key);
add_to_whitelist_v4(&key, now);
struct conn_key_v4 rev_key = {
.src_ip = key.dst_ip, .dst_ip = key.src_ip, .src_port = key.dst_port, .dst_port = key.src_port, .proto = key.proto};
add_to_whitelist_v4(&rev_key, now);
return XDP_PASS;
}
return XDP_DROP;
}
// Not a handshake packet and not whitelisted: drop (default-deny).
return XDP_DROP;
}
// IPv6 counterpart of process_tcp_v4; same SYN/SYN-ACK retransmission
// strategy. Assumes the TCP header immediately follows the fixed IPv6
// header — the caller only dispatches here when nexthdr == IPPROTO_TCP,
// so packets with extension headers never reach this function.
static __inline int process_tcp_v6(struct ipv6hdr *ip6h, void *data_end, __u64 now)
{
struct tcphdr *tcph = (void *)(ip6h + 1);
if ((void *)(tcph + 1) > data_end)
return XDP_PASS;
// Key fields stay in network byte order on both store and lookup.
struct conn_key_v6 key = {
.src_ip = ip6h->saddr,
.dst_ip = ip6h->daddr,
.proto = IPPROTO_TCP,
.src_port = tcph->source,
.dst_port = tcph->dest,
};
// --- Inbound SYN (connection attempt) ---
if (tcph->syn && !tcph->ack)
{
update_stats(STAT_SYN_COUNT, now);
reset_stats_if_needed(now);
if (!check_attack_mode(ATTACK_SYN, now))
{
// No SYN flood in progress: trust the flow right away.
add_to_whitelist_v6(&key, now);
return XDP_PASS;
}
// Attack mode: drop the first SYN and remember when it was seen.
struct conn_entry *entry = bpf_map_lookup_elem(&first_seen_v6, &key);
if (!entry)
{
struct conn_entry new_entry = {.timestamp = now, .flags = 0};
bpf_map_update_elem(&first_seen_v6, &key, &new_entry, BPF_ANY);
return XDP_DROP;
}
// A retransmission after the wait period passes the test.
if ((now - entry->timestamp) > RETRANS_WAIT_NS)
{
bpf_map_delete_elem(&first_seen_v6, &key);
add_to_whitelist_v6(&key, now);
return XDP_PASS;
}
return XDP_DROP;
}
// --- Already-whitelisted flow ---
if (is_whitelisted_v6(&key))
{
if (tcph->rst || tcph->fin)
{
// Connection teardown: remove both directions from the whitelist.
remove_from_whitelist_v6(&key);
struct conn_key_v6 rev_key = {
.src_ip = key.dst_ip, .dst_ip = key.src_ip, .src_port = key.dst_port, .dst_port = key.src_port, .proto = key.proto};
remove_from_whitelist_v6(&rev_key);
return XDP_PASS; // Important: pass it through so the connection is also closed on the backend side.
}
renew_whitelist_v6(&key, now);
return XDP_PASS; // Let all other flags through for whitelisted flows.
}
// --- SYN-ACK retransmission check ---
if (tcph->syn && tcph->ack)
{
update_stats(STAT_SYNACK_COUNT, now);
reset_stats_if_needed(now);
if (!check_attack_mode(ATTACK_SYNACK, now))
{
// Not under SYN-ACK attack: whitelist both directions of the flow.
add_to_whitelist_v6(&key, now);
struct conn_key_v6 rev_key = {
.src_ip = key.dst_ip, .dst_ip = key.src_ip, .src_port = key.dst_port, .dst_port = key.src_port, .proto = key.proto};
add_to_whitelist_v6(&rev_key, now);
return XDP_PASS;
}
struct conn_entry *entry = bpf_map_lookup_elem(&first_seen_v6, &key);
if (!entry)
{
struct conn_entry new_entry = {.timestamp = now, .flags = 0};
bpf_map_update_elem(&first_seen_v6, &key, &new_entry, BPF_ANY);
return XDP_DROP;
}
if ((now - entry->timestamp) > RETRANS_WAIT_NS)
{
bpf_map_delete_elem(&first_seen_v6, &key);
add_to_whitelist_v6(&key, now);
struct conn_key_v6 rev_key = {
.src_ip = key.dst_ip, .dst_ip = key.src_ip, .src_port = key.dst_port, .dst_port = key.src_port, .proto = key.proto};
add_to_whitelist_v6(&rev_key, now);
return XDP_PASS;
}
return XDP_DROP;
}
// Not a handshake packet and not whitelisted: drop (default-deny).
return XDP_DROP;
}
SEC("xdp")
int tmw_shield_retr_test(struct xdp_md *ctx)
{
void *data = (void *)(long)ctx->data;
void *data_end = (void *)(long)ctx->data_end;
struct ethhdr *eth = data;
if ((void *)(eth + 1) > data_end)
return XDP_PASS;
__u64 now = bpf_ktime_get_ns();
__u16 eth_proto = bpf_ntohs(eth->h_proto);
incremental_cleanup(now);
if (eth_proto == ETH_P_IP)
{
struct iphdr *iph = data + sizeof(struct ethhdr);
if ((void *)(iph + 1) > data_end)
return XDP_PASS;
if ((iph->frag_off & bpf_htons(0x3FFF)) != 0)
{
__u64 now = bpf_ktime_get_ns();
__u32 attack_type = ATTACK_FRAG;
__u64 *attack_start = bpf_map_lookup_elem(&attack_mode, &attack_type);
__u64 window = now / 350000000ULL; // 350ms Fenster
__u32 key = 0;
struct frag_window_counter *counter = bpf_map_lookup_elem(&frag_window_map, &key);
if (!counter || counter->window_id != window)
{
struct frag_window_counter new_counter = {.window_id = window, .count = 1};
bpf_map_update_elem(&frag_window_map, &key, &new_counter, BPF_ANY);
}
else
{
counter->count++;
bpf_map_update_elem(&frag_window_map, &key, counter, BPF_ANY);
}
if (attack_start && *attack_start > 0 && (now - *attack_start) < 60000000000ULL)
{
if (counter && counter->count > THRESHOLD_FRAG)
{
bpf_map_update_elem(&attack_mode, &attack_type, &now, BPF_ANY);
}
return XDP_DROP;
}
if (counter && counter->count > THRESHOLD_FRAG)
{
// Attack-Mode für 60s setzen
bpf_map_update_elem(&attack_mode, &attack_type, &now, BPF_ANY);
return XDP_DROP;
}
}
}
else if (eth_proto == ETH_P_IPV6)
{
struct ipv6hdr *ip6h = data + sizeof(struct ethhdr);
if ((void *)(ip6h + 1) > data_end)
return XDP_PASS;
__u8 *nexthdr = &ip6h->nexthdr;
if (*nexthdr == 44)
{ // 44 = Fragment Header
__u64 now = bpf_ktime_get_ns();
__u32 attack_type = ATTACK_FRAG;
__u64 *attack_start = bpf_map_lookup_elem(&attack_mode, &attack_type);
__u64 window = now / 350000000ULL; // 350ms Fenster
__u32 key = 0;
struct frag_window_counter *counter = bpf_map_lookup_elem(&frag_window_map, &key);
if (!counter || counter->window_id != window)
{
struct frag_window_counter new_counter = {.window_id = window, .count = 1};
bpf_map_update_elem(&frag_window_map, &key, &new_counter, BPF_ANY);
}
else
{
counter->count++;
bpf_map_update_elem(&frag_window_map, &key, counter, BPF_ANY);
}
if (attack_start && *attack_start > 0 && (now - *attack_start) < 60000000000ULL)
{
return XDP_DROP;
}
if (counter && counter->count > THRESHOLD_FRAG)
{
bpf_map_update_elem(&attack_mode, &attack_type, &now, BPF_ANY);
return XDP_DROP;
}
}
}
switch (eth_proto)
{
case ETH_P_IP:
{
struct iphdr *iph = data + sizeof(struct ethhdr);
if ((void *)(iph + 1) > data_end)
return XDP_PASS;
if (iph->protocol == IPPROTO_TCP)
{
return process_tcp_v4(iph, data_end, now);
}
else if (iph->protocol == IPPROTO_UDP)
{
return XDP_PASS;
}
break;
}
case ETH_P_IPV6:
{
struct ipv6hdr *ip6h = data + sizeof(struct ethhdr);
if ((void *)(ip6h + 1) > data_end)
return XDP_PASS;
if (ip6h->nexthdr == IPPROTO_TCP)
{
return process_tcp_v6(ip6h, data_end, now);
}
else if (ip6h->nexthdr == IPPROTO_UDP)
{
return XDP_PASS;
}
break;
}
}
return XDP_PASS;
}
// License tag read by the kernel loader; GPL is required to use GPL-only BPF helpers.
char _license[] SEC("license") = "GPL";