I've created a small project (https://github.com/NHAS/wag) that uses XDP & eBPF to allow time-based connections through a wireguard VPN.
I've attached the XDP eBPF program to the wireguard TUN device, but throughput is low (speed tests show roughly 20 Mbps with wireguard + eBPF, versus roughly 100 Mbps with wireguard without eBPF). Additionally, latency when pinging the wireguard server itself is inconsistent, and about 1 ICMP packet in every ~600 pings is dropped.
Note that this happens under no load, where total traffic would be below 100 Mbps.
The following code is loaded into the kernel using the cilium/ebpf library.
// Kernel load
...
xdpLink, err = link.AttachXDP(link.XDPOptions{
    Program:   xdpObjects.XdpProgFunc,
    Interface: iface.Index,
})
...
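For context, the full load-and-attach sequence looks roughly like the sketch below. This is a sketch, not the project's actual code: loadXdpObjects and the xdpObjects type are assumed to be bpf2go-generated bindings matching the names in the snippet, and the interface name wg0 is an assumption.

package main

import (
    "log"
    "net"

    "github.com/cilium/ebpf/link"
)

func main() {
    // Assumed bpf2go-generated bindings for the kernel program below.
    var objs xdpObjects
    if err := loadXdpObjects(&objs, nil); err != nil {
        log.Fatalf("loading eBPF objects: %v", err)
    }
    defer objs.Close()

    iface, err := net.InterfaceByName("wg0") // assumed wireguard interface name
    if err != nil {
        log.Fatalf("looking up interface: %v", err)
    }

    xdpLink, err := link.AttachXDP(link.XDPOptions{
        Program:   objs.XdpProgFunc,
        Interface: iface.Index,
    })
    if err != nil {
        log.Fatalf("attaching XDP program: %v", err)
    }
    defer xdpLink.Close()
}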
eBPF kernel program:
// +build ignore
#include "bpf_endian.h"
#include "common.h"
char __license[] SEC("license") = "Dual MIT/GPL";
// One /24
#define MAX_MAP_ENTRIES 256
// Inner map is an LPM trie, so we use this as the key
struct ip4_trie_key
{
    __u32 prefixlen; // first member must be u32
    __u32 addr;      // the rest are arbitrary
};
// Map of users (ipv4) to BOOTTIME uint64 timestamp denoting authorization status
struct bpf_map_def SEC("maps") sessions = {
    .type = BPF_MAP_TYPE_HASH,
    .max_entries = MAX_MAP_ENTRIES,
    .key_size = sizeof(__u32),
    .value_size = sizeof(__u64),
    .map_flags = 0,
};

// Map of users (ipv4) to BOOTTIME uint64 timestamp denoting when the last packet was received
struct bpf_map_def SEC("maps") last_packet_time = {
    .type = BPF_MAP_TYPE_HASH,
    .max_entries = MAX_MAP_ENTRIES,
    .key_size = sizeof(__u32),
    .value_size = sizeof(__u64),
    .map_flags = 0,
};

// A single value, in nanoseconds
struct bpf_map_def SEC("maps") inactivity_timeout_minutes = {
    .type = BPF_MAP_TYPE_ARRAY,
    .max_entries = 1,
    .key_size = sizeof(__u32),
    .value_size = sizeof(__u64),
    .map_flags = 0,
};

// Two tables of the same construction:
// IP to LPM trie
struct bpf_map_def SEC("maps") mfa_table = {
    .type = BPF_MAP_TYPE_HASH_OF_MAPS,
    .max_entries = MAX_MAP_ENTRIES,
    .key_size = sizeof(__u32),
    .value_size = sizeof(__u32),
    .map_flags = 0,
};

struct bpf_map_def SEC("maps") public_table = {
    .type = BPF_MAP_TYPE_HASH_OF_MAPS,
    .max_entries = MAX_MAP_ENTRIES,
    .key_size = sizeof(__u32),
    .value_size = sizeof(__u32),
    .map_flags = 0,
};
/*
Attempt to parse the IPv4 source and destination addresses from the packet.
Returns 0 if there is no IPv4 header; otherwise returns non-zero.
*/
static int parse_ip_src_dst_addr(struct xdp_md *ctx, __u32 *ip_src_addr, __u32 *ip_dst_addr)
{
    void *data_end = (void *)(long)ctx->data_end;
    void *data = (void *)(long)ctx->data;

    // As this is attached to a wireguard interface (a TUN device), we don't get layer 2 frames,
    // just happy little IP packets. So parse the IP header directly.
    struct iphdr *ip = data;
    if ((void *)(ip + 1) > data_end)
    {
        return 0;
    }

    // We don't support IPv6
    if (ip->version != 4)
    {
        return 0;
    }

    // Return the source and destination IP addresses in network byte order.
    *ip_src_addr = (__u32)(ip->saddr);
    *ip_dst_addr = (__u32)(ip->daddr);

    return 1;
}
static int conntrack(__u32 *src_ip, __u32 *dst_ip)
{
    // Max lifetime of the session.
    __u64 *session_expiry = bpf_map_lookup_elem(&sessions, src_ip);
    if (!session_expiry)
    {
        return 0;
    }

    // The most recent time a valid packet was received from the user src_ip
    __u64 *lastpacket = bpf_map_lookup_elem(&last_packet_time, src_ip);
    if (!lastpacket)
    {
        return 0;
    }

    // Our userland-defined inactivity timeout
    u32 index = 0;
    __u64 *inactivity_timeout = bpf_map_lookup_elem(&inactivity_timeout_minutes, &index);
    if (!inactivity_timeout)
    {
        return 0;
    }

    __u64 currentTime = bpf_ktime_get_boot_ns();

    // The inner map must be an LPM trie
    struct ip4_trie_key key = {
        .prefixlen = 32,
        .addr = *dst_ip,
    };

    // If the inactivity timeout is not disabled and the user's session has timed out
    u8 isTimedOut = (*inactivity_timeout != __UINT64_MAX__ && ((currentTime - *lastpacket) >= *inactivity_timeout));
    if (isTimedOut)
    {
        u64 locked = 0;
        bpf_map_update_elem(&sessions, src_ip, &locked, BPF_EXIST);
    }

    // Order of preference is MFA -> Public, just in case someone adds multiple entries
    // for the same route, to make accidental exposure less likely.
    // If the key is a match for the LPM in the public table
    void *user_restricted_routes = bpf_map_lookup_elem(&mfa_table, src_ip);
    if (user_restricted_routes)
    {
        if (bpf_map_lookup_elem(user_restricted_routes, &key) &&
            // 0 indicates an invalid session
            *session_expiry != 0 &&
            // If max session lifetime is disabled, or we are before the max lifetime of the session
            (*session_expiry == __UINT64_MAX__ || *session_expiry > currentTime) &&
            !isTimedOut)
        {
            // Doesn't matter if the value is not atomically set
            *lastpacket = currentTime;
            return 1;
        }
    }

    void *user_public_routes = bpf_map_lookup_elem(&public_table, src_ip);
    if (user_public_routes && bpf_map_lookup_elem(user_public_routes, &key))
    {
        // Only update the lastpacket time if we're not expired
        if (!isTimedOut)
        {
            *lastpacket = currentTime;
        }
        return 1;
    }

    return 0;
}
SEC("xdp")
int xdp_prog_func(struct xdp_md *ctx)
{
__u32 src_ip, dst_ip;
if (!parse_ip_src_dst_addr(ctx, &src_ip, &dst_ip))
{
return XDP_DROP;
}
if (conntrack(&src_ip, &dst_ip) || conntrack(&dst_ip, &src_ip))
{
return XDP_PASS;
}
return XDP_DROP;
}
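For completeness, the userspace side that drives these maps looks roughly like the sketch below. This is a sketch rather than the project's actual code: the *ebpf.Map handles are assumed to come from the loaded objects (e.g. via bpf2go), and only the sessions, last_packet_time and inactivity_timeout_minutes maps are shown.

package wag // hypothetical package name

import (
    "fmt"
    "net"
    "time"

    "github.com/cilium/ebpf"
    "golang.org/x/sys/unix"
)

// bootNS returns CLOCK_BOOTTIME in nanoseconds, the same clock the kernel
// program samples with bpf_ktime_get_boot_ns().
func bootNS() (uint64, error) {
    var ts unix.Timespec
    if err := unix.ClockGettime(unix.CLOCK_BOOTTIME, &ts); err != nil {
        return 0, err
    }
    return uint64(ts.Nano()), nil
}

// Authorize gives srcIP a session valid for the given lifetime. Keys are the
// raw 4 bytes of the address in network byte order, matching ip->saddr.
func Authorize(sessions, lastPacketTime *ebpf.Map, srcIP net.IP, lifetime time.Duration) error {
    ip4 := srcIP.To4()
    if ip4 == nil {
        return fmt.Errorf("not an IPv4 address: %s", srcIP)
    }
    now, err := bootNS()
    if err != nil {
        return err
    }
    if err := sessions.Put([]byte(ip4), now+uint64(lifetime.Nanoseconds())); err != nil {
        return err
    }
    return lastPacketTime.Put([]byte(ip4), now)
}

// SetInactivityTimeout writes the timeout, converted to nanoseconds, into
// index 0 of the single-entry inactivity_timeout_minutes array map.
func SetInactivityTimeout(timeoutMap *ebpf.Map, d time.Duration) error {
    return timeoutMap.Put(uint32(0), uint64(d.Nanoseconds()))
}

The mfa_table and public_table hash-of-maps are populated the same way, with struct ip4_trie_key mirrored as a Go struct of two uint32 fields for entries in the inner LPM tries.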
The questions I'm hoping to have answered are:
- How can I profile which parts (if any) of my eBPF program are expensive?
- Is this a processing-time limitation of XDP, or are there best practices to keep in mind?
- Is my eBPF program sane?
Thank you.
1 Answer
For the BPF XDP hook, the most common sources of large per-packet overhead are: (1) the program being executed by the eBPF interpreter rather than being JIT-compiled, which is controlled by the value of /proc/sys/net/core/bpf_jit_enable; and (2) the program running in generic XDP mode instead of native (driver) XDP mode. As discussed in the comments, you are in the second case: your program is attached to a TUN device, which does not support XDP driver mode. This means your BPF program runs after the skb allocation, and performance will not be much better than at the tc hook.
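Both points can be verified from Go with cilium/ebpf. A sketch (prog is the loaded *ebpf.Program; the wg0 interface name is an assumption):

package wag // hypothetical package name

import (
    "fmt"
    "net"
    "os"
    "strings"

    "github.com/cilium/ebpf"
    "github.com/cilium/ebpf/link"
)

// JITEnabled returns the value of /proc/sys/net/core/bpf_jit_enable:
// 0 = interpreter only, 1 = JIT enabled, 2 = JIT enabled with debug output.
func JITEnabled() (string, error) {
    raw, err := os.ReadFile("/proc/sys/net/core/bpf_jit_enable")
    if err != nil {
        return "", err
    }
    return strings.TrimSpace(string(raw)), nil
}

// AttachDriverMode requests native (driver-mode) XDP explicitly instead of
// letting the kernel fall back to generic mode. On a TUN device such as a
// wireguard interface this is expected to fail, which confirms the program
// can only run in generic (skb) mode there.
func AttachDriverMode(prog *ebpf.Program, ifaceName string) (link.Link, error) {
    iface, err := net.InterfaceByName(ifaceName)
    if err != nil {
        return nil, fmt.Errorf("looking up %s: %w", ifaceName, err)
    }
    return link.AttachXDP(link.XDPOptions{
        Program:   prog,
        Interface: iface.Index,
        Flags:     link.XDPDriverMode, // fail instead of silently using generic mode
    })
}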
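On the profiling question from the post: one low-overhead starting point is the kernel's per-program run statistics (BPF_ENABLE_STATS, kernel 5.8+), which report run count and cumulative runtime rather than a per-instruction profile. A sketch using cilium/ebpf, assuming prog is the loaded *ebpf.Program:

package wag // hypothetical package name

import (
    "fmt"
    "time"

    "github.com/cilium/ebpf"
)

// AverageRuntime enables kernel BPF runtime accounting, waits for the given
// window, then reports the mean execution time per invocation of prog.
func AverageRuntime(prog *ebpf.Program, window time.Duration) (time.Duration, error) {
    // 0 == BPF_STATS_RUN_TIME: account runtime and run count per program.
    stats, err := ebpf.EnableStats(0)
    if err != nil {
        return 0, err
    }
    defer stats.Close()

    time.Sleep(window)

    info, err := prog.Info()
    if err != nil {
        return 0, err
    }
    runtime, okRuntime := info.Runtime()
    count, okCount := info.RunCount()
    if !okRuntime || !okCount || count == 0 {
        return 0, fmt.Errorf("runtime stats unavailable on this kernel")
    }
    return runtime / time.Duration(count), nil
}

For per-instruction hotspots, perf sampling of the JIT-compiled program is the usual next step.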