Switch to uniform integer types
hack3ric committed Mar 28, 2024
1 parent 0464045 commit dcd7327
Showing 4 changed files with 60 additions and 69 deletions.
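
The commit replaces the kernel-internal shorthand integer types (u8, u16, u32, s32, s64) with their double-underscore UAPI counterparts (__u8, __u16, __u32, __s32, __s64) across the BPF sources. The __-prefixed fixed-width types come from the kernel's UAPI headers (<linux/types.h>), while the shorthand names normally exist only inside the kernel tree or via vmlinux.h or a local typedef. A minimal sketch of the convention, using a hypothetical helper that is not part of this commit:

    #include <linux/types.h>
    #include <stdio.h>

    /* Hypothetical helper written in the post-commit style: only the UAPI
       fixed-width types, no reliance on kernel-internal u16/u32 shorthands. */
    static inline __u16 udp_payload_len(__u16 udp_total_len) {
      return udp_total_len - 8; /* 8 == sizeof(struct udphdr) */
    }

    int main(void) {
      printf("%u\n", (unsigned) udp_payload_len(36)); /* prints 28 */
      return 0;
    }
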
src/bpf/egress.c (53 changes: 22 additions & 31 deletions)
@@ -11,11 +11,11 @@
#include "mimic.h"

// Extend socket buffer and move n bytes from front to back.
static int mangle_data(struct __sk_buff* skb, u16 offset) {
u16 data_len = skb->len - offset;
static int mangle_data(struct __sk_buff* skb, __u16 offset) {
__u16 data_len = skb->len - offset;
try_shot(bpf_skb_change_tail(skb, skb->len + TCP_UDP_HEADER_DIFF, 0));
u8 buf[TCP_UDP_HEADER_DIFF] = {};
u32 copy_len = min(data_len, TCP_UDP_HEADER_DIFF);
__u8 buf[TCP_UDP_HEADER_DIFF] = {};
__u32 copy_len = min(data_len, TCP_UDP_HEADER_DIFF);
if (copy_len > 0) {
// HACK: make verifier happy
// Probably related:
@@ -28,33 +28,24 @@ static int mangle_data(struct __sk_buff* skb, u16 offset) {
return TC_ACT_OK;
}

static __always_inline void update_tcp_header(struct tcphdr* tcp, u16 udp_len, bool syn, bool ack, bool rst, u32 seq,
u32 ack_seq) {
static inline void update_tcp_header(struct tcphdr* tcp, __u16 udp_len, __u32 seq, __u32 ack_seq) {
tcp->seq = bpf_htonl(seq);
tcp->ack_seq = bpf_htonl(ack_seq);

tcp_flag_word(tcp) = 0;
tcp->doff = 5;
tcp->window = bpf_htons(0xfff);
if (rst) {
tcp->rst = 1;
} else {
tcp->syn = syn;
tcp->ack = ack;
}

u16 urg_ptr = 0;
tcp->urg_ptr = bpf_htons(urg_ptr);
tcp->ack = true;
tcp->urg_ptr = 0;
}

SEC("tc")
int egress_handler(struct __sk_buff* skb) {
decl_ok(struct ethhdr, eth, 0, skb);
u16 eth_proto = bpf_ntohs(eth->h_proto);
__u16 eth_proto = bpf_ntohs(eth->h_proto);

struct iphdr* ipv4 = NULL;
struct ipv6hdr* ipv6 = NULL;
u32 ip_end;
__u32 ip_end;

if (eth_proto == ETH_P_IP) {
redecl_shot(struct iphdr, ipv4, ETH_HLEN, skb);
@@ -66,14 +57,14 @@ int egress_handler(struct __sk_buff* skb) {
return TC_ACT_OK;
}

u8 ip_proto = ipv4 ? ipv4->protocol : ipv6 ? ipv6->nexthdr : 0;
__u8 ip_proto = ipv4 ? ipv4->protocol : ipv6 ? ipv6->nexthdr : 0;
if (ip_proto != IPPROTO_UDP) return TC_ACT_OK;
decl_ok(struct udphdr, udp, ip_end, skb);

if (!matches_whitelist(QUARTET_UDP, false)) return TC_ACT_OK;

u32 vkey = SETTINGS_LOG_VERBOSITY;
u32 log_verbosity = *(u32*)try_p_shot(bpf_map_lookup_elem(&mimic_settings, &vkey));
__u32 vkey = SETTINGS_LOG_VERBOSITY;
__u32 log_verbosity = *(__u32*)try_p_shot(bpf_map_lookup_elem(&mimic_settings, &vkey));

struct conn_tuple conn_key = gen_conn_key(QUARTET_UDP, false);
log_quartet(log_verbosity, LOG_LEVEL_DEBUG, false, LOG_TYPE_MATCHED, conn_key);
@@ -83,11 +74,11 @@ int egress_handler(struct __sk_buff* skb) {
old_udphdr.check = 0;
__be16 old_udp_csum = udp->check;

u16 udp_len = bpf_ntohs(udp->len);
u16 payload_len = udp_len - sizeof(*udp);
__u16 udp_len = bpf_ntohs(udp->len);
__u16 payload_len = udp_len - sizeof(*udp);

u32 seq = 0, ack_seq = 0, conn_seq, conn_ack_seq;
u32 random = bpf_get_prandom_u32();
__u32 seq = 0, ack_seq = 0, conn_seq, conn_ack_seq;
__u32 random = bpf_get_prandom_u32();
enum conn_state conn_state;

bpf_spin_lock(&conn->lock);
@@ -135,26 +126,26 @@ int egress_handler(struct __sk_buff* skb) {

try(mangle_data(skb, ip_end + sizeof(*udp)));
decl_shot(struct tcphdr, tcp, ip_end, skb);
update_tcp_header(tcp, udp_len, false, true, false, seq, ack_seq);
update_tcp_header(tcp, udp_len, seq, ack_seq);

tcp->check = 0;
s64 csum_diff = bpf_csum_diff((__be32*)&old_udphdr, sizeof(struct udphdr), (__be32*)tcp, sizeof(struct tcphdr), 0);
__s64 csum_diff = bpf_csum_diff((__be32*)&old_udphdr, sizeof(struct udphdr), (__be32*)tcp, sizeof(struct tcphdr), 0);
tcp->check = old_udp_csum;

u32 off = ip_end + offsetof(struct tcphdr, check);
__u32 off = ip_end + offsetof(struct tcphdr, check);
bpf_l4_csum_replace(skb, off, 0, csum_diff, 0);

__be16 newlen = bpf_htons(udp_len + TCP_UDP_HEADER_DIFF);
s64 diff = 0;
__s64 diff = 0;
if (ipv4) {
struct ipv4_ph_part oldph = {._pad = 0, .protocol = IPPROTO_UDP, .len = old_udphdr.len};
struct ipv4_ph_part newph = {._pad = 0, .protocol = IPPROTO_TCP, .len = newlen};
u32 size = sizeof(struct ipv4_ph_part);
__u32 size = sizeof(struct ipv4_ph_part);
diff = bpf_csum_diff((__be32*)&oldph, size, (__be32*)&newph, size, 0);
} else if (ipv6) {
struct ipv6_ph_part oldph = {._1 = {}, .len = old_udphdr.len, ._2 = {}, .nexthdr = IPPROTO_UDP};
struct ipv6_ph_part newph = {._1 = {}, .len = newlen, ._2 = {}, .nexthdr = IPPROTO_TCP};
u32 size = sizeof(struct ipv6_ph_part);
__u32 size = sizeof(struct ipv6_ph_part);
diff = bpf_csum_diff((__be32*)&oldph, size, (__be32*)&newph, size, 0);
}
bpf_l4_csum_replace(skb, off, 0, diff, BPF_F_PSEUDO_HDR);
src/bpf/ingress.c (48 changes: 24 additions & 24 deletions)
@@ -12,10 +12,10 @@
#include "mimic.h"

// Move back n bytes, shrink socket buffer and restore data.
static inline int restore_data(struct xdp_md* xdp, u16 offset, u32 buf_len) {
u8 buf[TCP_UDP_HEADER_DIFF] = {};
u16 data_len = buf_len - offset;
u32 copy_len = min(data_len, TCP_UDP_HEADER_DIFF);
static inline int restore_data(struct xdp_md* xdp, __u16 offset, __u32 buf_len) {
__u8 buf[TCP_UDP_HEADER_DIFF] = {};
__u16 data_len = buf_len - offset;
__u32 copy_len = min(data_len, TCP_UDP_HEADER_DIFF);
if (copy_len > 0) {
if (copy_len < 2) copy_len = 1; // HACK: see egress.c
try_drop(bpf_xdp_load_bytes(xdp, buf_len - copy_len, buf, copy_len));
@@ -25,27 +25,27 @@ static inline int restore_data(struct xdp_md* xdp, u16 offset, u32 buf_len) {
return XDP_PASS;
}

static __always_inline u32 new_ack_seq(struct tcphdr* tcp, u16 payload_len) {
static __always_inline __u32 new_ack_seq(struct tcphdr* tcp, __u16 payload_len) {
return bpf_ntohl(tcp->seq) + payload_len + tcp->syn;
}

static __always_inline void pre_syn_ack(u32* seq, u32* ack_seq, struct connection* conn, struct tcphdr* tcp,
u16 payload_len, u32 random) {
static __always_inline void pre_syn_ack(__u32* seq, __u32* ack_seq, struct connection* conn, struct tcphdr* tcp,
__u16 payload_len, __u32 random) {
conn->state = STATE_SYN_RECV;
*seq = conn->seq = random;
*ack_seq = conn->ack_seq = new_ack_seq(tcp, payload_len);
conn->seq += 1;
}

static __always_inline void pre_ack(enum conn_state new_state, u32* seq, u32* ack_seq, struct connection* conn,
struct tcphdr* tcp, u16 payload_len) {
static __always_inline void pre_ack(enum conn_state new_state, __u32* seq, __u32* ack_seq, struct connection* conn,
struct tcphdr* tcp, __u16 payload_len) {
conn->state = new_state;
*seq = conn->seq;
*ack_seq = conn->ack_seq = new_ack_seq(tcp, payload_len);
}

static __always_inline void pre_rst_ack(u32* seq, u32* ack_seq, struct connection* conn, struct tcphdr* tcp,
u16 payload_len) {
static __always_inline void pre_rst_ack(__u32* seq, __u32* ack_seq, struct connection* conn, struct tcphdr* tcp,
__u16 payload_len) {
conn->state = STATE_IDLE;
*seq = conn->seq = conn->ack_seq = 0;
*ack_seq = new_ack_seq(tcp, payload_len);
@@ -54,11 +54,11 @@ static __always_inline void pre_rst_ack(u32* seq, u32* ack_seq, struct connectio
SEC("xdp")
int ingress_handler(struct xdp_md* xdp) {
decl_pass(struct ethhdr, eth, 0, xdp);
u16 eth_proto = bpf_ntohs(eth->h_proto);
__u16 eth_proto = bpf_ntohs(eth->h_proto);

struct iphdr* ipv4 = NULL;
struct ipv6hdr* ipv6 = NULL;
u32 ip_end;
__u32 ip_end;

if (eth_proto == ETH_P_IP) {
redecl_drop(struct iphdr, ipv4, ETH_HLEN, xdp);
@@ -70,21 +70,21 @@ int ingress_handler(struct xdp_md* xdp) {
return XDP_PASS;
}

u8 ip_proto = ipv4 ? ipv4->protocol : ipv6 ? ipv6->nexthdr : 0;
__u8 ip_proto = ipv4 ? ipv4->protocol : ipv6 ? ipv6->nexthdr : 0;
if (ip_proto != IPPROTO_TCP) return XDP_PASS;
decl_pass(struct tcphdr, tcp, ip_end, xdp);

if (!matches_whitelist(QUARTET_TCP, true)) return XDP_PASS;

u32 vkey = SETTINGS_LOG_VERBOSITY;
u32 log_verbosity = *(u32*)try_p_drop(bpf_map_lookup_elem(&mimic_settings, &vkey));
__u32 vkey = SETTINGS_LOG_VERBOSITY;
__u32 log_verbosity = *(__u32*)try_p_drop(bpf_map_lookup_elem(&mimic_settings, &vkey));

struct conn_tuple conn_key = gen_conn_key(QUARTET_TCP, true);
log_quartet(log_verbosity, LOG_LEVEL_DEBUG, true, LOG_TYPE_MATCHED, conn_key);
struct connection* conn = try_p_drop(get_conn(&conn_key));

u32 buf_len = bpf_xdp_get_buff_len(xdp);
u32 payload_len = buf_len - ip_end - sizeof(*tcp);
__u32 buf_len = bpf_xdp_get_buff_len(xdp);
__u32 payload_len = buf_len - ip_end - sizeof(*tcp);

// TODO: verify checksum

@@ -103,8 +103,8 @@ int ingress_handler(struct xdp_md* xdp) {
}

bool syn, ack, rst, will_send_ctrl_packet, will_drop, newly_estab;
u32 seq = 0, ack_seq = 0, conn_seq, conn_ack_seq;
u32 random = bpf_get_prandom_u32();
__u32 seq = 0, ack_seq = 0, conn_seq, conn_ack_seq;
__u32 random = bpf_get_prandom_u32();
enum conn_state state;
syn = ack = rst = will_send_ctrl_packet = will_drop = newly_estab = false;

@@ -174,8 +174,8 @@ int ingress_handler(struct xdp_md* xdp) {
ipv4->tot_len = new_len;
ipv4->protocol = IPPROTO_UDP;

u32 ipv4_csum = (u16)~bpf_ntohs(ipv4->check);
update_csum(&ipv4_csum, -(s32)TCP_UDP_HEADER_DIFF);
__u32 ipv4_csum = (__u16)~bpf_ntohs(ipv4->check);
update_csum(&ipv4_csum, -(__s32)TCP_UDP_HEADER_DIFF);
update_csum(&ipv4_csum, IPPROTO_UDP - IPPROTO_TCP);
ipv4->check = bpf_htons(csum_fold(ipv4_csum));
} else if (ipv6) {
@@ -187,10 +187,10 @@ int ingress_handler(struct xdp_md* xdp) {
try_xdp(restore_data(xdp, ip_end + sizeof(*tcp), buf_len));
decl_drop(struct udphdr, udp, ip_end, xdp);

u16 udp_len = buf_len - ip_end - TCP_UDP_HEADER_DIFF;
__u16 udp_len = buf_len - ip_end - TCP_UDP_HEADER_DIFF;
udp->len = bpf_htons(udp_len);

u32 csum = 0;
__u32 csum = 0;
if (ipv4) {
update_csum_ul(&csum, bpf_ntohl(ipv4_saddr));
update_csum_ul(&csum, bpf_ntohl(ipv4_daddr));
src/bpf/log.h (9 changes: 4 additions & 5 deletions)
@@ -12,10 +12,9 @@ extern struct mimic_log_rb_map {
__uint(max_entries, sizeof(struct log_event) * 32);
} mimic_log_rb;

static inline void log_any(u32 log_verbosity, enum log_level level, bool ingress, enum log_type type,
static inline void log_any(__u32 log_verbosity, enum log_level level, bool ingress, enum log_type type,
union log_info info) {
if (log_verbosity < level) return;

struct log_event* e = bpf_ringbuf_reserve(&mimic_log_rb, sizeof(*e), 0);
if (!e) return;
e->level = level;
@@ -25,13 +24,13 @@ static inline void log_any(u32 log_verbosity, enum log_level level, bool ingress
bpf_ringbuf_submit(e, 0);
}

static inline void log_quartet(u32 log_verbosity, enum log_level level, bool ingress, enum log_type type,
static inline void log_quartet(__u32 log_verbosity, enum log_level level, bool ingress, enum log_type type,
struct conn_tuple quartet) {
log_any(log_verbosity, level, ingress, type, (union log_info){.quartet = quartet});
}

static __always_inline void log_tcp(u32 log_verbosity, enum log_level level, bool ingress, enum log_type type,
enum conn_state state, __u32 seq, __u32 ack_seq) {
static __always_inline void log_tcp(__u32 log_verbosity, enum log_level level, bool ingress, enum log_type type,
enum conn_state state, __u32 seq, __u32 ack_seq) {
log_any(log_verbosity, level, ingress, type,
(union log_info){.tcp = {.state = state, .seq = seq, .ack_seq = ack_seq}});
}
src/bpf/mimic.h (19 changes: 10 additions & 9 deletions)
@@ -25,8 +25,8 @@ extern struct mimic_conns_map {
extern struct mimic_settings_map {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 2);
__type(key, u32);
__type(value, u32);
__type(key, __u32);
__type(value, __u32);
} mimic_settings;

extern struct mimic_send_rb_map {
@@ -38,20 +38,20 @@ extern struct mimic_send_rb_map {
#define TCP_UDP_HEADER_DIFF (sizeof(struct tcphdr) - sizeof(struct udphdr))

struct ipv4_ph_part {
u8 _pad;
u8 protocol;
__u8 _pad;
__u8 protocol;
__be16 len;
} __attribute__((packed));

struct ipv6_ph_part {
u8 _1[2];
__u8 _1[2];
__be16 len;
u8 _2[3];
u8 nexthdr;
__u8 _2[3];
__u8 nexthdr;
} __attribute__((packed));

struct sk_buff* mimic_inspect_skb(struct __sk_buff*) __ksym;
int mimic_change_csum_offset(struct __sk_buff*, u16) __ksym;
int mimic_change_csum_offset(struct __sk_buff*, __u16) __ksym;

// clang-format off
#define QUARTET_DEF struct iphdr* ipv4, struct ipv6hdr* ipv6, struct udphdr* udp, struct tcphdr* tcp
@@ -128,7 +128,8 @@ static inline struct connection* get_conn(struct conn_tuple* conn_key) {
return conn;
}

static __always_inline void send_ctrl_packet(struct conn_tuple c, bool syn, bool ack, bool rst, u32 seq, u32 ack_seq) {
static __always_inline void send_ctrl_packet(struct conn_tuple c, bool syn, bool ack, bool rst, __u32 seq,
__u32 ack_seq) {
struct send_options* s = bpf_ringbuf_reserve(&mimic_send_rb, sizeof(*s), 0);
if (!s) return;
*s = (struct send_options){.c = c, .syn = syn, .ack = ack, .rst = rst, .seq = seq, .ack_seq = ack_seq};
