diff --git a/bpf/egress.c b/bpf/egress.c
index 64c9e29..ba90949 100644
--- a/bpf/egress.c
+++ b/bpf/egress.c
@@ -28,7 +28,7 @@ static inline int mangle_data(struct __sk_buff* skb, __u16 offset, __be32* csum_
   try_shot(bpf_skb_store_bytes(skb, skb->len - copy_len, buf + 1, copy_len, 0));
 
   // Fix checksum when moved bytes does not align with u16 boundaries
-  if (copy_len == reserve_len && data_len % 2 != 0) {
+  if ((data_len <= copy_len && reserve_len % 2 != 0) || data_len % 2 != 0) {
     __u32 l = min(round_to_mul(copy_len, 4), MAX_RESERVE_LEN);
     *csum_diff = bpf_csum_diff((__be32*)(buf + 1), l, (__be32*)buf, l + 4, *csum_diff);
   }
diff --git a/bpf/ingress.c b/bpf/ingress.c
index d398d2f..cbb9b86 100644
--- a/bpf/ingress.c
+++ b/bpf/ingress.c
@@ -36,7 +36,7 @@ static inline int restore_data(struct xdp_md* xdp, __u16 offset, __u32 buf_len,
   try_drop(bpf_xdp_store_bytes(xdp, offset - TCP_UDP_HEADER_DIFF, buf + 1, copy_len));
 
   // Fix checksum when moved bytes does not align with u16 boundaries
-  if (copy_len == reserve_len && data_len % 2 != 0) {
+  if ((data_len <= copy_len && copy_len % 2 != 0) || data_len % 2 != 0) {
     __u32 l = min(round_to_mul(copy_len, 4), MAX_RESERVE_LEN);
     *csum_diff = bpf_csum_diff((__be32*)buf, l + 4, (__be32*)(buf + 1), l, *csum_diff);
   }