Home
last modified time | relevance | path

Searched refs:skb (Results 1 – 4 of 4) sorted by relevance

/system/netd/bpf_progs/
netd.c:100 static __always_inline inline void update_##the_stats_map(struct __sk_buff* skb, \
111 uint64_t bytes = skb->len; \
113 bool is_ipv6 = (skb->protocol == htons(ETH_P_IPV6)); \
136 static inline bool skip_owner_match(struct __sk_buff* skb) { in DEFINE_UPDATE_STATS()
139 if (skb->protocol == htons(ETH_P_IP)) { in DEFINE_UPDATE_STATS()
143 ret = bpf_skb_load_bytes(skb, offset, &proto, 1); in DEFINE_UPDATE_STATS()
148 ret = bpf_skb_load_bytes(skb, IPPROTO_IHL_OFF, &ihl, 1); in DEFINE_UPDATE_STATS()
150 ret = bpf_skb_load_bytes(skb, ihl * 4 + TCP_FLAG_OFF, &flag, 1); in DEFINE_UPDATE_STATS()
156 } else if (skb->protocol == htons(ETH_P_IPV6)) { in DEFINE_UPDATE_STATS()
159 ret = bpf_skb_load_bytes(skb, offset, &proto, 1); in DEFINE_UPDATE_STATS()
[all …]
bpf_net_helpers.h:26 static uint64_t (*bpf_get_socket_cookie)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_cookie;
28 static uint32_t (*bpf_get_socket_uid)(struct __sk_buff* skb) = (void*)BPF_FUNC_get_socket_uid;
29 static int (*bpf_skb_load_bytes)(struct __sk_buff* skb, int off, void* to,
35 static int64_t (*bpf_csum_update)(struct __sk_buff* skb, __wsum csum) = (void*)BPF_FUNC_csum_update;
37 static int (*bpf_skb_change_proto)(struct __sk_buff* skb, __be16 proto,
39 static int (*bpf_l3_csum_replace)(struct __sk_buff* skb, __u32 offset, __u64 from, __u64 to,
41 static int (*bpf_l4_csum_replace)(struct __sk_buff* skb, __u32 offset, __u64 from, __u64 to,
45 static int (*bpf_skb_change_head)(struct __sk_buff* skb, __u32 head_room,
47 static int (*bpf_skb_adjust_room)(struct __sk_buff* skb, __s32 len_diff, __u32 mode,
56 static inline __always_inline __unused bool is_received_skb(struct __sk_buff* skb) { in is_received_skb() argument
[all …]
offload.c:37 static inline __always_inline int do_forward(struct __sk_buff* skb, bool is_ethernet) { in do_forward() argument
39 void* data = (void*)(long)skb->data; in do_forward()
40 const void* data_end = (void*)(long)skb->data_end; in do_forward()
45 if (skb->protocol != htons(ETH_P_IPV6)) return TC_ACT_OK; in do_forward()
67 .iif = skb->ifindex, in do_forward()
76 uint32_t stat_and_limit_k = skb->ifindex; in do_forward()
100 uint64_t bytes = skb->len; in do_forward()
121 if (bpf_skb_change_head(skb, l2_header_size, /*flags*/ 0)) { in do_forward()
127 data = (void*)(long)skb->data; in do_forward()
128 data_end = (void*)(long)skb->data_end; in do_forward()
[all …]
clatd.c:42 static inline __always_inline int nat64(struct __sk_buff* skb, bool is_ethernet) { in nat64() argument
44 void* data = (void*)(long)skb->data; in nat64()
45 const void* data_end = (void*)(long)skb->data_end; in nat64()
50 if (skb->protocol != htons(ETH_P_IPV6)) return TC_ACT_OK; in nat64()
76 .iif = skb->ifindex, in nat64()
132 if (bpf_skb_change_proto(skb, htons(ETH_P_IP), 0)) return TC_ACT_OK; in nat64()
149 bpf_csum_update(skb, sum6); in nat64()
152 data = (void*)(long)skb->data; in nat64()
153 data_end = (void*)(long)skb->data_end; in nat64()
180 int sched_cls_ingress_clat_ether(struct __sk_buff* skb) { in sched_cls_ingress_clat_ether() argument
[all …]