1 /* Common BPF helpers to be used by all BPF programs loaded by Android */
2 
3 #include <linux/bpf.h>
4 #include <stdbool.h>
5 #include <stdint.h>
6 
7 #include "bpf_map_def.h"
8 
9 /* place things in different elf sections */
10 #define SEC(NAME) __attribute__((section(NAME), used))
11 
12 /* Example use: LICENSE("GPL"); or LICENSE("Apache 2.0"); */
13 #define LICENSE(NAME) char _license[] SEC("license") = (NAME)
14 
15 /* flag the resulting bpf .o file as critical to system functionality,
16  * loading all kernel version appropriate programs in it must succeed
17  * for bpfloader success
18  */
19 #define CRITICAL(REASON) char _critical[] SEC("critical") = (REASON)
20 
21 /*
22  * Helper functions called from eBPF programs written in C. These are
23  * implemented in the kernel sources.
24  */
25 
26 /* generic functions */
27 
28 /*
29  * Type-unsafe bpf map functions - avoid if possible.
30  *
31  * Using these it is possible to pass in keys/values of the wrong type/size,
32  * or, for 'bpf_map_lookup_elem_unsafe' receive into a pointer to the wrong type.
33  * You will not get a compile time failure, and for certain types of errors you
34  * might not even get a failure from the kernel's ebpf verifier during program load,
35  * instead stuff might just not work right at runtime.
36  *
37  * Instead please use:
38  *   DEFINE_BPF_MAP(foo_map, TYPE, KeyType, ValueType, num_entries)
39  * where TYPE can be something like HASH or ARRAY, and num_entries is an integer.
40  *
41  * This defines the map (hence this should not be used in a header file included
42  * from multiple locations) and provides type safe accessors:
43  *   ValueType * bpf_foo_map_lookup_elem(const KeyType *)
44  *   int bpf_foo_map_update_elem(const KeyType *, const ValueType *, flags)
45  *   int bpf_foo_map_delete_elem(const KeyType *)
46  *
47  * This will make sure that if you change the type of a map you'll get compile
48  * errors at any spots you forget to update with the new type.
49  *
50  * Note: these all take 'const void* map' because from the C/eBPF point of view
51  * the map struct is really just a readonly map definition of the in kernel object.
 * Runtime modification of the map-defining struct is meaningless, since
 * its contents are only ever used during bpf program loading & map creation
 * by the bpf loader, and not by the eBPF program itself.
55  */
/* Looks up 'key' in 'map'; returns a pointer to the stored value, or NULL if
 * the key is absent (per bpf-helpers(7)).
 * Note: BPF_FUNC_* are small integer helper ids, not real addresses; calls
 * through these pointers are recognized and fixed up by the bpf toolchain. */
static void* (*bpf_map_lookup_elem_unsafe)(const void* map,
                                           const void* key) = (void*)BPF_FUNC_map_lookup_elem;
/* Inserts or updates the entry for 'key' with 'value'; 'flags' selects
 * BPF_ANY / BPF_NOEXIST / BPF_EXIST semantics (per bpf-helpers(7)). */
static int (*bpf_map_update_elem_unsafe)(const void* map, const void* key, const void* value,
                                         unsigned long long flags) = (void*)
        BPF_FUNC_map_update_elem;
/* Removes the entry for 'key'; returns a negative error if it did not exist. */
static int (*bpf_map_delete_elem_unsafe)(const void* map,
                                         const void* key) = (void*)BPF_FUNC_map_delete_elem;
63 
/* Type safe macro to declare a map and related accessor functions.
 *
 * Emits a 'struct bpf_map_def' into the "maps" ELF section carrying the key /
 * value sizes, capacity, and the owner uid/gid plus mode bits for the pinned
 * map (all consumed by the bpf loader, not by the program itself), and then
 * generates three always-inline, type-checked wrappers over the unsafe
 * accessors above:
 *   TypeOfValue* bpf_<the_map>_lookup_elem(const TypeOfKey*)
 *   int bpf_<the_map>_update_elem(const TypeOfKey*, const TypeOfValue*, flags)
 *   int bpf_<the_map>_delete_elem(const TypeOfKey*)
 * (comments cannot safely live inside the backslash-continued body below) */
#define DEFINE_BPF_MAP_UGM(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, usr, grp, md)     \
    const struct bpf_map_def SEC("maps") the_map = {                                             \
            .type = BPF_MAP_TYPE_##TYPE,                                                         \
            .key_size = sizeof(TypeOfKey),                                                       \
            .value_size = sizeof(TypeOfValue),                                                   \
            .max_entries = (num_entries),                                                        \
            .uid = (usr),                                                                        \
            .gid = (grp),                                                                        \
            .mode = (md),                                                                        \
    };                                                                                           \
                                                                                                 \
    static inline __always_inline __unused TypeOfValue* bpf_##the_map##_lookup_elem(             \
            const TypeOfKey* k) {                                                                \
        return bpf_map_lookup_elem_unsafe(&the_map, k);                                          \
    };                                                                                           \
                                                                                                 \
    static inline __always_inline __unused int bpf_##the_map##_update_elem(                      \
            const TypeOfKey* k, const TypeOfValue* v, unsigned long long flags) {                \
        return bpf_map_update_elem_unsafe(&the_map, k, v, flags);                                \
    };                                                                                           \
                                                                                                 \
    static inline __always_inline __unused int bpf_##the_map##_delete_elem(const TypeOfKey* k) { \
        return bpf_map_delete_elem_unsafe(&the_map, k);                                          \
    };
89 
/* Convenience wrappers around DEFINE_BPF_MAP_UGM with common owner/mode combos. */

/* Root-only map: uid = gid = AID_ROOT, mode 0600 (owner read+write only). */
#define DEFINE_BPF_MAP(the_map, TYPE, KeyType, ValueType, size) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, size, AID_ROOT, AID_ROOT, 0600)

/* Group-write-only map: root-owned, mode 0620 — group 'gid' may write, not read. */
#define DEFINE_BPF_MAP_GWO(the_map, TYPE, KeyType, ValueType, size, gid) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, size, AID_ROOT, gid, 0620)

/* Group-read-only map: root-owned, mode 0640 — group 'gid' may read, not write. */
#define DEFINE_BPF_MAP_GRO(the_map, TYPE, KeyType, ValueType, size, gid) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, size, AID_ROOT, gid, 0640)

/* Group read-write map: root-owned, mode 0660 — group 'gid' may read and write. */
#define DEFINE_BPF_MAP_GRW(the_map, TYPE, KeyType, ValueType, size, gid) \
    DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, size, AID_ROOT, gid, 0660)
101 
/*
 * Raw kernel helper "function pointers": the value of each pointer is the
 * helper's numeric id from <linux/bpf.h>, and the bpf toolchain turns calls
 * through them into in-kernel helper calls.  Prototypes mirror the kernel's
 * UAPI (see bpf-helpers(7)).
 *
 * Fix: 'unsafe_ptr' is 'const void*' to match the kernel prototypes — the
 * helpers only read through it.  Backward compatible: non-const arguments
 * still convert implicitly.
 */
/* Copies 'size' bytes from an address the verifier cannot prove safe. */
static int (*bpf_probe_read)(void* dst, int size, const void* unsafe_ptr) = (void*) BPF_FUNC_probe_read;
/* Like bpf_probe_read, but stops at (and includes) a terminating NUL. */
static int (*bpf_probe_read_str)(void* dst, int size, const void* unsafe_ptr) = (void*) BPF_FUNC_probe_read_str;
/* Nanoseconds since boot (monotonic, excludes suspend). */
static unsigned long long (*bpf_ktime_get_ns)(void) = (void*) BPF_FUNC_ktime_get_ns;
/* printf-like debugging into the trace pipe; for debugging only. */
static int (*bpf_trace_printk)(const char* fmt, int fmt_size, ...) = (void*) BPF_FUNC_trace_printk;
/* (tgid << 32) | pid of the current task. */
static unsigned long long (*bpf_get_current_pid_tgid)(void) = (void*) BPF_FUNC_get_current_pid_tgid;
/* (gid << 32) | uid of the current task. */
static unsigned long long (*bpf_get_current_uid_gid)(void) = (void*) BPF_FUNC_get_current_uid_gid;
/* Id of the cpu the program is currently executing on. */
static unsigned long long (*bpf_get_smp_processor_id)(void) = (void*) BPF_FUNC_get_smp_processor_id;
109 
/* Kernel version encoding used for min_kver/max_kver below. */
/* No lower bound: matches every kernel. */
#define KVER_NONE 0
/* Packs major.minor.patch into one integer: (a << 16) + (b << 8) + c,
 * e.g. KVER(4, 14, 0). */
#define KVER(a, b, c) ((a)*65536 + (b)*256 + (c))
/* Effectively infinite upper bound: no kernel version exceeds it. */
#define KVER_INF 0xFFFFFFFF
113 
/* Declares a bpf program together with its 'struct bpf_prog_def' metadata in
 * the "progs" ELF section: owner uid/gid of the pinned program, the
 * [min_kv, max_kv) kernel version range it applies to (see the KVER range
 * wrappers below), and whether a load failure is tolerated ('opt', see the
 * optional/critical discussion below).  The macro ends with 'int the_prog',
 * so an invocation is followed by the function's parameter list and body. */
#define DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, \
                                       opt)                                                        \
    const struct bpf_prog_def SEC("progs") the_prog##_def = {                                      \
            .uid = (prog_uid),                                                                     \
            .gid = (prog_gid),                                                                     \
            .min_kver = (min_kv),                                                                  \
            .max_kver = (max_kv),                                                                  \
            .optional = (opt),                                                                     \
    };                                                                                             \
    SEC(SECTION_NAME)                                                                              \
    int the_prog
125 
126 // Programs (here used in the sense of functions/sections) marked optional are allowed to fail
127 // to load (for example due to missing kernel patches).
128 // The bpfloader will just ignore these failures and continue processing the next section.
129 //
130 // A non-optional program (function/section) failing to load causes a failure and aborts
131 // processing of the entire .o, if the .o is additionally marked critical, this will result
132 // in the entire bpfloader process terminating with a failure and not setting the bpf.progs_loaded
133 // system property.  This in turn results in waitForProgsLoaded() never finishing.
134 //
135 // ie. a non-optional program in a critical .o is mandatory for kernels matching the min/max kver.
136 
// Mandatory program, loaded only on kernels with min_kv <= version < max_kv.
#define DEFINE_BPF_PROG_KVER_RANGE(sect, usr, grp, fn, min_kv, max_kv) \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(sect, usr, grp, fn, min_kv, max_kv, false)

// Same kernel version range, but a load failure is tolerated and skipped.
#define DEFINE_OPTIONAL_BPF_PROG_KVER_RANGE(sect, usr, grp, fn, min_kv, max_kv) \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(sect, usr, grp, fn, min_kv, max_kv, true)

// Mandatory program for any kernel with version >= min_kv.
#define DEFINE_BPF_PROG_KVER(sect, usr, grp, fn, min_kv) \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(sect, usr, grp, fn, min_kv, KVER_INF, false)

// Optional program for any kernel with version >= min_kv.
#define DEFINE_OPTIONAL_BPF_PROG_KVER(sect, usr, grp, fn, min_kv) \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(sect, usr, grp, fn, min_kv, KVER_INF, true)

// Mandatory program with no kernel version constraint.
#define DEFINE_BPF_PROG(sect, usr, grp, fn) \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(sect, usr, grp, fn, 0, KVER_INF, false)

// Optional program with no kernel version constraint.
#define DEFINE_OPTIONAL_BPF_PROG(sect, usr, grp, fn) \
    DEFINE_BPF_PROG_KVER_RANGE_OPT(sect, usr, grp, fn, 0, KVER_INF, true)
158