1 // SPDX-License-Identifier: GPL-2.0
2 #include <bpf/btf.h>
3 #include <test_btf.h>
4 #include <linux/btf.h>
5 #include <test_progs.h>
6 #include <network_helpers.h>
7
8 #include "linked_list.skel.h"
9 #include "linked_list_fail.skel.h"
10
/* Shared verifier-log buffer; large enough for full log_level=1 output. */
static char log_buf[1024 * 1024];

/*
 * Table of negative tests: each entry names a program in
 * linked_list_fail.skel.h and the exact substring the verifier must emit
 * when that program is (intentionally unsuccessfully) loaded.
 */
static struct {
	const char *prog_name;
	const char *err_msg;
} linked_list_fail_tests[] = {
/* Push/pop on a list head without holding the associated spin lock.
 * "off" is the lock offset inside the owning allocation/map value. */
#define TEST(test, off) \
	{ #test "_missing_lock_push_front", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
	{ #test "_missing_lock_push_back", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
	{ #test "_missing_lock_pop_front", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
	{ #test "_missing_lock_pop_back", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" },
	TEST(kptr, 32)
	TEST(global, 16)
	TEST(map, 0)
	TEST(inner_map, 0)
#undef TEST
/* Push/pop while holding a lock that belongs to a different allocation
 * than the list head being operated on. */
#define TEST(test, op) \
	{ #test "_kptr_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=32 must be held for bpf_list_head" }, \
	{ #test "_global_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=16 must be held for bpf_list_head" }, \
	{ #test "_map_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=0 must be held for bpf_list_head" }, \
	{ #test "_inner_map_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=0 must be held for bpf_list_head" },
	TEST(kptr, push_front)
	TEST(kptr, push_back)
	TEST(kptr, pop_front)
	TEST(kptr, pop_back)
	TEST(global, push_front)
	TEST(global, push_back)
	TEST(global, pop_front)
	TEST(global, pop_back)
	TEST(map, push_front)
	TEST(map, push_back)
	TEST(map, pop_front)
	TEST(map, pop_back)
	TEST(inner_map, push_front)
	TEST(inner_map, push_back)
	TEST(inner_map, pop_front)
	TEST(inner_map, pop_back)
#undef TEST
	/* Program types that may not use graph data structures at all. */
	{ "map_compat_kprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_kretprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_perf", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_raw_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_raw_tp_w", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	/* Misuse of bpf_obj_new/bpf_obj_drop and of objects after release. */
	{ "obj_type_id_oor", "local type ID argument must be in range [0, U32_MAX]" },
	{ "obj_new_no_composite", "bpf_obj_new type ID argument must be of a struct" },
	{ "obj_new_no_struct", "bpf_obj_new type ID argument must be of a struct" },
	{ "obj_drop_non_zero_off", "R1 must have zero offset when passed to release func" },
	{ "new_null_ret", "R0 invalid mem access 'ptr_or_null_'" },
	{ "obj_new_acq", "Unreleased reference id=" },
	{ "use_after_drop", "invalid mem access 'scalar'" },
	{ "ptr_walk_scalar", "type=scalar expected=percpu_ptr_" },
	/* Direct loads/stores to special BTF fields are disallowed. */
	{ "direct_read_lock", "direct access to bpf_spin_lock is disallowed" },
	{ "direct_write_lock", "direct access to bpf_spin_lock is disallowed" },
	{ "direct_read_head", "direct access to bpf_list_head is disallowed" },
	{ "direct_write_head", "direct access to bpf_list_head is disallowed" },
	{ "direct_read_node", "direct access to bpf_list_node is disallowed" },
	{ "direct_write_node", "direct access to bpf_list_node is disallowed" },
	{ "use_after_unlock_push_front", "invalid mem access 'scalar'" },
	{ "use_after_unlock_push_back", "invalid mem access 'scalar'" },
	{ "double_push_front", "arg#1 expected pointer to allocated object" },
	{ "double_push_back", "arg#1 expected pointer to allocated object" },
	/* Node/head type and offset mismatches. */
	{ "no_node_value_type", "bpf_list_node not found at offset=0" },
	{ "incorrect_value_type",
	  "operation on bpf_list_head expects arg#1 bpf_list_node at offset=0 in struct foo, "
	  "but arg is at offset=0 in struct bar" },
	{ "incorrect_node_var_off", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
	{ "incorrect_node_off1", "bpf_list_node not found at offset=1" },
	{ "incorrect_node_off2", "arg#1 offset=40, but expected bpf_list_node at offset=0 in struct foo" },
	{ "no_head_type", "bpf_list_head not found at offset=0" },
	{ "incorrect_head_var_off1", "R1 doesn't have constant offset" },
	{ "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
	{ "incorrect_head_off1", "bpf_list_head not found at offset=17" },
	{ "incorrect_head_off2", "bpf_list_head not found at offset=1" },
	/* Popped pointer keeps the node offset; passing it on must fail. */
	{ "pop_front_off",
	  "15: (bf) r1 = r6                      ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) "
	  "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) refs=2,4\n"
	  "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" },
	{ "pop_back_off",
	  "15: (bf) r1 = r6                      ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) "
	  "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=40,imm=0) refs=2,4\n"
	  "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" },
};
106
test_linked_list_fail_prog(const char * prog_name,const char * err_msg)107 static void test_linked_list_fail_prog(const char *prog_name, const char *err_msg)
108 {
109 LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
110 .kernel_log_size = sizeof(log_buf),
111 .kernel_log_level = 1);
112 struct linked_list_fail *skel;
113 struct bpf_program *prog;
114 int ret;
115
116 skel = linked_list_fail__open_opts(&opts);
117 if (!ASSERT_OK_PTR(skel, "linked_list_fail__open_opts"))
118 return;
119
120 prog = bpf_object__find_program_by_name(skel->obj, prog_name);
121 if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
122 goto end;
123
124 bpf_program__set_autoload(prog, true);
125
126 ret = linked_list_fail__load(skel);
127 if (!ASSERT_ERR(ret, "linked_list_fail__load must fail"))
128 goto end;
129
130 if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) {
131 fprintf(stderr, "Expected: %s\n", err_msg);
132 fprintf(stderr, "Verifier: %s\n", log_buf);
133 }
134
135 end:
136 linked_list_fail__destroy(skel);
137 }
138
/*
 * Overwrite @map's single element (key 0) with 0xff bytes so the kernel's
 * check_and_free_fields path releases any list nodes still linked in the
 * old value.
 */
static void clear_fields(struct bpf_map *map)
{
	int key = 0;
	char buf[24];

	memset(buf, 0xff, sizeof(buf));
	ASSERT_OK(bpf_map__update_elem(map, &key, sizeof(key), buf, sizeof(buf), 0),
		  "check_and_free_fields");
}
147
/* Mode selector for test_linked_list_success(): run every group of
 * programs (TEST_ALL) or just one of the three groups. */
enum {
	TEST_ALL,
	PUSH_POP,
	PUSH_POP_MULT,
	LIST_IN_LIST,
};
154
/*
 * Run the positive-test programs from linked_list.skel.h. @mode selects
 * which group executes (TEST_ALL runs all three). When @leave_in_map is
 * true, list elements stay in the maps so that skeleton/map teardown must
 * free them; otherwise the values are cleared after each run to exercise
 * check_and_free_fields on element update.
 */
static void test_linked_list_success(int mode, bool leave_in_map)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);
	struct linked_list *skel;
	int ret;

	skel = linked_list__open_and_load();
	if (!ASSERT_OK_PTR(skel, "linked_list__open_and_load"))
		return;

	/* Group 1: single push/pop on map, inner map, and global lists. */
	if (mode == TEST_ALL || mode == PUSH_POP) {
		ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop), &opts);
		ASSERT_OK(ret, "map_list_push_pop");
		ASSERT_OK(opts.retval, "map_list_push_pop retval");
		if (!leave_in_map)
			clear_fields(skel->maps.array_map);

		ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop), &opts);
		ASSERT_OK(ret, "inner_map_list_push_pop");
		ASSERT_OK(opts.retval, "inner_map_list_push_pop retval");
		if (!leave_in_map)
			clear_fields(skel->maps.inner_map);

		ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop), &opts);
		ASSERT_OK(ret, "global_list_push_pop");
		ASSERT_OK(opts.retval, "global_list_push_pop retval");
		if (!leave_in_map)
			clear_fields(skel->maps.bss_A);
	}

	/* Group 2: multiple elements pushed/popped per run. */
	if (mode == TEST_ALL || mode == PUSH_POP_MULT) {
		ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop_multiple), &opts);
		ASSERT_OK(ret, "map_list_push_pop_multiple");
		ASSERT_OK(opts.retval, "map_list_push_pop_multiple retval");
		if (!leave_in_map)
			clear_fields(skel->maps.array_map);

		ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop_multiple), &opts);
		ASSERT_OK(ret, "inner_map_list_push_pop_multiple");
		ASSERT_OK(opts.retval, "inner_map_list_push_pop_multiple retval");
		if (!leave_in_map)
			clear_fields(skel->maps.inner_map);

		ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop_multiple), &opts);
		ASSERT_OK(ret, "global_list_push_pop_multiple");
		ASSERT_OK(opts.retval, "global_list_push_pop_multiple retval");
		if (!leave_in_map)
			clear_fields(skel->maps.bss_A);
	}

	/* Group 3: lists whose elements themselves own lists. */
	if (mode == TEST_ALL || mode == LIST_IN_LIST) {
		ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_in_list), &opts);
		ASSERT_OK(ret, "map_list_in_list");
		ASSERT_OK(opts.retval, "map_list_in_list retval");
		if (!leave_in_map)
			clear_fields(skel->maps.array_map);

		ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_in_list), &opts);
		ASSERT_OK(ret, "inner_map_list_in_list");
		ASSERT_OK(opts.retval, "inner_map_list_in_list retval");
		if (!leave_in_map)
			clear_fields(skel->maps.inner_map);

		ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_in_list), &opts);
		ASSERT_OK(ret, "global_list_in_list");
		ASSERT_OK(opts.retval, "global_list_in_list retval");
		if (!leave_in_map)
			clear_fields(skel->maps.bss_A);
	}

	linked_list__destroy(skel);
}
238
/* BTF type IDs assigned deterministically by init_btf() below
 * (id 1 is "int"; structs are added in this order). */
#define SPIN_LOCK 2
#define LIST_HEAD 3
#define LIST_NODE 4
242
init_btf(void)243 static struct btf *init_btf(void)
244 {
245 int id, lid, hid, nid;
246 struct btf *btf;
247
248 btf = btf__new_empty();
249 if (!ASSERT_OK_PTR(btf, "btf__new_empty"))
250 return NULL;
251 id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
252 if (!ASSERT_EQ(id, 1, "btf__add_int"))
253 goto end;
254 lid = btf__add_struct(btf, "bpf_spin_lock", 4);
255 if (!ASSERT_EQ(lid, SPIN_LOCK, "btf__add_struct bpf_spin_lock"))
256 goto end;
257 hid = btf__add_struct(btf, "bpf_list_head", 16);
258 if (!ASSERT_EQ(hid, LIST_HEAD, "btf__add_struct bpf_list_head"))
259 goto end;
260 nid = btf__add_struct(btf, "bpf_list_node", 16);
261 if (!ASSERT_EQ(nid, LIST_NODE, "btf__add_struct bpf_list_node"))
262 goto end;
263 return btf;
264 end:
265 btf__free(btf);
266 return NULL;
267 }
268
test_btf(void)269 static void test_btf(void)
270 {
271 struct btf *btf = NULL;
272 int id, err;
273
274 while (test__start_subtest("btf: too many locks")) {
275 btf = init_btf();
276 if (!ASSERT_OK_PTR(btf, "init_btf"))
277 break;
278 id = btf__add_struct(btf, "foo", 24);
279 if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
280 break;
281 err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
282 if (!ASSERT_OK(err, "btf__add_struct foo::a"))
283 break;
284 err = btf__add_field(btf, "b", SPIN_LOCK, 32, 0);
285 if (!ASSERT_OK(err, "btf__add_struct foo::a"))
286 break;
287 err = btf__add_field(btf, "c", LIST_HEAD, 64, 0);
288 if (!ASSERT_OK(err, "btf__add_struct foo::a"))
289 break;
290
291 err = btf__load_into_kernel(btf);
292 ASSERT_EQ(err, -E2BIG, "check btf");
293 btf__free(btf);
294 break;
295 }
296
297 while (test__start_subtest("btf: missing lock")) {
298 btf = init_btf();
299 if (!ASSERT_OK_PTR(btf, "init_btf"))
300 break;
301 id = btf__add_struct(btf, "foo", 16);
302 if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
303 break;
304 err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
305 if (!ASSERT_OK(err, "btf__add_struct foo::a"))
306 break;
307 id = btf__add_decl_tag(btf, "contains:baz:a", 5, 0);
308 if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:baz:a"))
309 break;
310 id = btf__add_struct(btf, "baz", 16);
311 if (!ASSERT_EQ(id, 7, "btf__add_struct baz"))
312 break;
313 err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
314 if (!ASSERT_OK(err, "btf__add_field baz::a"))
315 break;
316
317 err = btf__load_into_kernel(btf);
318 ASSERT_EQ(err, -EINVAL, "check btf");
319 btf__free(btf);
320 break;
321 }
322
323 while (test__start_subtest("btf: bad offset")) {
324 btf = init_btf();
325 if (!ASSERT_OK_PTR(btf, "init_btf"))
326 break;
327 id = btf__add_struct(btf, "foo", 36);
328 if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
329 break;
330 err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
331 if (!ASSERT_OK(err, "btf__add_field foo::a"))
332 break;
333 err = btf__add_field(btf, "b", LIST_NODE, 0, 0);
334 if (!ASSERT_OK(err, "btf__add_field foo::b"))
335 break;
336 err = btf__add_field(btf, "c", SPIN_LOCK, 0, 0);
337 if (!ASSERT_OK(err, "btf__add_field foo::c"))
338 break;
339 id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
340 if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
341 break;
342
343 err = btf__load_into_kernel(btf);
344 ASSERT_EQ(err, -EEXIST, "check btf");
345 btf__free(btf);
346 break;
347 }
348
349 while (test__start_subtest("btf: missing contains:")) {
350 btf = init_btf();
351 if (!ASSERT_OK_PTR(btf, "init_btf"))
352 break;
353 id = btf__add_struct(btf, "foo", 24);
354 if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
355 break;
356 err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
357 if (!ASSERT_OK(err, "btf__add_field foo::a"))
358 break;
359 err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
360 if (!ASSERT_OK(err, "btf__add_field foo::b"))
361 break;
362
363 err = btf__load_into_kernel(btf);
364 ASSERT_EQ(err, -EINVAL, "check btf");
365 btf__free(btf);
366 break;
367 }
368
369 while (test__start_subtest("btf: missing struct")) {
370 btf = init_btf();
371 if (!ASSERT_OK_PTR(btf, "init_btf"))
372 break;
373 id = btf__add_struct(btf, "foo", 24);
374 if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
375 break;
376 err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
377 if (!ASSERT_OK(err, "btf__add_field foo::a"))
378 break;
379 err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
380 if (!ASSERT_OK(err, "btf__add_field foo::b"))
381 break;
382 id = btf__add_decl_tag(btf, "contains:bar:bar", 5, 1);
383 if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:bar"))
384 break;
385
386 err = btf__load_into_kernel(btf);
387 ASSERT_EQ(err, -ENOENT, "check btf");
388 btf__free(btf);
389 break;
390 }
391
392 while (test__start_subtest("btf: missing node")) {
393 btf = init_btf();
394 if (!ASSERT_OK_PTR(btf, "init_btf"))
395 break;
396 id = btf__add_struct(btf, "foo", 24);
397 if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
398 break;
399 err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
400 if (!ASSERT_OK(err, "btf__add_field foo::a"))
401 break;
402 err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
403 if (!ASSERT_OK(err, "btf__add_field foo::b"))
404 break;
405 id = btf__add_decl_tag(btf, "contains:foo:c", 5, 1);
406 if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:c"))
407 break;
408
409 err = btf__load_into_kernel(btf);
410 btf__free(btf);
411 ASSERT_EQ(err, -ENOENT, "check btf");
412 break;
413 }
414
415 while (test__start_subtest("btf: node incorrect type")) {
416 btf = init_btf();
417 if (!ASSERT_OK_PTR(btf, "init_btf"))
418 break;
419 id = btf__add_struct(btf, "foo", 20);
420 if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
421 break;
422 err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
423 if (!ASSERT_OK(err, "btf__add_field foo::a"))
424 break;
425 err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
426 if (!ASSERT_OK(err, "btf__add_field foo::b"))
427 break;
428 id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0);
429 if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a"))
430 break;
431 id = btf__add_struct(btf, "bar", 4);
432 if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
433 break;
434 err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
435 if (!ASSERT_OK(err, "btf__add_field bar::a"))
436 break;
437
438 err = btf__load_into_kernel(btf);
439 ASSERT_EQ(err, -EINVAL, "check btf");
440 btf__free(btf);
441 break;
442 }
443
444 while (test__start_subtest("btf: multiple bpf_list_node with name b")) {
445 btf = init_btf();
446 if (!ASSERT_OK_PTR(btf, "init_btf"))
447 break;
448 id = btf__add_struct(btf, "foo", 52);
449 if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
450 break;
451 err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
452 if (!ASSERT_OK(err, "btf__add_field foo::a"))
453 break;
454 err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
455 if (!ASSERT_OK(err, "btf__add_field foo::b"))
456 break;
457 err = btf__add_field(btf, "b", LIST_NODE, 256, 0);
458 if (!ASSERT_OK(err, "btf__add_field foo::c"))
459 break;
460 err = btf__add_field(btf, "d", SPIN_LOCK, 384, 0);
461 if (!ASSERT_OK(err, "btf__add_field foo::d"))
462 break;
463 id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
464 if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
465 break;
466
467 err = btf__load_into_kernel(btf);
468 ASSERT_EQ(err, -EINVAL, "check btf");
469 btf__free(btf);
470 break;
471 }
472
473 while (test__start_subtest("btf: owning | owned AA cycle")) {
474 btf = init_btf();
475 if (!ASSERT_OK_PTR(btf, "init_btf"))
476 break;
477 id = btf__add_struct(btf, "foo", 36);
478 if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
479 break;
480 err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
481 if (!ASSERT_OK(err, "btf__add_field foo::a"))
482 break;
483 err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
484 if (!ASSERT_OK(err, "btf__add_field foo::b"))
485 break;
486 err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
487 if (!ASSERT_OK(err, "btf__add_field foo::c"))
488 break;
489 id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
490 if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
491 break;
492
493 err = btf__load_into_kernel(btf);
494 ASSERT_EQ(err, -ELOOP, "check btf");
495 btf__free(btf);
496 break;
497 }
498
499 while (test__start_subtest("btf: owning | owned ABA cycle")) {
500 btf = init_btf();
501 if (!ASSERT_OK_PTR(btf, "init_btf"))
502 break;
503 id = btf__add_struct(btf, "foo", 36);
504 if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
505 break;
506 err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
507 if (!ASSERT_OK(err, "btf__add_field foo::a"))
508 break;
509 err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
510 if (!ASSERT_OK(err, "btf__add_field foo::b"))
511 break;
512 err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
513 if (!ASSERT_OK(err, "btf__add_field foo::c"))
514 break;
515 id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
516 if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
517 break;
518 id = btf__add_struct(btf, "bar", 36);
519 if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
520 break;
521 err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
522 if (!ASSERT_OK(err, "btf__add_field bar::a"))
523 break;
524 err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
525 if (!ASSERT_OK(err, "btf__add_field bar::b"))
526 break;
527 err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
528 if (!ASSERT_OK(err, "btf__add_field bar::c"))
529 break;
530 id = btf__add_decl_tag(btf, "contains:foo:b", 7, 0);
531 if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:foo:b"))
532 break;
533
534 err = btf__load_into_kernel(btf);
535 ASSERT_EQ(err, -ELOOP, "check btf");
536 btf__free(btf);
537 break;
538 }
539
540 while (test__start_subtest("btf: owning -> owned")) {
541 btf = init_btf();
542 if (!ASSERT_OK_PTR(btf, "init_btf"))
543 break;
544 id = btf__add_struct(btf, "foo", 20);
545 if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
546 break;
547 err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
548 if (!ASSERT_OK(err, "btf__add_field foo::a"))
549 break;
550 err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
551 if (!ASSERT_OK(err, "btf__add_field foo::b"))
552 break;
553 id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0);
554 if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a"))
555 break;
556 id = btf__add_struct(btf, "bar", 16);
557 if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
558 break;
559 err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
560 if (!ASSERT_OK(err, "btf__add_field bar::a"))
561 break;
562
563 err = btf__load_into_kernel(btf);
564 ASSERT_EQ(err, 0, "check btf");
565 btf__free(btf);
566 break;
567 }
568
569 while (test__start_subtest("btf: owning -> owning | owned -> owned")) {
570 btf = init_btf();
571 if (!ASSERT_OK_PTR(btf, "init_btf"))
572 break;
573 id = btf__add_struct(btf, "foo", 20);
574 if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
575 break;
576 err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
577 if (!ASSERT_OK(err, "btf__add_field foo::a"))
578 break;
579 err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
580 if (!ASSERT_OK(err, "btf__add_field foo::b"))
581 break;
582 id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
583 if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
584 break;
585 id = btf__add_struct(btf, "bar", 36);
586 if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
587 break;
588 err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
589 if (!ASSERT_OK(err, "btf__add_field bar::a"))
590 break;
591 err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
592 if (!ASSERT_OK(err, "btf__add_field bar::b"))
593 break;
594 err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
595 if (!ASSERT_OK(err, "btf__add_field bar::c"))
596 break;
597 id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);
598 if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))
599 break;
600 id = btf__add_struct(btf, "baz", 16);
601 if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
602 break;
603 err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
604 if (!ASSERT_OK(err, "btf__add_field baz:a"))
605 break;
606
607 err = btf__load_into_kernel(btf);
608 ASSERT_EQ(err, 0, "check btf");
609 btf__free(btf);
610 break;
611 }
612
613 while (test__start_subtest("btf: owning | owned -> owning | owned -> owned")) {
614 btf = init_btf();
615 if (!ASSERT_OK_PTR(btf, "init_btf"))
616 break;
617 id = btf__add_struct(btf, "foo", 36);
618 if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
619 break;
620 err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
621 if (!ASSERT_OK(err, "btf__add_field foo::a"))
622 break;
623 err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
624 if (!ASSERT_OK(err, "btf__add_field foo::b"))
625 break;
626 err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
627 if (!ASSERT_OK(err, "btf__add_field foo::c"))
628 break;
629 id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
630 if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
631 break;
632 id = btf__add_struct(btf, "bar", 36);
633 if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
634 break;
635 err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
636 if (!ASSERT_OK(err, "btf__add_field bar:a"))
637 break;
638 err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
639 if (!ASSERT_OK(err, "btf__add_field bar:b"))
640 break;
641 err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
642 if (!ASSERT_OK(err, "btf__add_field bar:c"))
643 break;
644 id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);
645 if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))
646 break;
647 id = btf__add_struct(btf, "baz", 16);
648 if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
649 break;
650 err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
651 if (!ASSERT_OK(err, "btf__add_field baz:a"))
652 break;
653
654 err = btf__load_into_kernel(btf);
655 ASSERT_EQ(err, -ELOOP, "check btf");
656 btf__free(btf);
657 break;
658 }
659
660 while (test__start_subtest("btf: owning -> owning | owned -> owning | owned -> owned")) {
661 btf = init_btf();
662 if (!ASSERT_OK_PTR(btf, "init_btf"))
663 break;
664 id = btf__add_struct(btf, "foo", 20);
665 if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
666 break;
667 err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
668 if (!ASSERT_OK(err, "btf__add_field foo::a"))
669 break;
670 err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
671 if (!ASSERT_OK(err, "btf__add_field foo::b"))
672 break;
673 id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
674 if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
675 break;
676 id = btf__add_struct(btf, "bar", 36);
677 if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
678 break;
679 err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
680 if (!ASSERT_OK(err, "btf__add_field bar::a"))
681 break;
682 err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
683 if (!ASSERT_OK(err, "btf__add_field bar::b"))
684 break;
685 err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
686 if (!ASSERT_OK(err, "btf__add_field bar::c"))
687 break;
688 id = btf__add_decl_tag(btf, "contains:baz:b", 7, 0);
689 if (!ASSERT_EQ(id, 8, "btf__add_decl_tag"))
690 break;
691 id = btf__add_struct(btf, "baz", 36);
692 if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
693 break;
694 err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
695 if (!ASSERT_OK(err, "btf__add_field bar::a"))
696 break;
697 err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
698 if (!ASSERT_OK(err, "btf__add_field bar::b"))
699 break;
700 err = btf__add_field(btf, "c", SPIN_LOCK, 256, 0);
701 if (!ASSERT_OK(err, "btf__add_field bar::c"))
702 break;
703 id = btf__add_decl_tag(btf, "contains:bam:a", 9, 0);
704 if (!ASSERT_EQ(id, 10, "btf__add_decl_tag contains:bam:a"))
705 break;
706 id = btf__add_struct(btf, "bam", 16);
707 if (!ASSERT_EQ(id, 11, "btf__add_struct bam"))
708 break;
709 err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
710 if (!ASSERT_OK(err, "btf__add_field bam::a"))
711 break;
712
713 err = btf__load_into_kernel(btf);
714 ASSERT_EQ(err, -ELOOP, "check btf");
715 btf__free(btf);
716 break;
717 }
718
719 while (test__start_subtest("btf: list_node and rb_node in same struct")) {
720 btf = init_btf();
721 if (!ASSERT_OK_PTR(btf, "init_btf"))
722 break;
723
724 id = btf__add_struct(btf, "bpf_rb_node", 24);
725 if (!ASSERT_EQ(id, 5, "btf__add_struct bpf_rb_node"))
726 break;
727 id = btf__add_struct(btf, "bar", 40);
728 if (!ASSERT_EQ(id, 6, "btf__add_struct bar"))
729 break;
730 err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
731 if (!ASSERT_OK(err, "btf__add_field bar::a"))
732 break;
733 err = btf__add_field(btf, "c", 5, 128, 0);
734 if (!ASSERT_OK(err, "btf__add_field bar::c"))
735 break;
736
737 id = btf__add_struct(btf, "foo", 20);
738 if (!ASSERT_EQ(id, 7, "btf__add_struct foo"))
739 break;
740 err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
741 if (!ASSERT_OK(err, "btf__add_field foo::a"))
742 break;
743 err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
744 if (!ASSERT_OK(err, "btf__add_field foo::b"))
745 break;
746 id = btf__add_decl_tag(btf, "contains:bar:a", 7, 0);
747 if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:bar:a"))
748 break;
749
750 err = btf__load_into_kernel(btf);
751 ASSERT_EQ(err, -EINVAL, "check btf");
752 btf__free(btf);
753 break;
754 }
755 }
756
test_linked_list(void)757 void test_linked_list(void)
758 {
759 int i;
760
761 for (i = 0; i < ARRAY_SIZE(linked_list_fail_tests); i++) {
762 if (!test__start_subtest(linked_list_fail_tests[i].prog_name))
763 continue;
764 test_linked_list_fail_prog(linked_list_fail_tests[i].prog_name,
765 linked_list_fail_tests[i].err_msg);
766 }
767 test_btf();
768 test_linked_list_success(PUSH_POP, false);
769 test_linked_list_success(PUSH_POP, true);
770 test_linked_list_success(PUSH_POP_MULT, false);
771 test_linked_list_success(PUSH_POP_MULT, true);
772 test_linked_list_success(LIST_IN_LIST, false);
773 test_linked_list_success(LIST_IN_LIST, true);
774 test_linked_list_success(TEST_ALL, false);
775 }
776