// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>

#include "task_kfunc_common.h"

char _license[] SEC("license") = "GPL";

int err, pid;

/* Prototype for all of the program trace events below:
 *
 * TRACE_EVENT(task_newtask,
 *	TP_PROTO(struct task_struct *p, u64 clone_flags)
 */

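/* Filter helper used by every program below: returns true iff the event was
 * raised by the test's own process. The 'pid' global is presumably written
 * by the user-space half of the test before the programs are attached.
 */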
static bool is_test_kfunc_task(void)
{
	int cur_pid = bpf_get_current_pid_tgid() >> 32;

	return pid == cur_pid;
}

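/* Shared body for the acquire/release tests: take a reference on @task with
 * bpf_task_acquire() and immediately drop it with bpf_task_release(). The
 * point is that the verifier accepts a balanced acquire/release pair.
 */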
static int test_acquire_release(struct task_struct *task)
{
	struct task_struct *acquired;

	acquired = bpf_task_acquire(task);
	bpf_task_release(acquired);

	return 0;
}

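/* Acquire and release a reference on the tracepoint's 'task' argument. */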
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_acquire_release_argument, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	return test_acquire_release(task);
}

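/* Same as above, but on the task returned by bpf_get_current_task_btf()
 * rather than on the tracepoint argument.
 */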
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_acquire_release_current, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	return test_acquire_release(bpf_get_current_task_btf());
}

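/* Stash the new task in a map as a referenced kptr and deliberately leave it
 * there; the stashed reference should then be dropped by the kernel when the
 * map entry (or the map itself) is destroyed. 'err' is set if the insert
 * fails.
 */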
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_acquire_leave_in_map, struct task_struct *task, u64 clone_flags)
{
	long status;

	if (!is_test_kfunc_task())
		return 0;

	status = tasks_kfunc_map_insert(task);
	if (status)
		err = 1;

	return 0;
}

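/* Round-trip a task reference through a map: insert it, look the entry back
 * up, swap the kptr out with bpf_kptr_xchg(), and release the reference the
 * xchg handed back. Each failure path sets a distinct 'err' code so user
 * space can tell which step broke.
 */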
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_xchg_release, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *kptr;
	struct __tasks_kfunc_map_value *v;
	long status;

	if (!is_test_kfunc_task())
		return 0;

	status = tasks_kfunc_map_insert(task);
	if (status) {
		err = 1;
		return 0;
	}

	v = tasks_kfunc_map_value_lookup(task);
	if (!v) {
		err = 2;
		return 0;
	}

	kptr = bpf_kptr_xchg(&v->task, NULL);
	if (!kptr) {
		err = 3;
		return 0;
	}

	bpf_task_release(kptr);

	return 0;
}

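/* Like the xchg test, but read the stashed kptr with bpf_task_kptr_get()
 * instead of swapping it out. Per the inline comment below, the lookup is
 * currently expected to return NULL, so getting a task back is a failure.
 */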
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_get_release, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *kptr;
	struct __tasks_kfunc_map_value *v;
	long status;

	if (!is_test_kfunc_task())
		return 0;

	status = tasks_kfunc_map_insert(task);
	if (status) {
		err = 1;
		return 0;
	}

	v = tasks_kfunc_map_value_lookup(task);
	if (!v) {
		err = 2;
		return 0;
	}

	kptr = bpf_task_kptr_get(&v->task);
	if (kptr) {
		/* Until we resolve the issues with using task->rcu_users, we
		 * expect bpf_task_kptr_get() to return a NULL task. See the
		 * comment at the definition of bpf_task_acquire_not_zero() for
		 * more details.
		 */
		bpf_task_release(kptr);
		err = 3;
		return 0;
	}

	return 0;
}

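/* Acquire and release a reference on the current task fetched in the
 * program body itself.
 */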
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_current_acquire_release, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *current, *acquired;

	if (!is_test_kfunc_task())
		return 0;

	current = bpf_get_current_task_btf();
	acquired = bpf_task_acquire(current);
	bpf_task_release(acquired);

	return 0;
}

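/* Look @p back up by PID with bpf_task_from_pid() and verify the task we get
 * has the PID we asked for. err = 1 means the lookup failed, err = 2 means
 * the PIDs didn't match; the acquired reference is released either way.
 */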
static void lookup_compare_pid(const struct task_struct *p)
{
	struct task_struct *acquired;

	acquired = bpf_task_from_pid(p->pid);
	if (!acquired) {
		err = 1;
		return;
	}

	if (acquired->pid != p->pid)
		err = 2;
	bpf_task_release(acquired);
}

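/* PID round-trip on the freshly created task passed in by the tracepoint. */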
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_arg, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	lookup_compare_pid(task);
	return 0;
}

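/* PID round-trip on the current task instead of the tracepoint argument. */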
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_current, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	lookup_compare_pid(bpf_get_current_task_btf());
	return 0;
}

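/* Return 1 if bpf_task_from_pid(@pid) finds a task (dropping the reference
 * it took), 0 otherwise.
 */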
static int is_pid_lookup_valid(s32 pid)
{
	struct task_struct *acquired;

	acquired = bpf_task_from_pid(pid);
	if (acquired) {
		bpf_task_release(acquired);
		return 1;
	}

	return 0;
}

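/* Negative test: lookups of PIDs that should never resolve, -1 and an
 * arbitrary garbage value, must both fail. A successful lookup sets a
 * distinct 'err' code per probe.
 */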
SEC("tp_btf/task_newtask")
int BPF_PROG(test_task_from_pid_invalid, struct task_struct *task, u64 clone_flags)
{
	if (!is_test_kfunc_task())
		return 0;

	if (is_pid_lookup_valid(-1)) {
		err = 1;
		return 0;
	}

	if (is_pid_lookup_valid(0xcafef00d)) {
		err = 2;
		return 0;
	}

	return 0;
}