{
	"check valid spill/fill",
	.insns = {
	/* spill R1(ctx) into stack */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	/* fill it back into R2 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -8),
	/* should be able to access R0 = *(R2 + 8) */
	/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R0 leaks addr",
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.retval = POINTER_VALUE,
},
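/* A BPF_DW spill of the ctx pointer keeps its register type in the stack
 * slot, so the refilled register below is still usable as a ctx pointer
 * for the skb->mark load, even for unprivileged programs.
 */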
{
	"check valid spill/fill, skb mark",
	.insns = {
	BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = ACCEPT,
},
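/* Spill/fill must also preserve PTR_TO_MEM together with its ringbuf
 * reference state: the refilled R7 is written through and then handed to
 * bpf_ringbuf_submit(), which releases the reference.
 */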
{
	"check valid spill/fill, ptr to mem",
	.insns = {
	/* reserve 8 byte ringbuf memory */
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_MOV64_IMM(BPF_REG_2, 8),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
	/* store a pointer to the reserved memory in R6 */
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	/* check whether the reservation was successful */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
	/* spill R6(mem) into the stack */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
	/* fill it back in R7 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8),
	/* should be able to access *(R7) = 0 */
	BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0),
	/* submit the reserved ringbuf memory */
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_MOV64_IMM(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_ringbuf = { 1 },
	.result = ACCEPT,
	.result_unpriv = ACCEPT,
},
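/* Partially overwriting a spilled pointer slot invalidates the spill.
 * Unprivileged programs are rejected at the corrupting store itself;
 * privileged ones may fill the slot back as a scalar, but dereferencing
 * that scalar must then fail.
 */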
{
	"check corrupted spill/fill",
	.insns = {
	/* spill R1(ctx) into stack */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	/* corrupt the spilled R1 pointer on the stack */
	BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
	/* filling it back into R0 is fine for priv.
	 * R0 now becomes SCALAR_VALUE.
	 */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	/* Load from R0 should fail. */
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "attempt to corrupt spilled",
	.errstr = "R0 invalid mem access 'inv",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
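/* Same idea, clobbering the two low bytes of the spilled pointer: unpriv
 * is rejected at the store, while priv is accepted because the refilled
 * scalar is only returned, never dereferenced.
 */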
{
	"check corrupted spill/fill, LSB",
	.insns = {
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "attempt to corrupt spilled",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = POINTER_VALUE,
},
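/* The MSB variant: clobbering the upper four bytes has the same effect,
 * since any partial overwrite invalidates the spilled pointer.
 */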
{
	"check corrupted spill/fill, MSB",
	.insns = {
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
	BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "attempt to corrupt spilled",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.retval = POINTER_VALUE,
},
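/* The remaining tests exercise the verifier's tracking of scalar spills
 * narrower than 8 bytes. A u32 spill refilled by a matching u32 load
 * keeps the precise value (R4=20 here), so after the data_end check the
 * verifier can prove 20 bytes of headroom and the *(u32 *)r2 load is safe.
 */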
{
	"Spill and refill a u32 const scalar.  Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u32 *)(r10 -8) */
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv20 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=inv20 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=inv20 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
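/* Only fp-8..fp-5 were written above; reading the u32 at fp-4 hits
 * uninitialized stack and must be rejected.
 */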
{
	"Spill a u32 const, refill from another half of the uninit u32 from the stack",
	.insns = {
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u32 *)(r10 -4) fp-8=????rrrr */
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid read from stack off -4+0 size 4",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
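/* A fill narrower than the spill does not recover the spilled constant:
 * the u16 load only bounds r4 to [0, 65535]. With umin=0 the data_end
 * check proves no guaranteed headroom at r2, so the packet load is
 * rejected.
 */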
{
	"Spill a u32 const scalar.  Refill as u16.  Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u16 *)(r10 -8) */
	BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
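/* A u64 fill spanning two separate u32 spills likewise yields an unknown
 * scalar rather than reconstructing the stored constants.
 */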
{
	"Spill u32 const scalars.  Refill as u64.  Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r6 = 0 */
	BPF_MOV32_IMM(BPF_REG_6, 0),
	/* r7 = 20 */
	BPF_MOV32_IMM(BPF_REG_7, 20),
	/* *(u32 *)(r10 -4) = r6 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4),
	/* *(u32 *)(r10 -8) = r7 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8),
	/* r4 = *(u64 *)(r10 -8) */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt R2=pkt R3=pkt_end R4=inv */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt R2=pkt R3=pkt_end R4=inv */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
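/* A u16 fill from the middle of the spilled u32 (fp-6) is not a matching
 * refill either, so the precise bound is lost in the same way.
 */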
{
	"Spill a u32 const scalar.  Refill as u16 from fp-6.  Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u16 *)(r10 -6) */
	BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -6),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
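/* A spill at a stack address that is not 8-byte aligned (fp-4 here) is
 * not tracked as a precise spill, so the refill is only known to be a
 * u32 (umax=U32_MAX) and the packet access is rejected.
 */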
{
	"Spill and refill a u32 const scalar at non 8byte aligned stack addr.  Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	/* r4 = 20 */
	BPF_MOV32_IMM(BPF_REG_4, 20),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* *(u32 *)(r10 -4) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4),
	/* r4 = *(u32 *)(r10 -4) */
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
	/* r0 = r2 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=U32_MAX */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
	/* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=inv */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	/* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=inv */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid access to packet",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
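/* Precise tracking also works for runtime-variable scalars: the tstamp
 * value is bounded to umax=40 before the spill, the matching u32 fill
 * keeps that bound, and checking r2+20 against data_end makes the final
 * packet load provably in range.
 */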
{
	"Spill and refill a umax=40 bounded scalar.  Offset to skb->data",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1,
		    offsetof(struct __sk_buff, tstamp)),
	BPF_JMP_IMM(BPF_JLE, BPF_REG_4, 40, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	/* *(u32 *)(r10 -8) = r4 R4=inv,umax=40 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u32 *)(r10 -8) */
	BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
	/* r2 += r4 R2=pkt R4=inv,umax=40 */
	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_4),
	/* r0 = r2 R2=pkt,umax=40 R4=inv,umax=40 */
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	/* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 20),
	/* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_3, 1),
	/* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
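/* Both halves of fp-8 are initialized by the two u32 spills, so the u64
 * fill is a legal stack read; the result is an unknown scalar, which is
 * fine since it is never used for a memory access.
 */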
{
	"Spill a u32 scalar at fp-4 and then at fp-8",
	.insns = {
	/* r4 = 4321 */
	BPF_MOV32_IMM(BPF_REG_4, 4321),
	/* *(u32 *)(r10 -4) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -4),
	/* *(u32 *)(r10 -8) = r4 */
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
	/* r4 = *(u64 *)(r10 -8) */
	BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},