1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
3 */
4
5 /* A devmap's primary use is as a backend map for the XDP BPF helper call
6  * bpf_redirect_map(). Because XDP is mostly concerned with performance, we
7  * spent some effort to ensure the datapath with redirect maps does not take
8  * any locks. This is a quick note on the details.
9  *
10  * We have three possible paths into the devmap control plane: bpf
11  * syscalls, bpf programs, and driver-side xmit/flush operations. A bpf syscall
12  * will invoke an update, delete, or lookup operation. To ensure updates and
13  * deletes appear atomic from the datapath side, xchg() is used to modify the
14  * netdev_map array. Then, because the datapath does a lookup into the netdev_map
15  * array (read-only) from an RCU critical section, we use call_rcu() to wait for
16  * an RCU grace period before freeing the old data structures. This ensures the
17  * datapath always has a valid copy. However, the datapath does a "flush"
18  * operation that pushes any pending packets in the driver outside the RCU
19  * critical section. Each bpf_dtab_netdev tracks these pending operations using
20  * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until
21  * this list is empty, indicating all outstanding flush operations have completed.
22  *
23  * BPF syscalls may race with BPF program calls on any of the update, delete
24  * or lookup operations. As noted above, the xchg() operation also keeps the
25  * netdev_map consistent in this case. From the devmap side, BPF programs
26  * calling into these operations are the same as multiple user space threads
27  * making system calls.
28  *
29  * Finally, any of the above may race with a netdev_unregister notifier. The
30  * unregister notifier must search the map structure for entries that
31  * contain a reference to the net device and remove them. This is a two-step
32  * process: (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
33  * check whether the ifindex is the same as that of the net_device being removed.
34  * When removing the dev, a cmpxchg() is used to ensure the correct dev is
35  * removed; in the case of a concurrent update or delete operation it is
36  * possible that the initially referenced dev is no longer in the map. As the
37  * notifier hook walks the map, we know that new dev references cannot be
38  * added by the user because core infrastructure ensures dev_get_by_index()
39  * calls will fail at this point.
40  *
41  * The devmap_hash type is a map type that interprets keys as ifindexes and
42  * indexes these using a hashmap. This allows maps that use ifindex as a key to be
43  * densely packed instead of having holes in the lookup array for unused
44  * ifindexes. The setup and packet enqueue/send code is shared between the two
45  * types of devmap; only the lookup and insertion differ.
46 */
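/* For orientation, a minimal BPF-side sketch of how a devmap is typically
 * used together with bpf_redirect_map(). This is illustrative only and not
 * part of this file; the map name (tx_ports), the program name, and the
 * libbpf-style BTF map definition are assumptions of the example:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, struct bpf_devmap_val);
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_devmap(struct xdp_md *ctx)
 *	{
 *		__u32 key = 0;
 *
 *		// XDP_REDIRECT on a valid entry, XDP_ABORTED otherwise
 *		return bpf_redirect_map(&tx_ports, key, 0);
 *	}
 */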
47 #include <linux/bpf.h>
48 #include <net/xdp.h>
49 #include <linux/filter.h>
50 #include <trace/events/xdp.h>
51 #include <linux/btf_ids.h>
52
53 #define DEV_CREATE_FLAG_MASK \
54 (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
55
56 struct xdp_dev_bulk_queue {
57 struct xdp_frame *q[DEV_MAP_BULK_SIZE];
58 struct list_head flush_node;
59 struct net_device *dev;
60 struct net_device *dev_rx;
61 struct bpf_prog *xdp_prog;
62 unsigned int count;
63 };
64
65 struct bpf_dtab_netdev {
66 struct net_device *dev; /* must be first member, due to tracepoint */
67 struct hlist_node index_hlist;
68 struct bpf_dtab *dtab;
69 struct bpf_prog *xdp_prog;
70 struct rcu_head rcu;
71 unsigned int idx;
72 struct bpf_devmap_val val;
73 };
74
75 struct bpf_dtab {
76 struct bpf_map map;
77 struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */
78 struct list_head list;
79
80 /* these are only used for DEVMAP_HASH type maps */
81 struct hlist_head *dev_index_head;
82 spinlock_t index_lock;
83 unsigned int items;
84 u32 n_buckets;
85 };
86
87 static DEFINE_PER_CPU(struct list_head, dev_flush_list);
88 static DEFINE_SPINLOCK(dev_map_lock);
89 static LIST_HEAD(dev_map_list);
90
91 static struct hlist_head *dev_map_create_hash(unsigned int entries,
92 int numa_node)
93 {
94 int i;
95 struct hlist_head *hash;
96
97 hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
98 if (hash != NULL)
99 for (i = 0; i < entries; i++)
100 INIT_HLIST_HEAD(&hash[i]);
101
102 return hash;
103 }
104
105 static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
106 int idx)
107 {
108 return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
109 }
110
111 static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
112 {
113 u32 valsize = attr->value_size;
114
115 /* check sanity of attributes. 2 value sizes supported:
116 * 4 bytes: ifindex
117 * 8 bytes: ifindex + prog fd
118 */
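/* For reference, a sketch of the UAPI value layout those two sizes
 * correspond to; the authoritative definition is struct bpf_devmap_val
 * in include/uapi/linux/bpf.h:
 *
 *	struct bpf_devmap_val {
 *		__u32 ifindex;
 *		union {
 *			int   fd;	(prog fd on map write)
 *			__u32 id;	(prog id on map read)
 *		} bpf_prog;
 *	};
 */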
119 if (attr->max_entries == 0 || attr->key_size != 4 ||
120 (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
121 valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
122 attr->map_flags & ~DEV_CREATE_FLAG_MASK)
123 return -EINVAL;
124
125 /* Lookup returns a pointer straight to dev->ifindex, so make sure the
126 * verifier prevents writes from the BPF side
127 */
128 attr->map_flags |= BPF_F_RDONLY_PROG;
129
130
131 bpf_map_init_from_attr(&dtab->map, attr);
132
133 if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
134 dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
135
136 if (!dtab->n_buckets) /* Overflow check */
137 return -EINVAL;
138 }
139
140 if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
141 dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
142 dtab->map.numa_node);
143 if (!dtab->dev_index_head)
144 return -ENOMEM;
145
146 spin_lock_init(&dtab->index_lock);
147 } else {
148 dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
149 sizeof(struct bpf_dtab_netdev *),
150 dtab->map.numa_node);
151 if (!dtab->netdev_map)
152 return -ENOMEM;
153 }
154
155 return 0;
156 }
157
158 static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
159 {
160 struct bpf_dtab *dtab;
161 int err;
162
163 if (!capable(CAP_NET_ADMIN))
164 return ERR_PTR(-EPERM);
165
166 dtab = bpf_map_area_alloc(sizeof(*dtab), NUMA_NO_NODE);
167 if (!dtab)
168 return ERR_PTR(-ENOMEM);
169
170 err = dev_map_init_map(dtab, attr);
171 if (err) {
172 bpf_map_area_free(dtab);
173 return ERR_PTR(err);
174 }
175
176 spin_lock(&dev_map_lock);
177 list_add_tail_rcu(&dtab->list, &dev_map_list);
178 spin_unlock(&dev_map_lock);
179
180 return &dtab->map;
181 }
182
183 static void dev_map_free(struct bpf_map *map)
184 {
185 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
186 int i;
187
188 /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
189  * so the programs (there can be more than one that used this map) were
190  * disconnected from events. The following synchronize_rcu() guarantees
191  * that both rcu read critical sections have completed and waits for
192  * preempt-disable regions (NAPI being the relevant context here), so we
193  * are certain there will be no further reads against the netdev_map and
194  * that all flush operations are complete. Flush operations can only be done
195  * from NAPI context for this reason.
196  */
197
198 spin_lock(&dev_map_lock);
199 list_del_rcu(&dtab->list);
200 spin_unlock(&dev_map_lock);
201
202 bpf_clear_redirect_map(map);
203 synchronize_rcu();
204
205 /* Make sure prior __dev_map_entry_free() callbacks have completed. */
206 rcu_barrier();
207
208 if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
209 for (i = 0; i < dtab->n_buckets; i++) {
210 struct bpf_dtab_netdev *dev;
211 struct hlist_head *head;
212 struct hlist_node *next;
213
214 head = dev_map_index_hash(dtab, i);
215
216 hlist_for_each_entry_safe(dev, next, head, index_hlist) {
217 hlist_del_rcu(&dev->index_hlist);
218 if (dev->xdp_prog)
219 bpf_prog_put(dev->xdp_prog);
220 dev_put(dev->dev);
221 kfree(dev);
222 }
223 }
224
225 bpf_map_area_free(dtab->dev_index_head);
226 } else {
227 for (i = 0; i < dtab->map.max_entries; i++) {
228 struct bpf_dtab_netdev *dev;
229
230 dev = rcu_dereference_raw(dtab->netdev_map[i]);
231 if (!dev)
232 continue;
233
234 if (dev->xdp_prog)
235 bpf_prog_put(dev->xdp_prog);
236 dev_put(dev->dev);
237 kfree(dev);
238 }
239
240 bpf_map_area_free(dtab->netdev_map);
241 }
242
243 bpf_map_area_free(dtab);
244 }
245
246 static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
247 {
248 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
249 u32 index = key ? *(u32 *)key : U32_MAX;
250 u32 *next = next_key;
251
252 if (index >= dtab->map.max_entries) {
253 *next = 0;
254 return 0;
255 }
256
257 if (index == dtab->map.max_entries - 1)
258 return -ENOENT;
259 *next = index + 1;
260 return 0;
261 }
262
263 /* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
264 * by local_bh_disable() (from XDP calls inside NAPI). The
265 * rcu_read_lock_bh_held() below makes lockdep accept both.
266 */
267 static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
268 {
269 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
270 struct hlist_head *head = dev_map_index_hash(dtab, key);
271 struct bpf_dtab_netdev *dev;
272
273 hlist_for_each_entry_rcu(dev, head, index_hlist,
274 lockdep_is_held(&dtab->index_lock))
275 if (dev->idx == key)
276 return dev;
277
278 return NULL;
279 }
280
281 static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
282 void *next_key)
283 {
284 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
285 u32 idx, *next = next_key;
286 struct bpf_dtab_netdev *dev, *next_dev;
287 struct hlist_head *head;
288 int i = 0;
289
290 if (!key)
291 goto find_first;
292
293 idx = *(u32 *)key;
294
295 dev = __dev_map_hash_lookup_elem(map, idx);
296 if (!dev)
297 goto find_first;
298
299 next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
300 struct bpf_dtab_netdev, index_hlist);
301
302 if (next_dev) {
303 *next = next_dev->idx;
304 return 0;
305 }
306
307 i = idx & (dtab->n_buckets - 1);
308 i++;
309
310 find_first:
311 for (; i < dtab->n_buckets; i++) {
312 head = dev_map_index_hash(dtab, i);
313
314 next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
315 struct bpf_dtab_netdev,
316 index_hlist);
317 if (next_dev) {
318 *next = next_dev->idx;
319 return 0;
320 }
321 }
322
323 return -ENOENT;
324 }
325
326 static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
327 struct xdp_frame **frames, int n,
328 struct net_device *dev)
329 {
330 struct xdp_txq_info txq = { .dev = dev };
331 struct xdp_buff xdp;
332 int i, nframes = 0;
333
334 for (i = 0; i < n; i++) {
335 struct xdp_frame *xdpf = frames[i];
336 u32 act;
337 int err;
338
339 xdp_convert_frame_to_buff(xdpf, &xdp);
340 xdp.txq = &txq;
341
342 act = bpf_prog_run_xdp(xdp_prog, &xdp);
343 switch (act) {
344 case XDP_PASS:
345 err = xdp_update_frame_from_buff(&xdp, xdpf);
346 if (unlikely(err < 0))
347 xdp_return_frame_rx_napi(xdpf);
348 else
349 frames[nframes++] = xdpf;
350 break;
351 default:
352 bpf_warn_invalid_xdp_action(NULL, xdp_prog, act);
353 fallthrough;
354 case XDP_ABORTED:
355 trace_xdp_exception(dev, xdp_prog, act);
356 fallthrough;
357 case XDP_DROP:
358 xdp_return_frame_rx_napi(xdpf);
359 break;
360 }
361 }
362 return nframes; /* sent frames count */
363 }
364
365 static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
366 {
367 struct net_device *dev = bq->dev;
368 unsigned int cnt = bq->count;
369 int sent = 0, err = 0;
370 int to_send = cnt;
371 int i;
372
373 if (unlikely(!cnt))
374 return;
375
376 for (i = 0; i < cnt; i++) {
377 struct xdp_frame *xdpf = bq->q[i];
378
379 prefetch(xdpf);
380 }
381
382 if (bq->xdp_prog) {
383 to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
384 if (!to_send)
385 goto out;
386 }
387
388 sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
389 if (sent < 0) {
390 /* If ndo_xdp_xmit fails with an errno, no frames have
391 * been xmit'ed.
392 */
393 err = sent;
394 sent = 0;
395 }
396
397 /* If not all frames have been transmitted, it is our
398 * responsibility to free them
399 */
400 for (i = sent; unlikely(i < to_send); i++)
401 xdp_return_frame_rx_napi(bq->q[i]);
402
403 out:
404 bq->count = 0;
405 trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
406 }
407
408 /* __dev_flush is called from xdp_do_flush(), which _must_ be called by the
409  * driver before returning from its napi->poll() routine. See the comment above
410  * xdp_do_flush() in filter.c, and the sketch below this function.
411  */
412 void __dev_flush(void)
413 {
414 struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
415 struct xdp_dev_bulk_queue *bq, *tmp;
416
417 list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
418 bq_xmit_all(bq, XDP_XMIT_FLUSH);
419 bq->dev_rx = NULL;
420 bq->xdp_prog = NULL;
421 __list_del_clearprev(&bq->flush_node);
422 }
423 }
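
/* A sketch of the caller-side contract described in the comment above
 * __dev_flush(): a driver's napi->poll() routine runs XDP, possibly
 * redirects into a devmap, and then calls xdp_do_flush() before returning.
 * The driver function names here are illustrative; only xdp_do_flush() is
 * a real kernel API:
 *
 *	static int example_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		// RX processing; XDP programs may call bpf_redirect_map()
 *		// here, filling the per-CPU bulk queues via bq_enqueue()
 *		int done = example_clean_rx(napi, budget);
 *
 *		// drain the bulk queues while still in NAPI context
 *		xdp_do_flush();
 *		return done;
 *	}
 */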
424
425 /* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
426 * by local_bh_disable() (from XDP calls inside NAPI). The
427 * rcu_read_lock_bh_held() below makes lockdep accept both.
428 */
429 static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
430 {
431 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
432 struct bpf_dtab_netdev *obj;
433
434 if (key >= map->max_entries)
435 return NULL;
436
437 obj = rcu_dereference_check(dtab->netdev_map[key],
438 rcu_read_lock_bh_held());
439 return obj;
440 }
441
442 /* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, percpu
443  * variable access is safe, and map elements stick around. See the comment
444  * above xdp_do_flush() in filter.c.
445 */
446 static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
447 struct net_device *dev_rx, struct bpf_prog *xdp_prog)
448 {
449 struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
450 struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
451
452 if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
453 bq_xmit_all(bq, 0);
454
455 /* Ingress dev_rx will be the same for all xdp_frames in the
456  * bulk_queue, because the bq is stored per CPU and must be flushed
457  * at the end of the net_device driver's NAPI function.
458 *
459 * Do the same with xdp_prog and flush_list since these fields
460 * are only ever modified together.
461 */
462 if (!bq->dev_rx) {
463 bq->dev_rx = dev_rx;
464 bq->xdp_prog = xdp_prog;
465 list_add(&bq->flush_node, flush_list);
466 }
467
468 bq->q[bq->count++] = xdpf;
469 }
470
471 static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
472 struct net_device *dev_rx,
473 struct bpf_prog *xdp_prog)
474 {
475 int err;
476
477 if (!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
478 return -EOPNOTSUPP;
479
480 if (unlikely(!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
481 xdp_frame_has_frags(xdpf)))
482 return -EOPNOTSUPP;
483
484 err = xdp_ok_fwd_dev(dev, xdp_get_frame_len(xdpf));
485 if (unlikely(err))
486 return err;
487
488 bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
489 return 0;
490 }
491
492 static u32 dev_map_bpf_prog_run_skb(struct sk_buff *skb, struct bpf_dtab_netdev *dst)
493 {
494 struct xdp_txq_info txq = { .dev = dst->dev };
495 struct xdp_buff xdp;
496 u32 act;
497
498 if (!dst->xdp_prog)
499 return XDP_PASS;
500
501 __skb_pull(skb, skb->mac_len);
502 xdp.txq = &txq;
503
504 act = bpf_prog_run_generic_xdp(skb, &xdp, dst->xdp_prog);
505 switch (act) {
506 case XDP_PASS:
507 __skb_push(skb, skb->mac_len);
508 break;
509 default:
510 bpf_warn_invalid_xdp_action(NULL, dst->xdp_prog, act);
511 fallthrough;
512 case XDP_ABORTED:
513 trace_xdp_exception(dst->dev, dst->xdp_prog, act);
514 fallthrough;
515 case XDP_DROP:
516 kfree_skb(skb);
517 break;
518 }
519
520 return act;
521 }
522
523 int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
524 struct net_device *dev_rx)
525 {
526 return __xdp_enqueue(dev, xdpf, dev_rx, NULL);
527 }
528
529 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
530 struct net_device *dev_rx)
531 {
532 struct net_device *dev = dst->dev;
533
534 return __xdp_enqueue(dev, xdpf, dev_rx, dst->xdp_prog);
535 }
536
537 static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
538 {
539 if (!obj)
540 return false;
541
542 if (!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
543 return false;
544
545 if (unlikely(!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
546 xdp_frame_has_frags(xdpf)))
547 return false;
548
549 if (xdp_ok_fwd_dev(obj->dev, xdp_get_frame_len(xdpf)))
550 return false;
551
552 return true;
553 }
554
555 static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
556 struct net_device *dev_rx,
557 struct xdp_frame *xdpf)
558 {
559 struct xdp_frame *nxdpf;
560
561 nxdpf = xdpf_clone(xdpf);
562 if (!nxdpf)
563 return -ENOMEM;
564
565 bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);
566
567 return 0;
568 }
569
570 static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifindex)
571 {
572 while (num_excluded--) {
573 if (ifindex == excluded[num_excluded])
574 return true;
575 }
576 return false;
577 }
578
579 /* Get ifindex of each upper device. 'indexes' must be able to hold at
580 * least MAX_NEST_DEV elements.
581 * Returns the number of ifindexes added.
582 */
583 static int get_upper_ifindexes(struct net_device *dev, int *indexes)
584 {
585 struct net_device *upper;
586 struct list_head *iter;
587 int n = 0;
588
589 netdev_for_each_upper_dev_rcu(dev, upper, iter) {
590 indexes[n++] = upper->ifindex;
591 }
592 return n;
593 }
594
595 int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
596 struct bpf_map *map, bool exclude_ingress)
597 {
598 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
599 struct bpf_dtab_netdev *dst, *last_dst = NULL;
600 int excluded_devices[1+MAX_NEST_DEV];
601 struct hlist_head *head;
602 int num_excluded = 0;
603 unsigned int i;
604 int err;
605
606 if (exclude_ingress) {
607 num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
608 excluded_devices[num_excluded++] = dev_rx->ifindex;
609 }
610
611 if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
612 for (i = 0; i < map->max_entries; i++) {
613 dst = rcu_dereference_check(dtab->netdev_map[i],
614 rcu_read_lock_bh_held());
615 if (!is_valid_dst(dst, xdpf))
616 continue;
617
618 if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
619 continue;
620
621 /* we only need n-1 clones; last_dst enqueued below */
622 if (!last_dst) {
623 last_dst = dst;
624 continue;
625 }
626
627 err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
628 if (err)
629 return err;
630
631 last_dst = dst;
632 }
633 } else { /* BPF_MAP_TYPE_DEVMAP_HASH */
634 for (i = 0; i < dtab->n_buckets; i++) {
635 head = dev_map_index_hash(dtab, i);
636 hlist_for_each_entry_rcu(dst, head, index_hlist,
637 lockdep_is_held(&dtab->index_lock)) {
638 if (!is_valid_dst(dst, xdpf))
639 continue;
640
641 if (is_ifindex_excluded(excluded_devices, num_excluded,
642 dst->dev->ifindex))
643 continue;
644
645 /* we only need n-1 clones; last_dst enqueued below */
646 if (!last_dst) {
647 last_dst = dst;
648 continue;
649 }
650
651 err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
652 if (err)
653 return err;
654
655 last_dst = dst;
656 }
657 }
658 }
659
660 /* consume the last copy of the frame */
661 if (last_dst)
662 bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
663 else
664 xdp_return_frame_rx_napi(xdpf); /* dtab is empty */
665
666 return 0;
667 }
668
669 int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
670 struct bpf_prog *xdp_prog)
671 {
672 int err;
673
674 err = xdp_ok_fwd_dev(dst->dev, skb->len);
675 if (unlikely(err))
676 return err;
677
678 /* Redirect has already succeeded semantically at this point, so we just
679 * return 0 even if packet is dropped. Helper below takes care of
680 * freeing skb.
681 */
682 if (dev_map_bpf_prog_run_skb(skb, dst) != XDP_PASS)
683 return 0;
684
685 skb->dev = dst->dev;
686 generic_xdp_tx(skb, xdp_prog);
687
688 return 0;
689 }
690
691 static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
692 struct sk_buff *skb,
693 struct bpf_prog *xdp_prog)
694 {
695 struct sk_buff *nskb;
696 int err;
697
698 nskb = skb_clone(skb, GFP_ATOMIC);
699 if (!nskb)
700 return -ENOMEM;
701
702 err = dev_map_generic_redirect(dst, nskb, xdp_prog);
703 if (unlikely(err)) {
704 consume_skb(nskb);
705 return err;
706 }
707
708 return 0;
709 }
710
711 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
712 struct bpf_prog *xdp_prog, struct bpf_map *map,
713 bool exclude_ingress)
714 {
715 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
716 struct bpf_dtab_netdev *dst, *last_dst = NULL;
717 int excluded_devices[1+MAX_NEST_DEV];
718 struct hlist_head *head;
719 struct hlist_node *next;
720 int num_excluded = 0;
721 unsigned int i;
722 int err;
723
724 if (exclude_ingress) {
725 num_excluded = get_upper_ifindexes(dev, excluded_devices);
726 excluded_devices[num_excluded++] = dev->ifindex;
727 }
728
729 if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
730 for (i = 0; i < map->max_entries; i++) {
731 dst = rcu_dereference_check(dtab->netdev_map[i],
732 rcu_read_lock_bh_held());
733 if (!dst)
734 continue;
735
736 if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
737 continue;
738
739 /* we only need n-1 clones; last_dst enqueued below */
740 if (!last_dst) {
741 last_dst = dst;
742 continue;
743 }
744
745 err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
746 if (err)
747 return err;
748
749 last_dst = dst;
750
751 }
752 } else { /* BPF_MAP_TYPE_DEVMAP_HASH */
753 for (i = 0; i < dtab->n_buckets; i++) {
754 head = dev_map_index_hash(dtab, i);
755 hlist_for_each_entry_safe(dst, next, head, index_hlist) {
756 if (!dst)
757 continue;
758
759 if (is_ifindex_excluded(excluded_devices, num_excluded,
760 dst->dev->ifindex))
761 continue;
762
763 /* we only need n-1 clones; last_dst enqueued below */
764 if (!last_dst) {
765 last_dst = dst;
766 continue;
767 }
768
769 err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
770 if (err)
771 return err;
772
773 last_dst = dst;
774 }
775 }
776 }
777
778 /* consume the first skb and return */
779 if (last_dst)
780 return dev_map_generic_redirect(last_dst, skb, xdp_prog);
781
782 /* dtab is empty */
783 consume_skb(skb);
784 return 0;
785 }
786
787 static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
788 {
789 struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
790
791 return obj ? &obj->val : NULL;
792 }
793
794 static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
795 {
796 struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
797 *(u32 *)key);
798 return obj ? &obj->val : NULL;
799 }
800
801 static void __dev_map_entry_free(struct rcu_head *rcu)
802 {
803 struct bpf_dtab_netdev *dev;
804
805 dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
806 if (dev->xdp_prog)
807 bpf_prog_put(dev->xdp_prog);
808 dev_put(dev->dev);
809 kfree(dev);
810 }
811
812 static int dev_map_delete_elem(struct bpf_map *map, void *key)
813 {
814 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
815 struct bpf_dtab_netdev *old_dev;
816 int k = *(u32 *)key;
817
818 if (k >= map->max_entries)
819 return -EINVAL;
820
821 old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
822 if (old_dev)
823 call_rcu(&old_dev->rcu, __dev_map_entry_free);
824 return 0;
825 }
826
827 static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
828 {
829 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
830 struct bpf_dtab_netdev *old_dev;
831 int k = *(u32 *)key;
832 unsigned long flags;
833 int ret = -ENOENT;
834
835 spin_lock_irqsave(&dtab->index_lock, flags);
836
837 old_dev = __dev_map_hash_lookup_elem(map, k);
838 if (old_dev) {
839 dtab->items--;
840 hlist_del_init_rcu(&old_dev->index_hlist);
841 call_rcu(&old_dev->rcu, __dev_map_entry_free);
842 ret = 0;
843 }
844 spin_unlock_irqrestore(&dtab->index_lock, flags);
845
846 return ret;
847 }
848
849 static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
850 struct bpf_dtab *dtab,
851 struct bpf_devmap_val *val,
852 unsigned int idx)
853 {
854 struct bpf_prog *prog = NULL;
855 struct bpf_dtab_netdev *dev;
856
857 dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
858 GFP_NOWAIT | __GFP_NOWARN,
859 dtab->map.numa_node);
860 if (!dev)
861 return ERR_PTR(-ENOMEM);
862
863 dev->dev = dev_get_by_index(net, val->ifindex);
864 if (!dev->dev)
865 goto err_out;
866
867 if (val->bpf_prog.fd > 0) {
868 prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
869 BPF_PROG_TYPE_XDP, false);
870 if (IS_ERR(prog))
871 goto err_put_dev;
872 if (prog->expected_attach_type != BPF_XDP_DEVMAP ||
873 !bpf_prog_map_compatible(&dtab->map, prog))
874 goto err_put_prog;
875 }
876
877 dev->idx = idx;
878 dev->dtab = dtab;
879 if (prog) {
880 dev->xdp_prog = prog;
881 dev->val.bpf_prog.id = prog->aux->id;
882 } else {
883 dev->xdp_prog = NULL;
884 dev->val.bpf_prog.id = 0;
885 }
886 dev->val.ifindex = val->ifindex;
887
888 return dev;
889 err_put_prog:
890 bpf_prog_put(prog);
891 err_put_dev:
892 dev_put(dev->dev);
893 err_out:
894 kfree(dev);
895 return ERR_PTR(-EINVAL);
896 }
897
898 static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
899 void *key, void *value, u64 map_flags)
900 {
901 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
902 struct bpf_dtab_netdev *dev, *old_dev;
903 struct bpf_devmap_val val = {};
904 u32 i = *(u32 *)key;
905
906 if (unlikely(map_flags > BPF_EXIST))
907 return -EINVAL;
908 if (unlikely(i >= dtab->map.max_entries))
909 return -E2BIG;
910 if (unlikely(map_flags == BPF_NOEXIST))
911 return -EEXIST;
912
913 /* already verified value_size <= sizeof val */
914 memcpy(&val, value, map->value_size);
915
916 if (!val.ifindex) {
917 dev = NULL;
918 /* can not specify fd if ifindex is 0 */
919 if (val.bpf_prog.fd > 0)
920 return -EINVAL;
921 } else {
922 dev = __dev_map_alloc_node(net, dtab, &val, i);
923 if (IS_ERR(dev))
924 return PTR_ERR(dev);
925 }
926
927 /* Use call_rcu() here to ensure rcu critical sections have completed.
928  * Remember that the driver-side flush operation will happen before the
929  * net device is removed.
930  */
931 old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
932 if (old_dev)
933 call_rcu(&old_dev->rcu, __dev_map_entry_free);
934
935 return 0;
936 }
937
938 static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
939 u64 map_flags)
940 {
941 return __dev_map_update_elem(current->nsproxy->net_ns,
942 map, key, value, map_flags);
943 }
944
945 static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
946 void *key, void *value, u64 map_flags)
947 {
948 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
949 struct bpf_dtab_netdev *dev, *old_dev;
950 struct bpf_devmap_val val = {};
951 u32 idx = *(u32 *)key;
952 unsigned long flags;
953 int err = -EEXIST;
954
955 /* already verified value_size <= sizeof val */
956 memcpy(&val, value, map->value_size);
957
958 if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
959 return -EINVAL;
960
961 spin_lock_irqsave(&dtab->index_lock, flags);
962
963 old_dev = __dev_map_hash_lookup_elem(map, idx);
964 if (old_dev && (map_flags & BPF_NOEXIST))
965 goto out_err;
966
967 dev = __dev_map_alloc_node(net, dtab, &val, idx);
968 if (IS_ERR(dev)) {
969 err = PTR_ERR(dev);
970 goto out_err;
971 }
972
973 if (old_dev) {
974 hlist_del_rcu(&old_dev->index_hlist);
975 } else {
976 if (dtab->items >= dtab->map.max_entries) {
977 spin_unlock_irqrestore(&dtab->index_lock, flags);
978 call_rcu(&dev->rcu, __dev_map_entry_free);
979 return -E2BIG;
980 }
981 dtab->items++;
982 }
983
984 hlist_add_head_rcu(&dev->index_hlist,
985 dev_map_index_hash(dtab, idx));
986 spin_unlock_irqrestore(&dtab->index_lock, flags);
987
988 if (old_dev)
989 call_rcu(&old_dev->rcu, __dev_map_entry_free);
990
991 return 0;
992
993 out_err:
994 spin_unlock_irqrestore(&dtab->index_lock, flags);
995 return err;
996 }
997
998 static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
999 u64 map_flags)
1000 {
1001 return __dev_map_hash_update_elem(current->nsproxy->net_ns,
1002 map, key, value, map_flags);
1003 }
1004
1005 static int dev_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
1006 {
1007 return __bpf_xdp_redirect_map(map, ifindex, flags,
1008 BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
1009 __dev_map_lookup_elem);
1010 }
1011
1012 static int dev_hash_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
1013 {
1014 return __bpf_xdp_redirect_map(map, ifindex, flags,
1015 BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
1016 __dev_map_hash_lookup_elem);
1017 }
1018
1019 BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab)
1020 const struct bpf_map_ops dev_map_ops = {
1021 .map_meta_equal = bpf_map_meta_equal,
1022 .map_alloc = dev_map_alloc,
1023 .map_free = dev_map_free,
1024 .map_get_next_key = dev_map_get_next_key,
1025 .map_lookup_elem = dev_map_lookup_elem,
1026 .map_update_elem = dev_map_update_elem,
1027 .map_delete_elem = dev_map_delete_elem,
1028 .map_check_btf = map_check_no_btf,
1029 .map_btf_id = &dev_map_btf_ids[0],
1030 .map_redirect = dev_map_redirect,
1031 };
1032
1033 const struct bpf_map_ops dev_map_hash_ops = {
1034 .map_meta_equal = bpf_map_meta_equal,
1035 .map_alloc = dev_map_alloc,
1036 .map_free = dev_map_free,
1037 .map_get_next_key = dev_map_hash_get_next_key,
1038 .map_lookup_elem = dev_map_hash_lookup_elem,
1039 .map_update_elem = dev_map_hash_update_elem,
1040 .map_delete_elem = dev_map_hash_delete_elem,
1041 .map_check_btf = map_check_no_btf,
1042 .map_btf_id = &dev_map_btf_ids[0],
1043 .map_redirect = dev_hash_map_redirect,
1044 };
1045
1046 static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
1047 struct net_device *netdev)
1048 {
1049 unsigned long flags;
1050 u32 i;
1051
1052 spin_lock_irqsave(&dtab->index_lock, flags);
1053 for (i = 0; i < dtab->n_buckets; i++) {
1054 struct bpf_dtab_netdev *dev;
1055 struct hlist_head *head;
1056 struct hlist_node *next;
1057
1058 head = dev_map_index_hash(dtab, i);
1059
1060 hlist_for_each_entry_safe(dev, next, head, index_hlist) {
1061 if (netdev != dev->dev)
1062 continue;
1063
1064 dtab->items--;
1065 hlist_del_rcu(&dev->index_hlist);
1066 call_rcu(&dev->rcu, __dev_map_entry_free);
1067 }
1068 }
1069 spin_unlock_irqrestore(&dtab->index_lock, flags);
1070 }
1071
1072 static int dev_map_notification(struct notifier_block *notifier,
1073 ulong event, void *ptr)
1074 {
1075 struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
1076 struct bpf_dtab *dtab;
1077 int i, cpu;
1078
1079 switch (event) {
1080 case NETDEV_REGISTER:
1081 if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
1082 break;
1083
1084 /* will be freed in free_netdev() */
1085 netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
1086 if (!netdev->xdp_bulkq)
1087 return NOTIFY_BAD;
1088
1089 for_each_possible_cpu(cpu)
1090 per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
1091 break;
1092 case NETDEV_UNREGISTER:
1093 /* This rcu_read_lock/unlock pair is needed because
1094 * dev_map_list is an RCU list AND to ensure a delete
1095 * operation does not free a netdev_map entry while we
1096 * are comparing it against the netdev being unregistered.
1097 */
1098 rcu_read_lock();
1099 list_for_each_entry_rcu(dtab, &dev_map_list, list) {
1100 if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
1101 dev_map_hash_remove_netdev(dtab, netdev);
1102 continue;
1103 }
1104
1105 for (i = 0; i < dtab->map.max_entries; i++) {
1106 struct bpf_dtab_netdev *dev, *odev;
1107
1108 dev = rcu_dereference(dtab->netdev_map[i]);
1109 if (!dev || netdev != dev->dev)
1110 continue;
1111 odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
1112 if (dev == odev)
1113 call_rcu(&dev->rcu,
1114 __dev_map_entry_free);
1115 }
1116 }
1117 rcu_read_unlock();
1118 break;
1119 default:
1120 break;
1121 }
1122 return NOTIFY_OK;
1123 }
1124
1125 static struct notifier_block dev_map_notifier = {
1126 .notifier_call = dev_map_notification,
1127 };
1128
1129 static int __init dev_map_init(void)
1130 {
1131 int cpu;
1132
1133 /* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
1134 BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
1135 offsetof(struct _bpf_dtab_netdev, dev));
1136 register_netdevice_notifier(&dev_map_notifier);
1137
1138 for_each_possible_cpu(cpu)
1139 INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu));
1140 return 0;
1141 }
1142
1143 subsys_initcall(dev_map_init);
1144