/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef XSK_BUFF_POOL_H_
#define XSK_BUFF_POOL_H_

#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/bpf.h>
#include <net/xdp.h>

struct xsk_buff_pool;
struct xdp_rxq_info;
struct xsk_queue;
struct xdp_desc;
struct xdp_umem;
struct xdp_sock;
struct device;
struct page;

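/* Per-buffer metadata kept by the pool. frame_dma is the DMA address of the
 * start of the buffer (chunk), while dma points at the start of packet data:
 * frame_dma plus the pool headroom plus XDP_PACKET_HEADROOM, as set up in
 * xp_init_xskb_dma() below.
 */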
struct xdp_buff_xsk {
	struct xdp_buff xdp;
	dma_addr_t dma;
	dma_addr_t frame_dma;
	struct xsk_buff_pool *pool;
	u64 orig_addr;
	struct list_head free_list_node;
};

struct xsk_dma_map {
	dma_addr_t *dma_pages;
	struct device *dev;
	struct net_device *netdev;
	refcount_t users;
	struct list_head list; /* Protected by the RTNL_LOCK */
	u32 dma_pages_cnt;
	bool dma_need_sync;
};

struct xsk_buff_pool {
	/* Members only used in the control path first. */
	struct device *dev;
	struct net_device *netdev;
	struct list_head xsk_tx_list;
	/* Protects modifications to the xsk_tx_list */
	spinlock_t xsk_tx_list_lock;
	refcount_t users;
	struct xdp_umem *umem;
	struct work_struct work;
	struct list_head free_list;
	u32 heads_cnt;
	u16 queue_id;

	/* Data path members as close to free_heads at the end as possible. */
	struct xsk_queue *fq ____cacheline_aligned_in_smp;
	struct xsk_queue *cq;
	/* For performance reasons, each buff pool has its own array of
	 * dma_pages even when they are identical.
	 */
	dma_addr_t *dma_pages;
	struct xdp_buff_xsk *heads;
	u64 chunk_mask;
	u64 addrs_cnt;
	u32 free_list_cnt;
	u32 dma_pages_cnt;
	u32 free_heads_cnt;
	u32 headroom;
	u32 chunk_size;
	u32 chunk_shift;
	u32 frame_len;
	u8 cached_need_wakeup;
	bool uses_need_wakeup;
	bool dma_need_sync;
	bool unaligned;
	void *addrs;
	/* Mutual exclusion of the completion ring in the SKB mode. Two cases
	 * to protect: the NAPI TX thread and sendmsg error paths in the SKB
	 * destructor callback, and sockets sharing a single cq because they
	 * share the same netdev and queue id.
	 */
	spinlock_t cq_lock;
	struct xdp_buff_xsk *free_heads[];
};

/* Masks for xdp_umem_page flags.
 * The low 12-bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)

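/* Illustrative sketch (values are hypothetical): a dma_pages[] entry holds a
 * page-aligned DMA address in its upper bits, with bit 0 flagging that the
 * next page is physically contiguous:
 *
 *	dma_pages[i] = 0xfedc0000ULL | XSK_NEXT_PG_CONTIG_MASK;
 *	dma = dma_pages[i] & ~XSK_NEXT_PG_CONTIG_MASK;
 *
 * xp_init_xskb_dma() and xp_desc_crosses_non_contig_pg() below mask the flag
 * out or test it, respectively.
 */
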
/* AF_XDP core. */
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem);
int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
		  u16 queue_id, u16 flags);
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
			 struct net_device *dev, u16 queue_id);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_get_pool(struct xsk_buff_pool *pool);
bool xp_put_pool(struct xsk_buff_pool *pool);
void xp_clear_dev(struct xsk_buff_pool *pool);
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);

/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);

static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
				     u64 addr)
{
	xskb->orig_addr = addr;
	xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
}

static inline void xp_init_xskb_dma(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool,
				    dma_addr_t *dma_pages, u64 addr)
{
	xskb->frame_dma = (dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) +
			  (addr & ~PAGE_MASK);
	xskb->dma = xskb->frame_dma + pool->headroom + XDP_PACKET_HEADROOM;
}
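
/* Worked example (hypothetical numbers): with 4 KiB pages and addr = 0x1800,
 * addr >> PAGE_SHIFT == 1 selects dma_pages[1] and addr & ~PAGE_MASK == 0x800
 * is the offset into that page, so frame_dma points at the buffer start and
 * dma additionally skips pool->headroom + XDP_PACKET_HEADROOM of headroom.
 */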

/* AF_XDP ZC drivers, via xdp_sock_buff.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->dma;
}

static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->frame_dma;
}

void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
	xp_dma_sync_for_cpu_slow(xskb);
}

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size);
static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
					  dma_addr_t dma, size_t size)
{
	if (!pool->dma_need_sync)
		return;

	xp_dma_sync_for_device_slow(pool, dma, size);
}

static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
						 u64 addr, u32 len)
{
	bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;

	if (likely(!cross_pg))
		return false;

	if (pool->dma_pages_cnt) {
		return !(pool->dma_pages[addr >> PAGE_SHIFT] &
			 XSK_NEXT_PG_CONTIG_MASK);
	}

	/* skb path */
	return addr + len > pool->addrs_cnt;
}
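
/* Example (hypothetical numbers): with 4 KiB pages, addr = 0xf00 and
 * len = 0x300 cross a page boundary (0xf00 + 0x300 > 0x1000). For a
 * DMA-mapped pool this is only safe if dma_pages[0] has the contiguity flag
 * set; in the skb path pool->addrs is virtually contiguous, so only running
 * past the end of the umem matters.
 */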

static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
{
	return addr & pool->chunk_mask;
}

static inline u64 xp_unaligned_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xp_unaligned_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
{
	return xp_unaligned_extract_addr(addr) +
	       xp_unaligned_extract_offset(addr);
}
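
/* In unaligned mode a 64-bit descriptor address carries the base address in
 * its lower 48 bits and an offset in the upper 16 bits (see
 * XSK_UNALIGNED_BUF_OFFSET_SHIFT in <linux/if_xdp.h>). Example with made-up
 * values:
 *
 *	u64 addr = (0x100ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) | 0x2000;
 *
 *	xp_unaligned_extract_addr(addr)       == 0x2000
 *	xp_unaligned_extract_offset(addr)     == 0x100
 *	xp_unaligned_add_offset_to_addr(addr) == 0x2100
 */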

static inline u32 xp_aligned_extract_idx(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_aligned_extract_addr(pool, addr) >> pool->chunk_shift;
}

static inline void xp_release(struct xdp_buff_xsk *xskb)
{
	if (xskb->pool->unaligned)
		xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}
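
/* In an aligned pool the xskb can be found again from its chunk index (see
 * xp_aligned_extract_idx() above), so only unaligned pools need to push a
 * released buffer back onto free_heads here.
 */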

static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}
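
/* Example (hypothetical values): if pool->headroom is 256 and xdp.data sits
 * 256 bytes past data_hard_start, then offset == 512 from the buffer start.
 * An aligned pool returns orig_addr + 512; an unaligned pool encodes the
 * offset in the upper bits instead:
 *
 *	orig_addr + (512ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT)
 */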

#endif /* XSK_BUFF_POOL_H_ */