// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
{
	unsigned depth = sb->depth;

	sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sb->alloc_hint)
		return -ENOMEM;

	if (depth && !sb->round_robin) {
		int i;

		for_each_possible_cpu(i)
			*per_cpu_ptr(sb->alloc_hint, i) = get_random_u32_below(depth);
	}
	return 0;
}

static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
						    unsigned int depth)
{
	unsigned hint;

	hint = this_cpu_read(*sb->alloc_hint);
	if (unlikely(hint >= depth)) {
		hint = depth ? get_random_u32_below(depth) : 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}

	return hint;
}

static inline void update_alloc_hint_after_get(struct sbitmap *sb,
					       unsigned int depth,
					       unsigned int hint,
					       unsigned int nr)
{
	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sb->alloc_hint, 0);
	} else if (nr == hint || unlikely(sb->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}
}

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
	unsigned long mask;

	if (!READ_ONCE(map->cleared))
		return false;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	mask = xchg(&map->cleared, 0);

	/*
	 * Now clear the masked bits in our free word
	 */
	atomic_long_andnot(mask, (atomic_long_t *)&map->word);
	BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
	return true;
}

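/*
 * Illustrative flow (a sketch, not an API contract from this file): a
 * deferred free only sets the bit in ->cleared, postponing the atomic
 * update of ->word; the helper above folds the accumulated bits back
 * in a single andnot once an allocator runs out of space in the word:
 *
 *	sbitmap_deferred_clear_bit(&sb, nr);	// cheap deferred free
 *	...
 *	// allocation path, word appears full:
 *	if (sbitmap_deferred_clear(map))
 *		goto retry;	// the word has free bits again
 */
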
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin,
		      bool alloc_hint)
{
	unsigned int bits_per_word;

	if (shift < 0)
		shift = sbitmap_calculate_shift(depth);

	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
	sb->round_robin = round_robin;

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	if (alloc_hint) {
		if (init_alloc_hint(sb, flags))
			return -ENOMEM;
	} else {
		sb->alloc_hint = NULL;
	}

	sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
	if (!sb->map) {
		free_percpu(sb->alloc_hint);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);

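/*
 * Illustrative usage (a minimal sketch, not part of this file): a 128-bit
 * map with an implementation-chosen shift, non-round-robin, with per-CPU
 * hints so that sbitmap_get() below can be used:
 *
 *	struct sbitmap sb;
 *
 *	if (sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
 *			      false, true))
 *		return -ENOMEM;
 *	...
 *	sbitmap_free(&sb);
 */
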
void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(&sb->map[i]);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
}
EXPORT_SYMBOL_GPL(sbitmap_resize);

static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	int nr;

	/* don't wrap if starting from 0 */
	wrap = wrap && hint;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (hint && wrap) {
				hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}

static int sbitmap_find_bit_in_word(struct sbitmap_word *map,
				    unsigned int depth,
				    unsigned int alloc_hint,
				    bool wrap)
{
	int nr;

	do {
		nr = __sbitmap_get_word(&map->word, depth,
					alloc_hint, wrap);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(map))
			break;
	} while (1);

	return nr;
}

static int sbitmap_find_bit(struct sbitmap *sb,
			    unsigned int depth,
			    unsigned int index,
			    unsigned int alloc_hint,
			    bool wrap)
{
	unsigned int i;
	int nr = -1;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_word(&sb->map[index],
					      min_t(unsigned int,
						    __map_depth(sb, index),
						    depth),
					      alloc_hint, wrap);

		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}

static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
{
	unsigned int index;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (sb->round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	return sbitmap_find_bit(sb, UINT_MAX, index, alloc_hint,
				!sb->round_robin);
}

int sbitmap_get(struct sbitmap *sb)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get(sb, hint);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);

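/*
 * Illustrative sketch (assumes the map was initialized with
 * alloc_hint == true, which sbitmap_get() requires): allocate one free
 * bit and later release it immediately with sbitmap_clear_bit(), or
 * lazily with sbitmap_deferred_clear_bit():
 *
 *	int nr = sbitmap_get(&sb);
 *
 *	if (nr < 0)
 *		return -EBUSY;		// map is full
 *	... nr is exclusively owned until cleared ...
 *	sbitmap_clear_bit(&sb, nr);
 */
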
static int __sbitmap_get_shallow(struct sbitmap *sb,
				 unsigned int alloc_hint,
				 unsigned long shallow_depth)
{
	unsigned int index;

	index = SB_NR_TO_INDEX(sb, alloc_hint);
	alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);

	return sbitmap_find_bit(sb, shallow_depth, index, alloc_hint, true);
}

int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get_shallow(sb, hint, shallow_depth);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

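/*
 * Illustrative sketch: cap allocation to the first 4 bits of each word,
 * e.g. so one class of users leaves headroom for another. -1 is returned
 * when no bit below the cap is free, even if deeper bits are:
 *
 *	int nr = sbitmap_get_shallow(&sb, 4);
 */
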
bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned int word_depth = __map_depth(sb, i);

		if (set)
			weight += bitmap_weight(&word->word, word_depth);
		else
			weight += bitmap_weight(&word->cleared, word_depth);
	}
	return weight;
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true) - sbitmap_cleared(sb);
}
EXPORT_SYMBOL_GPL(sbitmap_weight);

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned long cleared = READ_ONCE(sb->map[i].cleared);
		unsigned int word_bits = __map_depth(sb, i);

		word &= ~cleared;

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}

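/*
 * Worked example (a sketch of the arithmetic above, assuming the default
 * SBQ_WAIT_QUEUES == 8 and SBQ_WAKE_BATCH == 8): with depth == 256 and
 * shift == 6 (64 bits per word), an unlimited min_shallow_depth leaves
 * the effective depth at 256, so wake_batch == clamp(256 / 8, 1, 8) == 8.
 * Setting min_shallow_depth to 4 caps each of the four words at 4 usable
 * bits, the effective depth drops to 16, and wake_batch == 2.
 */
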
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
				round_robin, true);
	if (ret)
		return ret;

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);
	atomic_set(&sbq->completion_cnt, 0);
	atomic_set(&sbq->wakeup_cnt, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++)
		init_waitqueue_head(&sbq->ws[i].wait);

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);

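/*
 * Illustrative usage (a minimal sketch): a queue-aware map for 256 tags.
 * Unlike sbitmap_init_node(), per-CPU hints are always allocated here:
 *
 *	struct sbitmap_queue sbq;
 *
 *	if (sbitmap_queue_init_node(&sbq, 256, -1, false, GFP_KERNEL,
 *				    NUMA_NO_NODE))
 *		return -ENOMEM;
 *	...
 *	sbitmap_queue_free(&sbq);
 */
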
static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch;

	wake_batch = sbq_calc_wake_batch(sbq, depth);
	if (sbq->wake_batch != wake_batch)
		WRITE_ONCE(sbq->wake_batch, wake_batch);
}

void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
					  unsigned int users)
{
	unsigned int wake_batch;
	unsigned int depth = (sbq->sb.depth + users - 1) / users;

	wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES, 1, SBQ_WAKE_BATCH);

	WRITE_ONCE(sbq->wake_batch, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	return sbitmap_get(&sbq->sb);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
					unsigned int *offset)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned int hint, depth;
	unsigned long index, nr;
	int i;

	if (unlikely(sb->round_robin))
		return 0;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);

	index = SB_NR_TO_INDEX(sb, hint);

	for (i = 0; i < sb->map_nr; i++) {
		struct sbitmap_word *map = &sb->map[index];
		unsigned long get_mask;
		unsigned int map_depth = __map_depth(sb, index);

		sbitmap_deferred_clear(map);
		if (map->word == (1UL << (map_depth - 1)) - 1)
			goto next;

		nr = find_first_zero_bit(&map->word, map_depth);
		if (nr + nr_tags <= map_depth) {
			atomic_long_t *ptr = (atomic_long_t *) &map->word;
			unsigned long val;

			get_mask = ((1UL << nr_tags) - 1) << nr;
			val = READ_ONCE(map->word);
			while (!atomic_long_try_cmpxchg(ptr, &val,
							get_mask | val))
				;
			get_mask = (get_mask & ~val) >> nr;
			if (get_mask) {
				*offset = nr + (index << sb->shift);
				update_alloc_hint_after_get(sb, depth, hint,
							*offset + nr_tags - 1);
				return get_mask;
			}
		}
next:
		/* Jump to next index. */
		if (++index >= sb->map_nr)
			index = 0;
	}

	return 0;
}

int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
			      unsigned int shallow_depth)
{
	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	return sbitmap_get_shallow(&sbq->sb, shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

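/*
 * Illustrative ordering sketch: min_shallow_depth is the smallest value
 * that will ever be passed to sbitmap_queue_get_shallow(), and it must
 * be set before shallow allocations so the wake batch is sized for it:
 *
 *	sbitmap_queue_min_shallow_depth(&sbq, 4);
 *	...
 *	tag = sbitmap_queue_get_shallow(&sbq, 8);	// fine: 8 >= 4
 *	tag = sbitmap_queue_get_shallow(&sbq, 2);	// triggers the WARN
 */
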
static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		/*
		 * Advance the index before checking the current queue.
		 * It improves fairness, by ensuring the queue doesn't
		 * need to be fully emptied before trying to wake up
		 * from the next one.
		 */
		wake_index = sbq_index_inc(wake_index);

		/*
		 * It is sufficient to wake up at least one waiter to
		 * guarantee forward progress.
		 */
		if (waitqueue_active(&ws->wait) &&
		    wake_up_nr(&ws->wait, nr))
			break;
	}

	if (wake_index != atomic_read(&sbq->wake_index))
		atomic_set(&sbq->wake_index, wake_index);
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
	unsigned int wake_batch = READ_ONCE(sbq->wake_batch);
	unsigned int wakeups;

	if (!atomic_read(&sbq->ws_active))
		return;

	atomic_add(nr, &sbq->completion_cnt);
	wakeups = atomic_read(&sbq->wakeup_cnt);

	do {
		if (atomic_read(&sbq->completion_cnt) - wakeups < wake_batch)
			return;
	} while (!atomic_try_cmpxchg(&sbq->wakeup_cnt,
				     &wakeups, wakeups + wake_batch));

	__sbitmap_queue_wake_up(sbq, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

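/*
 * Illustrative trace (a sketch, assuming wake_batch == 8): completions
 * accumulate in completion_cnt, and nothing is woken until a full batch
 * is outstanding. Seven sbitmap_queue_wake_up(sbq, 1) calls return early;
 * the eighth advances wakeup_cnt by 8 with the cmpxchg loop (so racing
 * callers issue exactly one wakeup per batch) and then wakes up to eight
 * exclusive waiters on a single wait queue.
 */
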
static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
{
	if (likely(!sb->round_robin && tag < sb->depth))
		data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
}

void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
				int *tags, int nr_tags)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned long *addr = NULL;
	unsigned long mask = 0;
	int i;

	smp_mb__before_atomic();
	for (i = 0; i < nr_tags; i++) {
		const int tag = tags[i] - offset;
		unsigned long *this_addr;

		/* since we're clearing a batch, skip the deferred map */
		this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
		if (!addr) {
			addr = this_addr;
		} else if (addr != this_addr) {
			atomic_long_andnot(mask, (atomic_long_t *) addr);
			mask = 0;
			addr = this_addr;
		}
		mask |= (1UL << SB_NR_TO_BIT(sb, tag));
	}

	if (mask)
		atomic_long_andnot(mask, (atomic_long_t *) addr);

	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq, nr_tags);
	sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
					tags[nr_tags - 1] - offset);
}

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the clear bit is set, the bit may be re-allocated.
	 *
	 * This orders READ/WRITE on the associated instance (such as a
	 * blk-mq request) against this bit, to avoid racing with
	 * re-allocation. Its pair is the memory barrier implied in
	 * __sbitmap_get_word().
	 *
	 * One invariant is that the clear bit has to be zero when the bit
	 * is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq, 1);
	sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

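/*
 * Illustrative tag lifecycle (a sketch loosely following blk-mq): the CPU
 * that allocated the tag is passed back on completion so its per-CPU hint
 * can be refreshed by sbitmap_update_cpu_hint():
 *
 *	unsigned int cpu = raw_smp_processor_id();
 *	int tag = __sbitmap_queue_get(&sbq);
 *
 *	if (tag >= 0) {
 *		... submit work tagged with 'tag' ...
 *		sbitmap_queue_clear(&sbq, tag, cpu);
 *	}
 */
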
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait=%s},\n",
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
		add_wait_queue(&ws->wait, &sbq_wait->wait);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);

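/*
 * Illustrative blocking allocation built on the helpers above (a sketch
 * loosely modelled on blk-mq tag waiting; assumes a sleepable context and
 * a wait queue chosen by the caller, e.g. via sbq_wait_ptr()):
 *
 *	DEFINE_SBQ_WAIT(wait);
 *	struct sbq_wait_state *ws = &sbq->ws[0];
 *	int tag;
 *
 *	for (;;) {
 *		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
 *		tag = __sbitmap_queue_get(sbq);
 *		if (tag >= 0)
 *			break;
 *		io_schedule();
 *	}
 *	sbitmap_finish_wait(sbq, ws, &wait);
 */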