Diffstat (limited to 'lib/sbitmap.c')
-rw-r--r--	lib/sbitmap.c	95
1 file changed, 92 insertions, 3 deletions
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index b25db9be938a..2709ab825499 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -489,6 +489,57 @@ int __sbitmap_queue_get(struct sbitmap_queue *sbq)
 }
 EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
 
+unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
+					unsigned int *offset)
+{
+	struct sbitmap *sb = &sbq->sb;
+	unsigned int hint, depth;
+	unsigned long index, nr;
+	int i;
+
+	if (unlikely(sb->round_robin))
+		return 0;
+
+	depth = READ_ONCE(sb->depth);
+	hint = update_alloc_hint_before_get(sb, depth);
+
+	index = SB_NR_TO_INDEX(sb, hint);
+
+	for (i = 0; i < sb->map_nr; i++) {
+		struct sbitmap_word *map = &sb->map[index];
+		unsigned long get_mask;
+
+		sbitmap_deferred_clear(map);
+		if (map->word == (1UL << (map->depth - 1)) - 1)
+			continue;
+
+		nr = find_first_zero_bit(&map->word, map->depth);
+		if (nr + nr_tags <= map->depth) {
+			atomic_long_t *ptr = (atomic_long_t *) &map->word;
+			int map_tags = min_t(int, nr_tags, map->depth);
+			unsigned long val, ret;
+
+			get_mask = ((1UL << map_tags) - 1) << nr;
+			do {
+				val = READ_ONCE(map->word);
+				ret = atomic_long_cmpxchg(ptr, val, get_mask | val);
+			} while (ret != val);
+			get_mask = (get_mask & ~ret) >> nr;
+			if (get_mask) {
+				*offset = nr + (index << sb->shift);
+				update_alloc_hint_after_get(sb, depth, hint,
+							*offset + map_tags - 1);
+				return get_mask;
+			}
+		}
+		/* Jump to next index. */
+		if (++index >= sb->map_nr)
+			index = 0;
+	}
+
+	return 0;
+}
+
 int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
 				unsigned int shallow_depth)
 {
@@ -577,6 +628,46 @@ void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
 
+static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
+{
+	if (likely(!sb->round_robin && tag < sb->depth))
+		data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
+}
+
+void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
+				int *tags, int nr_tags)
+{
+	struct sbitmap *sb = &sbq->sb;
+	unsigned long *addr = NULL;
+	unsigned long mask = 0;
+	int i;
+
+	smp_mb__before_atomic();
+	for (i = 0; i < nr_tags; i++) {
+		const int tag = tags[i] - offset;
+		unsigned long *this_addr;
+
+		/* since we're clearing a batch, skip the deferred map */
+		this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
+		if (!addr) {
+			addr = this_addr;
+		} else if (addr != this_addr) {
+			atomic_long_andnot(mask, (atomic_long_t *) addr);
+			mask = 0;
+			addr = this_addr;
+		}
+		mask |= (1UL << SB_NR_TO_BIT(sb, tag));
+	}
+
+	if (mask)
+		atomic_long_andnot(mask, (atomic_long_t *) addr);
+
+	smp_mb__after_atomic();
+	sbitmap_queue_wake_up(sbq);
+	sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
+					tags[nr_tags - 1] - offset);
+}
+
 void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
 			 unsigned int cpu)
 {
@@ -601,9 +692,7 @@ void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
 	 */
 	smp_mb__after_atomic();
 	sbitmap_queue_wake_up(sbq);
-
-	if (likely(!sbq->sb.round_robin && nr < sbq->sb.depth))
-		*per_cpu_ptr(sbq->sb.alloc_hint, cpu) = nr;
+	sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
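
The two helpers added above are meant to be used as a pair: __sbitmap_queue_get_batch() claims up to nr_tags bits from a single word with one cmpxchg() loop and returns them as a mask (bit i of the mask means tag *offset + i was acquired; the mask can have holes where bits were already set), while sbitmap_queue_clear_batch() frees a batch with one atomic_long_andnot() per word touched instead of one atomic operation per tag. The sketch below shows a hypothetical caller; the function name, the batch size of 16, and the zero tag offset are illustrative assumptions and not part of this diff (in the kernel tree, blk-mq is the intended consumer).

/*
 * Hypothetical caller sketch -- not part of the patch. Assumes an
 * already-initialized sbitmap_queue; error handling trimmed.
 */
#include <linux/sbitmap.h>

static void example_use_batch(struct sbitmap_queue *sbq)
{
	int tags[BITS_PER_LONG];
	unsigned int offset;
	unsigned long mask;
	int i, nr = 0;

	/*
	 * Try to claim up to 16 tags in one shot. On success, bit i of
	 * the returned mask means tag (offset + i) now belongs to us;
	 * the mask may be sparse where bits were already taken.
	 */
	mask = __sbitmap_queue_get_batch(sbq, 16, &offset);
	if (!mask)
		return;	/* fall back to single-tag __sbitmap_queue_get() */

	for (i = 0; mask; i++, mask >>= 1)
		if (mask & 1)
			tags[nr++] = offset + i;

	/* ... use tags[0..nr-1] ... */

	/*
	 * Free the whole batch. The second argument is subtracted from
	 * each tag before the bitmap word is located (a caller such as
	 * blk-mq could pass a reserved-tag offset); 0 means the tags
	 * map 1:1 onto bitmap bits.
	 */
	sbitmap_queue_clear_batch(sbq, 0, tags, nr);
}

Note that, per the comment in the diff, sbitmap_queue_clear_batch() deliberately skips the deferred-clear map so the freed bits go straight back into the word that allocation scans, and the wakeup plus per-cpu hint update happen once for the whole batch rather than once per tag.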