author		Yu Kuai <yukuai3@huawei.com>	2025-08-07 11:24:12 +0800
committer	Jens Axboe <axboe@kernel.dk>	2025-08-07 06:30:17 -0600
commit		42e6c6ce03fd3e41e39a0f93f9b1a1d9fa664338 (patch)
tree		b58739caedd28044ba06837ddb6461c2fc3e73dc /lib/sbitmap.c
parent		80f21806b8e34ae1e24c0fc6a0f0dfd9b055e130 (diff)
download	linux-42e6c6ce03fd3e41e39a0f93f9b1a1d9fa664338.tar.gz
lib/sbitmap: convert shallow_depth from one word to the whole sbitmap
Currently, elevators record an internal 'async_depth' to throttle
asynchronous requests, and they calculate the shallow depth based on
sb->shift, on the assumption that every word holds 1 << sb->shift
available tags. However, that does not hold for the last word; see
__map_depth():

	if (index == sb->map_nr - 1)
		return sb->depth - (index << sb->shift);

As a consequence, if the last word is used, more tags can be obtained
than expected. For example, assume the default nr_requests=256 gives
four 64-bit words; in the worst case, if the user sets nr_requests=32,
the first word is also the last word and holds only 32 tags, yet
async_depth is still calculated from the full 64 bits per word, which
is wrong.

On the other hand, due to cgroup QoS, bfq may allow only one request to
be allocated, but setting shallow_depth=1 will still allow one request
per word (map_nr requests in total) to be allocated.

Fix these problems by applying shallow_depth to the whole sbitmap
instead of to each word, and change kyber, mq-deadline and bfq to
follow. A new helper, __map_depth_with_shallow(), is introduced to
calculate the available bits in each word.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Link: https://lore.kernel.org/r/20250807032413.1469456-2-yukuai1@huaweicloud.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
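To make the miscalculation concrete, here is a minimal userspace sketch
(illustrative only, not part of this commit; struct sbitmap and
map_depth() below are simplified stand-ins for the kernel types):

	#include <stdio.h>

	/* Stand-ins for the kernel's struct sbitmap and __map_depth(). */
	struct sbitmap { unsigned int depth, shift, map_nr; };

	static unsigned int map_depth(const struct sbitmap *sb, unsigned int index)
	{
		/* The last word may hold fewer than 1 << shift bits. */
		if (index == sb->map_nr - 1)
			return sb->depth - (index << sb->shift);
		return 1U << sb->shift;
	}

	int main(void)
	{
		/* nr_requests=32 with 64-bit words: one word, which is also the last. */
		struct sbitmap sb = { .depth = 32, .shift = 6, .map_nr = 1 };
		/* Old scheme: a per-word limit derived from bits_per_word (64), e.g. 75%. */
		unsigned int per_word_limit = 3 * (1U << sb.shift) / 4;

		printf("per-word limit %u vs actual word depth %u\n",
		       per_word_limit, map_depth(&sb, 0));	/* 48 vs 32 */
		return 0;
	}

Compiled and run, this prints "per-word limit 48 vs actual word depth
32": the per-word limit exceeds everything the word holds, so it
throttles nothing.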
Diffstat (limited to 'lib/sbitmap.c')
-rw-r--r--	lib/sbitmap.c	56
1 file changed, 29 insertions(+), 27 deletions(-)
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index d3412984170c03..c07e3cd82e29d7 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -208,8 +208,28 @@ static int sbitmap_find_bit_in_word(struct sbitmap_word *map,
 	return nr;
 }
 
+static unsigned int __map_depth_with_shallow(const struct sbitmap *sb,
+					     int index,
+					     unsigned int shallow_depth)
+{
+	u64 shallow_word_depth;
+	unsigned int word_depth, reminder;
+
+	word_depth = __map_depth(sb, index);
+	if (shallow_depth >= sb->depth)
+		return word_depth;
+
+	shallow_word_depth = word_depth * shallow_depth;
+	reminder = do_div(shallow_word_depth, sb->depth);
+
+	if (reminder >= (index + 1) * word_depth)
+		shallow_word_depth++;
+
+	return (unsigned int)shallow_word_depth;
+}
+
 static int sbitmap_find_bit(struct sbitmap *sb,
-			    unsigned int depth,
+			    unsigned int shallow_depth,
 			    unsigned int index,
 			    unsigned int alloc_hint,
 			    bool wrap)
@@ -218,12 +238,12 @@ static int sbitmap_find_bit(struct sbitmap *sb,
 	int nr = -1;
 
 	for (i = 0; i < sb->map_nr; i++) {
-		nr = sbitmap_find_bit_in_word(&sb->map[index],
-					      min_t(unsigned int,
-						    __map_depth(sb, index),
-						    depth),
-					      alloc_hint, wrap);
+		unsigned int depth = __map_depth_with_shallow(sb, index,
+							      shallow_depth);
 
+		if (depth)
+			nr = sbitmap_find_bit_in_word(&sb->map[index], depth,
+						      alloc_hint, wrap);
 		if (nr != -1) {
 			nr += index << sb->shift;
 			break;
@@ -406,27 +426,9 @@ EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
 static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
 					unsigned int depth)
 {
-	unsigned int wake_batch;
-	unsigned int shallow_depth;
-
-	/*
-	 * Each full word of the bitmap has bits_per_word bits, and there might
-	 * be a partial word. There are depth / bits_per_word full words and
-	 * depth % bits_per_word bits left over. In bitwise arithmetic:
-	 *
-	 * bits_per_word = 1 << shift
-	 * depth / bits_per_word = depth >> shift
-	 * depth % bits_per_word = depth & ((1 << shift) - 1)
-	 *
-	 * Each word can be limited to sbq->min_shallow_depth bits.
-	 */
-	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
-	depth = ((depth >> sbq->sb.shift) * shallow_depth +
-		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
-	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
-			     SBQ_WAKE_BATCH);
-
-	return wake_batch;
+	return clamp_t(unsigned int,
+		       min(depth, sbq->min_shallow_depth) / SBQ_WAIT_QUEUES,
+		       1, SBQ_WAKE_BATCH);
 }
 
 int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
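The arithmetic of the new helper can be exercised in userspace; this is
a minimal sketch, not the kernel code itself, assuming the kernel-only
do_div() may be replaced with plain 64-bit division/modulo and using the
same simplified stand-in types as above:

	#include <stdio.h>
	#include <stdint.h>

	struct sbitmap { unsigned int depth, shift, map_nr; };

	static unsigned int map_depth(const struct sbitmap *sb, unsigned int index)
	{
		if (index == sb->map_nr - 1)
			return sb->depth - (index << sb->shift);
		return 1U << sb->shift;
	}

	/* Same arithmetic as __map_depth_with_shallow(), with do_div()
	 * replaced by plain 64-bit division/modulo for userspace. */
	static unsigned int map_depth_with_shallow(const struct sbitmap *sb, int index,
						   unsigned int shallow_depth)
	{
		uint64_t shallow_word_depth;
		unsigned int word_depth = map_depth(sb, index), reminder;

		if (shallow_depth >= sb->depth)
			return word_depth;

		shallow_word_depth = (uint64_t)word_depth * shallow_depth;
		reminder = shallow_word_depth % sb->depth;
		shallow_word_depth /= sb->depth;

		/* Hand the rounding remainder to the lowest-indexed words. */
		if (reminder >= (index + 1) * word_depth)
			shallow_word_depth++;

		return (unsigned int)shallow_word_depth;
	}

	int main(void)
	{
		/* 256 tags in four 64-bit words, shallow_depth = 101. */
		struct sbitmap sb = { .depth = 256, .shift = 6, .map_nr = 4 };
		unsigned int i, total = 0;

		for (i = 0; i < sb.map_nr; i++) {
			unsigned int d = map_depth_with_shallow(&sb, (int)i, 101);

			printf("word %u: %u bits\n", i, d);	/* 26, 25, 25, 25 */
			total += d;
		}
		printf("total %u\n", total);			/* 101 */
		return 0;
	}

With 256 tags split over four 64-bit words and shallow_depth=101, this
prints 26, 25, 25, 25: the limit is distributed across words in
proportion to each word's depth, and the rounding remainder goes to the
lowest-indexed words, so the per-word limits sum back to 101. The
sbq_calc_wake_batch() simplification follows the same change in
semantics: with shallow_depth now a whole-sbitmap limit, the effective
depth is simply min(depth, sbq->min_shallow_depth), so the old per-word
accounting can be dropped.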