blk-mq-tag.c
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that
 * even if we fail to get a tag the first time, the other shared-tag
 * users can reserve budget for us.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
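	/*
	 * The non-atomic test_bit() check avoids the more expensive
	 * atomic test_and_set_bit() in the common case where the queue
	 * is already marked active.
	 */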
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up everybody potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant: a depth of one can't be shared
	 * out any further.
	 */
	if (bt->sb.depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Give each active queue a roughly equal share of the depth,
	 * rounded up, but always allow at least 4 tags so every queue
	 * can make forward progress.
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
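	/*
	 * For example: a depth of 128 shared by three active queues
	 * allows each queue up to (128 + 3 - 1) / 3 == 43 tags.
	 */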
	return atomic_read(&hctx->nr_active) < depth;
}

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
	    !hctx_may_queue(data->hctx, bt))
		return -1;
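	/*
	 * An I/O scheduler may cap how deep this allocation can dip
	 * into the bitmap via ->shallow_depth (e.g. Kyber uses this to
	 * throttle async requests); honor that cap here.
	 */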
	if (data->shallow_depth)
		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_WAIT(wait);
	unsigned int tag_offset;
	bool drop_ctx;
	int tag;

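	/*
	 * Reserved tags sit at the bottom of the tag space; regular
	 * tags are offset by nr_reserved_tags so the two ranges don't
	 * overlap.
	 */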
	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_TAG_FAIL;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != -1)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_TAG_FAIL;

	ws = bt_wait_ptr(bt, data->hctx);
	drop_ctx = data->ctx == NULL;
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != -1)
			break;

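		/*
		 * Register as an exclusive waiter first, then try once
		 * more: a tag freed between the last attempt and
		 * prepare_to_wait_exclusive() would otherwise be missed.
		 */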
		prepare_to_wait_exclusive(&ws->wait, &wait,
						TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != -1)
			break;

		if (data->ctx)
			blk_mq_put_ctx(data->ctx);

		bt_prev = bt;
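		/*
		 * Sleep until a tag is freed. We may wake up on a
		 * different CPU, so the CPU-local software context and
		 * hardware queue are re-resolved below.
		 */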
		io_schedule();

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		finish_wait(&ws->wait, &wait);

		/*
		 * If the destination hw queue has changed, issue a fake
		 * wake up on the previous queue to compensate for the
		 * missed wake up, so other allocations on the previous
		 * queue won't be starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	if (drop_ctx && data->ctx)
		blk_mq_put_ctx(data->ctx);

	finish_wait(&ws->wait, &wait);

found_tag:
	return tag + tag_offset;
}

void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
		    struct blk_mq_ctx *ctx, unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	rq = tags->rqs[bitnr];

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
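	 *
	 * With a shared tag set, ->rqs[] may also hold a request that
	 * belongs to a different request queue; the rq->q check below
	 * filters those out.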
	 */
	if (rq && rq->q == hctx->queue)
		iter_data->fn(hctx, rq, iter_data->data, reserved);
	return true;
}

static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = tags->rqs[bitnr];
	if (rq && blk_mq_request_started(rq))
		iter_data->fn(rq, iter_data->data, reserved);

	return true;
}

static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, bool reserved)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	if (tags->rqs)
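		/* ->rqs[] may not be set up yet; nothing to iterate then */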
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv)
{
	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}

void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
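
/*
 * Example (illustrative sketch, not from this file): a driver that wants
 * to cancel every started request in its tag set during teardown could do:
 *
 *	static void my_cancel_rq(struct request *rq, void *data, bool reserved)
 *	{
 *		blk_mq_complete_request(rq);
 *	}
 *
 *	blk_mq_tagset_busy_iter(&dev->tagset, my_cancel_rq, NULL);
 *
 * "my_cancel_rq" and "dev" are made-up names for the sketch.
 */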

void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check.
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
	}
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
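	/*
	 * A shift of -1 lets sbitmap pick a default bits-per-word
	 * granularity based on the depth.
	 */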
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
		goto free_tags;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
		     node))
		goto free_bitmap_tags;

	return tags;
free_bitmap_tags:
	sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;
		bool ret;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > 16 * BLKDEV_MAX_RQ)
			return -EINVAL;

		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
				tags->nr_reserved_tags);
		if (!new)
			return -ENOMEM;
		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
		if (ret) {
			blk_mq_free_rq_map(new);
			return -ENOMEM;
		}

		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
		blk_mq_free_rq_map(*tagsptr);
		*tagsptr = new;
	} else {
		/*
		 * Don't need (or can't) update reserved tags here, they
		 * remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags,
				tdepth - tags->nr_reserved_tags);
	}

	return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
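 *
 * For example, assuming BLK_MQ_UNIQUE_TAG_BITS is 16, tag 5 on hardware
 * queue 2 yields the unique tag (2 << 16) | 5 == 0x20005.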
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	int hwq = 0;

	if (q->mq_ops) {
		hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
		hwq = hctx->queue_num;
	}

	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);