blk-mq-tag.c
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

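/*
 * Return true if at least one tag in the regular (non-reserved) bitmap is
 * still free. A NULL @tags is treated as having free tags.
 */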
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up all tasks potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant: a depth of 1 can't be split fairly.
	 */
	if (bt->sb.depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
	    !hctx_may_queue(data->hctx, bt))
		return -1;
	if (data->shallow_depth)
		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}

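/*
 * Allocate a tag for @data. Returns BLK_MQ_TAG_FAIL if no tag is available
 * and BLK_MQ_REQ_NOWAIT is set; otherwise sleeps until one frees up. Note
 * that data->hctx and data->ctx may have changed on return, since the task
 * can migrate CPUs while waiting.
 */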
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_WAIT(wait);
	unsigned int tag_offset;
	bool drop_ctx;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_TAG_FAIL;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != -1)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_TAG_FAIL;

	ws = bt_wait_ptr(bt, data->hctx);
	drop_ctx = data->ctx == NULL;
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != -1)
			break;

		prepare_to_wait_exclusive(&ws->wait, &wait,
						TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != -1)
			break;

		if (data->ctx)
			blk_mq_put_ctx(data->ctx);

		bt_prev = bt;
		io_schedule();

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		finish_wait(&ws->wait, &wait);

		/*
		 * If the destination hw queue changed, issue a fake wakeup
		 * on the previous queue to compensate for the missed wakeup,
		 * so other allocations on the previous queue won't be starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	if (drop_ctx && data->ctx)
		blk_mq_put_ctx(data->ctx);

	finish_wait(&ws->wait, &wait);

found_tag:
	return tag + tag_offset;
}

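/*
 * Release @tag back to the bitmap it was allocated from, reserved or regular.
 */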
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
		    struct blk_mq_ctx *ctx, unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

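/*
 * Iteration context for walking one tag bitmap on behalf of a single
 * hardware queue; see bt_for_each().
 */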
struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	rq = tags->rqs[bitnr];

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (rq && rq->q == hctx->queue)
		iter_data->fn(hctx, rq, iter_data->data, reserved);
	return true;
}

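/*
 * Walk every allocated tag in @bt and call @fn for each request that
 * belongs to @hctx's queue.
 */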
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

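/*
 * Iteration context for walking one tag bitmap across a whole tag map,
 * independent of any hardware queue; see bt_tags_for_each().
 */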
struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = tags->rqs[bitnr];
	if (rq && blk_mq_request_started(rq))
		iter_data->fn(rq, iter_data->data, reserved);

	return true;
}

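/*
 * Walk every allocated tag in @bt and call @fn for each started request.
 */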
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, bool reserved)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

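/*
 * Iterate over all started requests in @tags, covering both the reserved
 * and the regular tag bitmaps.
 */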
static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv)
{
	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}

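/*
 * Iterate over all started requests in every hardware queue of @tagset.
 */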
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

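/*
 * Iterate over all in-flight requests on request queue @q, walking the tag
 * bitmaps of each mapped hardware queue.
 */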
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
	}
}

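/*
 * Thin wrapper around sbitmap_queue_init_node(), using the default
 * bits-per-word shift (-1).
 */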
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

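/*
 * Initialize the regular and reserved sbitmap queues for @tags. Frees @tags
 * and returns NULL on allocation failure.
 */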
static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
		goto free_tags;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
		     node))
		goto free_bitmap_tags;

	return tags;
free_bitmap_tags:
	sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
	kfree(tags);
	return NULL;
}

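/*
 * Allocate a blk_mq_tags structure covering @total_tags tags, of which the
 * first @reserved_tags are set aside as reserved tags.
 */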
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

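/*
 * Free both sbitmap queues and the blk_mq_tags structure itself.
 */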
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

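/*
 * Update the queue depth of @tagsptr to @tdepth. Shrinking only resizes the
 * regular bitmap; growing beyond the original size requires @can_grow and
 * allocates a new tag map (and requests) to replace the old one.
 */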
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	tdepth -= tags->nr_reserved_tags;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;
		bool ret;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > 16 * BLKDEV_MAX_RQ)
			return -EINVAL;

		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0);
		if (!new)
			return -ENOMEM;
		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
		if (ret) {
			blk_mq_free_rq_map(new);
			return -ENOMEM;
		}

		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
		blk_mq_free_rq_map(*tagsptr);
		*tagsptr = new;
	} else {
		/*
		 * We don't need to (and can't) update reserved tags here;
		 * they remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
	}

	return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	int hwq = 0;

	if (q->mq_ops) {
		hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
		hwq = hctx->queue_num;
	}

	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);