// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}
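
/*
 * Usage sketch (illustrative; assumes a blk-mq disk that registered as sda):
 *	echo 64 > /sys/block/sda/queue/nr_requests
 * Values below BLKDEV_MIN_RQ are silently raised to that minimum, and the
 * write fails with -EINVAL on queues that do not use blk-mq.
 */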

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
					(PAGE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);

	return ret;
}
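
/*
 * Worked example: with 4 KiB pages (PAGE_SHIFT == 12) the shift above is 2,
 * so writing 128 to read_ahead_kb stores 32 in backing_dev_info->ra_pages.
 */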

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}
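
/*
 * max_sectors_kb must lie between one page worth of data (page_kb) and the
 * hardware limit above, itself capped by max_dev_sectors when that is set;
 * backing_dev_info->io_pages is updated alongside it so the readahead code
 * sees the new cap.
 */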

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		 return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS
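
/*
 * The macro above expands to pairs such as queue_show_nonrot() and
 * queue_store_nonrot(). Note the inverted polarity for "nonrot": the sysfs
 * attribute is exposed as "rotational" below, so it reads 1 for rotational
 * devices even though the underlying flag is QUEUE_FLAG_NONROT.
 */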

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_nr_zones(q), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}
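
/*
 * nomerges accepts three values: 0 re-enables all merging, 2 sets
 * QUEUE_FLAG_NOMERGES (no merging at all), and any other non-zero value sets
 * QUEUE_FLAG_NOXMERGES, which skips the more involved merge lookups. The
 * show side packs both flags into the same 0/1/2 encoding.
 */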

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}
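
/*
 * Illustrative use (assuming a disk that registered as sda):
 *	echo 2 > /sys/block/sda/queue/rq_affinity
 * 2 sets both QUEUE_FLAG_SAME_COMP and QUEUE_FLAG_SAME_FORCE so completions
 * are forced onto the submitting CPU, 1 keeps QUEUE_FLAG_SAME_COMP only,
 * and 0 clears both flags.
 */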

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == -1)
		val = -1;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == -1)
		q->poll_nsec = -1;
	else
		q->poll_nsec = val * 1000;

	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
	    !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	if (poll_on)
		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);

	return ret;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);
	wbt_update_limits(q);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}
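
/*
 * wbt_lat_usec is specified in microseconds and stored in nanoseconds
 * internally. Writing -1 reverts to the device's default latency target
 * (wbt_default_latency_nsec()); the queue is frozen and quiesced around the
 * update, as noted in the comment above.
 */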

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}
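
/*
 * write_cache accepts the strings "write back" (sets QUEUE_FLAG_WC) and
 * "write through" or "none" (clears it), e.g.:
 *	echo "write back" > /sys/block/sda/queue/write_cache
 * with "sda" standing in for whatever name the disk registered under.
 */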

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = 0644 },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = 0644 },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = 0644 },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = 0444 },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = 0444 },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_discard_segments_entry = {
	.attr = {.name = "max_discard_segments", .mode = 0444 },
	.show = queue_max_discard_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = 0444 },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = 0444 },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = 0644 },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = 0444 },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_chunk_sectors_entry = {
	.attr = {.name = "chunk_sectors", .mode = 0444 },
	.show = queue_chunk_sectors_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = 0444 },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = 0444 },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = 0444 },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
	.attr = {.name = "discard_max_hw_bytes", .mode = 0444 },
	.show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = 0644 },
	.show = queue_discard_max_show,
	.store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = 0444 },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = 0444 },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
	.attr = {.name = "write_zeroes_max_bytes", .mode = 0444 },
	.show = queue_write_zeroes_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = 0644 },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_zoned_entry = {
	.attr = {.name = "zoned", .mode = 0444 },
	.show = queue_zoned_show,
};

static struct queue_sysfs_entry queue_nr_zones_entry = {
	.attr = {.name = "nr_zones", .mode = 0444 },
	.show = queue_nr_zones_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = 0644 },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = 0644 },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = 0644 },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = 0644 },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
	.attr = {.name = "io_poll", .mode = 0644 },
	.show = queue_poll_show,
	.store = queue_poll_store,
};

static struct queue_sysfs_entry queue_poll_delay_entry = {
	.attr = {.name = "io_poll_delay", .mode = 0644 },
	.show = queue_poll_delay_show,
	.store = queue_poll_delay_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
	.attr = {.name = "write_cache", .mode = 0644 },
	.show = queue_wc_show,
	.store = queue_wc_store,
};

static struct queue_sysfs_entry queue_fua_entry = {
	.attr = {.name = "fua", .mode = 0444 },
	.show = queue_fua_show,
};

static struct queue_sysfs_entry queue_dax_entry = {
	.attr = {.name = "dax", .mode = 0444 },
	.show = queue_dax_show,
};

static struct queue_sysfs_entry queue_io_timeout_entry = {
	.attr = {.name = "io_timeout", .mode = 0644 },
	.show = queue_io_timeout_show,
	.store = queue_io_timeout_store,
};

static struct queue_sysfs_entry queue_wb_lat_entry = {
	.attr = {.name = "wbt_lat_usec", .mode = 0644 },
	.show = queue_wb_lat_show,
	.store = queue_wb_lat_store,
};

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static struct queue_sysfs_entry throtl_sample_time_entry = {
	.attr = {.name = "throttle_sample_time", .mode = 0644 },
	.show = blk_throtl_sample_time_show,
	.store = blk_throtl_sample_time_store,
};
#endif

static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&throtl_sample_time_entry.attr,
#endif
	NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * __blk_release_queue - release a request queue when it is no longer needed
 * @work: pointer to the release_work member of the request queue to be released
 *
 * Description:
 *     blk_release_queue is the counterpart of blk_init_queue(). It should be
 *     called when a request queue is being released; typically when a block
 *     device is being de-registered. Its primary task is to free the queue
 *     itself.
 *
 * Notes:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 *
 *     Although blk_release_queue() may be called with preemption disabled,
 *     __blk_release_queue() may sleep.
 */
static void __blk_release_queue(struct work_struct *work)
{
	struct request_queue *q = container_of(work, typeof(*q), release_work);

	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	if (!blk_queue_dead(q)) {
		/*
		 * Last reference was dropped without having called
		 * blk_cleanup_queue().
		 */
		WARN_ONCE(blk_queue_init_done(q),
			  "request queue %p has been registered but blk_cleanup_queue() has not been called for that queue\n",
			  q);
		blk_exit_queue(q);
	}

	WARN(blk_queue_root_blkg(q),
	     "request queue %p is being released but it has not yet been removed from the blkcg controller\n",
	     q);

	blk_free_queue_stats(q->stats);

	blk_queue_free_zone_bitmaps(q);

	if (queue_is_mq(q))
		blk_mq_release(q);

	blk_trace_shutdown(q);

	if (queue_is_mq(q))
		blk_mq_debugfs_unregister(q);

	bioset_exit(&q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	INIT_WORK(&q->release_work, __blk_release_queue);
	schedule_work(&q->release_work);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags),
		  "%s is registering an already registered queue\n",
		  kobject_name(&dev->kobj));
	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	/* Prevent changes through sysfs until registration is completed. */
	mutex_lock(&q->sysfs_lock);

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	if (queue_is_mq(q)) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	wbt_enable_default(q);

	blk_throtl_register_queue(q);

	if (q->elevator) {
		ret = elv_register_queue(q);
		if (ret) {
			mutex_unlock(&q->sysfs_lock);
			kobject_uevent(&q->kobj, KOBJ_REMOVE);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			return ret;
		}
	}
	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_register_queue);

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);

	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);

	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_unregister_dev(disk_to_dev(disk), q);
	mutex_unlock(&q->sysfs_lock);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));

	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		elv_unregister_queue(q);
	mutex_unlock(&q->sysfs_lock);

	kobject_put(&disk_to_dev(disk)->kobj);
}