/* blk-mq: sysfs interface for hardware and software queues. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

static void blk_mq_sysfs_release(struct kobject *kobj)
{
18 19 20 21 22 23 24 25 26 27 28 29
	struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

	free_percpu(ctxs->queue_ctx);
	kfree(ctxs);
}

/*
 * Release handler for a software queue (ctx) kobject.  Each ctx pins the
 * shared ctxs container, so drop that reference here.
 */
static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctx *ctx;

	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	/* ctx->ctxs won't be released until all ctx are freed */
	kobject_put(&ctx->ctxs->kobj);
}

32 33 34 35
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);
36
	free_cpumask_var(hctx->cpumask);
37 38 39 40
	kfree(hctx->ctxs);
	kfree(hctx);
}

/*
 * Attribute descriptor for a software queue (ctx) sysfs file; the generic
 * kobject show/store ops dispatch through these typed callbacks.
 */
struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

/* Same idea, but for hardware queue (hctx) sysfs files. */
struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

/*
 * Generic ->show for ctx attributes: look up the typed entry, then call
 * its show callback under sysfs_lock, refusing if the queue is dying.
 */
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry =
		container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	struct request_queue *q = ctx->queue;
	ssize_t res = -ENOENT;

	if (!entry->show)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

/*
 * Generic ->store for ctx attributes: mirror image of blk_mq_sysfs_show,
 * forwarding to the typed store callback under sysfs_lock.
 */
static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry =
		container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	struct request_queue *q = ctx->queue;
	ssize_t res = -ENOENT;

	if (!entry->store)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

/*
 * Generic ->show for hctx attributes: dispatch to the typed show callback
 * under sysfs_lock, refusing if the queue is dying.
 */
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry =
		container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	struct blk_mq_hw_ctx *hctx =
		container_of(kobj, struct blk_mq_hw_ctx, kobj);
	struct request_queue *q = hctx->queue;
	ssize_t res = -ENOENT;

	if (!entry->show)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

/*
 * Generic ->store for hctx attributes: dispatch to the typed store
 * callback under sysfs_lock, refusing if the queue is dying.
 */
static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry =
		container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	struct blk_mq_hw_ctx *hctx =
		container_of(kobj, struct blk_mq_hw_ctx, kobj);
	struct request_queue *q = hctx->queue;
	ssize_t res = -ENOENT;

	if (!entry->store)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

146 147
static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
148
{
149
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
150 151
}

152 153
static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
154
{
155
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
156 157
}

158 159
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
160
	unsigned int i, first = 1;
161 162
	ssize_t ret = 0;

163
	for_each_cpu(i, hctx->cpumask) {
164 165 166 167 168 169 170 171 172 173 174 175
		if (first)
			ret += sprintf(ret + page, "%u", i);
		else
			ret += sprintf(ret + page, ", %u", i);

		first = 0;
	}

	ret += sprintf(ret + page, "\n");
	return ret;
}

/* Software queues currently expose no attributes of their own. */
static struct attribute *default_ctx_attrs[] = {
	NULL,
};

180
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
181
	.attr = {.name = "nr_tags", .mode = 0444 },
182 183 184
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
185
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
186 187
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
188
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
189
	.attr = {.name = "cpu_list", .mode = 0444 },
190 191
	.show = blk_mq_hw_sysfs_cpus_show,
};
192 193

static struct attribute *default_hw_ctx_attrs[] = {
194 195
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
196
	&blk_mq_hw_sysfs_cpus.attr,
197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217
	NULL,
};

/* kobject sysfs ops for the mq directory and ctx kobjects. */
static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show	= blk_mq_sysfs_show,
	.store	= blk_mq_sysfs_store,
};

/* kobject sysfs ops for hardware queue (hctx) kobjects. */
static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

/* kobj_type for the queue's top-level "mq" kobject. */
static struct kobj_type blk_mq_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.default_attrs	= default_ctx_attrs,
218
	.release	= blk_mq_ctx_sysfs_release,
219 220 221 222 223
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_attrs	= default_hw_ctx_attrs,
224
	.release	= blk_mq_hw_sysfs_release,
225 226
};

227
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
228 229 230 231
{
	struct blk_mq_ctx *ctx;
	int i;

232
	if (!hctx->nr_ctx)
233 234 235 236 237 238 239 240
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

241
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
242 243 244 245 246
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, ret;

247
	if (!hctx->nr_ctx)
248 249
		return 0;

250
	ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
251 252 253 254 255 256 257 258 259 260 261 262
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			break;
	}

	return ret;
}

263
void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
264
{
265
	struct blk_mq_hw_ctx *hctx;
266
	int i;
267

268 269
	lockdep_assert_held(&q->sysfs_lock);

270
	queue_for_each_hw_ctx(q, hctx, i)
271 272
		blk_mq_unregister_hctx(hctx);

273 274
	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
275
	kobject_put(&dev->kobj);
276 277

	q->mq_sysfs_init_done = false;
278 279
}

/* Initialize a hardware queue's kobject (registered later in sysfs). */
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

285 286 287 288 289 290 291 292 293
void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
294
	kobject_put(q->mq_kobj);
295 296
}

297
void blk_mq_sysfs_init(struct request_queue *q)
298 299
{
	struct blk_mq_ctx *ctx;
300
	int cpu;
301

302
	kobject_init(q->mq_kobj, &blk_mq_ktype);
303

304 305
	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
306 307

		kobject_get(q->mq_kobj);
308
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
309
	}
310 311
}

312
int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
313 314
{
	struct blk_mq_hw_ctx *hctx;
315
	int ret, i;
316

317 318
	WARN_ON_ONCE(!q->kobj.parent);
	lockdep_assert_held(&q->sysfs_lock);
319

320
	ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
321
	if (ret < 0)
322
		goto out;
323

324
	kobject_uevent(q->mq_kobj, KOBJ_ADD);
325 326

	queue_for_each_hw_ctx(q, hctx, i) {
327
		ret = blk_mq_register_hctx(hctx);
328
		if (ret)
329
			goto unreg;
330 331
	}

332
	q->mq_sysfs_init_done = true;
333

334
out:
335
	return ret;
336 337 338 339 340

unreg:
	while (--i >= 0)
		blk_mq_unregister_hctx(q->queue_hw_ctx[i]);

341 342
	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
343 344
	kobject_put(&dev->kobj);
	return ret;
345 346 347 348 349 350 351 352 353
}

/* Locked wrapper around __blk_mq_register_dev(). */
int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	int ret;

	mutex_lock(&q->sysfs_lock);
	ret = __blk_mq_register_dev(dev, q);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}
357 358 359 360 361 362

void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

363
	mutex_lock(&q->sysfs_lock);
364
	if (!q->mq_sysfs_init_done)
365
		goto unlock;
366

367 368
	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);
369 370 371

unlock:
	mutex_unlock(&q->sysfs_lock);
372 373 374 375 376 377 378
}

int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

379
	mutex_lock(&q->sysfs_lock);
380
	if (!q->mq_sysfs_init_done)
381
		goto unlock;
382

383 384 385 386 387 388
	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

389 390 391
unlock:
	mutex_unlock(&q->sysfs_lock);

392 393
	return ret;
}