/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/t10-pi.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <asm/unaligned.h>
#include <linux/sed-opal.h>

#include "nvme.h"

#define NVME_Q_DEPTH		1024
#define NVME_AQ_DEPTH		256
#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))

/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_AQ_BLKMQ_DEPTH	(NVME_AQ_DEPTH - NVME_NR_AERS)

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0644);
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");

static struct workqueue_struct *nvme_workq;

struct nvme_dev;
struct nvme_queue;

static int nvme_reset(struct nvme_dev *dev);
static void nvme_process_cq(struct nvme_queue *nvmeq);
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct nvme_queue **queues;
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	unsigned queue_count;
	unsigned online_queues;
	unsigned max_qid;
	int q_depth;
	u32 db_stride;
	void __iomem *bar;
	struct work_struct reset_work;
	struct work_struct remove_work;
	struct timer_list watchdog_timer;
	struct mutex shutdown_lock;
	bool subsystem;
	void __iomem *cmb;
	dma_addr_t cmb_dma_addr;
	u64 cmb_size;
	u32 cmbsz;
	u32 cmbloc;
	struct nvme_ctrl ctrl;
	struct completion ioq_wait;
	u32 *dbbuf_dbs;
	dma_addr_t dbbuf_dbs_dma_addr;
	u32 *dbbuf_eis;
	dma_addr_t dbbuf_eis_dma_addr;
};

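/*
 * Doorbell buffer layout: each queue owns a pair of stride-spaced slots,
 * with the submission queue doorbell at the even index and the completion
 * queue doorbell at the odd one.
 */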
static inline unsigned int sq_idx(unsigned int qid, u32 stride)
{
	return qid * 2 * stride;
}

static inline unsigned int cq_idx(unsigned int qid, u32 stride)
{
	return (qid * 2 + 1) * stride;
}

static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_dev, ctrl);
}

/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct device *q_dmadev;
	struct nvme_dev *dev;
	spinlock_t q_lock;
	struct nvme_command *sq_cmds;
	struct nvme_command __iomem *sq_cmds_io;
	volatile struct nvme_completion *cqes;
	struct blk_mq_tags **tags;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	u32 __iomem *q_db;
	u16 q_depth;
	s16 cq_vector;
	u16 sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 cqe_seen;
	u32 *dbbuf_sq_db;
	u32 *dbbuf_cq_db;
	u32 *dbbuf_sq_ei;
	u32 *dbbuf_cq_ei;
};

/*
 * The nvme_iod describes the data in an I/O, including the list of PRP
 * entries.  You can't see it in this data structure because C doesn't let
 * me express that.  Use nvme_init_iod to ensure there's enough space
 * allocated to store the PRP list.
 */
struct nvme_iod {
	struct nvme_request req;
	struct nvme_queue *nvmeq;
	int aborted;
	int npages;		/* In the PRP list. 0 means small pool in use */
	int nents;		/* Used in scatterlist */
	int length;		/* Of data, in bytes */
	dma_addr_t first_dma;
	struct scatterlist meta_sg; /* metadata requires single contiguous buffer */
	struct scatterlist *sg;
	struct scatterlist inline_sg[0];
};

/*
 * Check we didn't inadvertently grow the command struct
 */
static inline void _nvme_check_size(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
	BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
}

static inline unsigned int nvme_dbbuf_size(u32 stride)
{
	return ((num_possible_cpus() + 1) * 8 * stride);
}

static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);

	if (dev->dbbuf_dbs)
		return 0;

	dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_dbs_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_dbs)
		return -ENOMEM;
	dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_eis_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev->db_stride);

	if (dev->dbbuf_dbs) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
	}
	if (dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
		dev->dbbuf_eis = NULL;
	}
}

static void nvme_dbbuf_init(struct nvme_dev *dev,
			    struct nvme_queue *nvmeq, int qid)
{
	if (!dev->dbbuf_dbs || !qid)
		return;

	nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}

static void nvme_dbbuf_set(struct nvme_dev *dev)
{
	struct nvme_command c;

	if (!dev->dbbuf_dbs)
		return;

	memset(&c, 0, sizeof(c));
	c.dbbuf.opcode = nvme_admin_dbbuf;
	c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
	c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);

	if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
		dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
		/* Free memory and continue on */
		nvme_dbbuf_dma_free(dev);
	}
}

static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
{
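	/*
	 * Event-index test done in 16-bit wraparound arithmetic (the same
	 * style of check as virtio's vring_need_event()): return true only
	 * when this update moves the doorbell value past the event index
	 * the controller asked to be notified at.
	 */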
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}

/* Update dbbuf and return true if an MMIO is required */
static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
					      volatile u32 *dbbuf_ei)
{
	if (dbbuf_db) {
		u16 old_value;

		/*
		 * Ensure that the queue is written before updating
		 * the doorbell in memory
		 */
		wmb();

		old_value = *dbbuf_db;
		*dbbuf_db = value;

		if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
			return false;
	}

	return true;
}

/*
 * Max size of iod being embedded in the request payload
 */
#define NVME_INT_PAGES		2
#define NVME_INT_BYTES(dev)	(NVME_INT_PAGES * (dev)->ctrl.page_size)

/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_npages(unsigned size, struct nvme_dev *dev)
{
	unsigned nprps = DIV_ROUND_UP(size + dev->ctrl.page_size,
				      dev->ctrl.page_size);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

static unsigned int nvme_iod_alloc_size(struct nvme_dev *dev,
		unsigned int size, unsigned int nseg)
{
	return sizeof(__le64 *) * nvme_npages(size, dev) +
			sizeof(struct scatterlist) * nseg;
}

static unsigned int nvme_cmd_size(struct nvme_dev *dev)
{
	return sizeof(struct nvme_iod) +
		nvme_iod_alloc_size(dev, NVME_INT_BYTES(dev), NVME_INT_PAGES);
}

static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = dev->queues[0];

	WARN_ON(hctx_idx != 0);
	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);
	WARN_ON(nvmeq->tags);

	hctx->driver_data = nvmeq;
	nvmeq->tags = &dev->admin_tagset.tags[0];
	return 0;
}

static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	nvmeq->tags = NULL;
}

static int nvme_admin_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_dev *dev = set->driver_data;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = dev->queues[0];

	BUG_ON(!nvmeq);
	iod->nvmeq = nvmeq;
	return 0;
}

static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];

	if (!nvmeq->tags)
		nvmeq->tags = &dev->tagset.tags[hctx_idx];

	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_dev *dev = set->driver_data;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1];

	BUG_ON(!nvmeq);
	iod->nvmeq = nvmeq;
	return 0;
}

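/*
 * Let blk-mq spread the hardware contexts over the PCI device's MSI/MSI-X
 * vectors, so each hardware queue is serviced by the CPUs its vector targets.
 */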
static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_dev *dev = set->driver_data;

	return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev));
}

/**
 * __nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 *
 * Safe to use from interrupt context
 */
static void __nvme_submit_cmd(struct nvme_queue *nvmeq,
						struct nvme_command *cmd)
{
	u16 tail = nvmeq->sq_tail;

	if (nvmeq->sq_cmds_io)
		memcpy_toio(&nvmeq->sq_cmds_io[tail], cmd, sizeof(*cmd));
	else
		memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));

	if (++tail == nvmeq->q_depth)
		tail = 0;
	if (nvme_dbbuf_update_and_check_event(tail, nvmeq->dbbuf_sq_db,
					      nvmeq->dbbuf_sq_ei))
		writel(tail, nvmeq->q_db);
	nvmeq->sq_tail = tail;
}

static __le64 **iod_list(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
}

static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
	int nseg = blk_rq_nr_phys_segments(rq);
	unsigned int size = blk_rq_payload_bytes(rq);

	if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
		iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
		if (!iod->sg)
			return BLK_MQ_RQ_QUEUE_BUSY;
	} else {
		iod->sg = iod->inline_sg;
	}

	iod->aborted = 0;
	iod->npages = -1;
	iod->nents = 0;
	iod->length = size;

	return BLK_MQ_RQ_QUEUE_OK;
}

static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	const int last_prp = dev->ctrl.page_size / 8 - 1;
	int i;
	__le64 **list = iod_list(req);
	dma_addr_t prp_dma = iod->first_dma;

	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
	for (i = 0; i < iod->npages; i++) {
		__le64 *prp_list = list[i];
		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
		prp_dma = next_prp_dma;
	}

	if (iod->sg != iod->inline_sg)
		kfree(iod->sg);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
{
	if (be32_to_cpu(pi->ref_tag) == v)
		pi->ref_tag = cpu_to_be32(p);
}

static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
{
	if (be32_to_cpu(pi->ref_tag) == p)
		pi->ref_tag = cpu_to_be32(v);
}

/**
 * nvme_dif_remap - remaps ref tags to bip seed and physical lba
 *
 * The virtual start sector is the one that was originally submitted by the
 * block layer.	Due to partitioning, MD/DM cloning, etc. the actual physical
 * start sector may be different. Remap protection information to match the
 * physical LBA on writes, and back to the original seed on reads.
 *
 * Type 0 and 3 do not have a ref tag, so no remapping required.
 */
static void nvme_dif_remap(struct request *req,
			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
{
	struct nvme_ns *ns = req->rq_disk->private_data;
	struct bio_integrity_payload *bip;
	struct t10_pi_tuple *pi;
	void *p, *pmap;
	u32 i, nlb, ts, phys, virt;

	if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
		return;

	bip = bio_integrity(req->bio);
	if (!bip)
		return;

	pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;

	p = pmap;
	virt = bip_get_seed(bip);
	phys = nvme_block_nr(ns, blk_rq_pos(req));
	nlb = (blk_rq_bytes(req) >> ns->lba_shift);
	ts = ns->disk->queue->integrity.tuple_size;

	for (i = 0; i < nlb; i++, virt++, phys++) {
		pi = (struct t10_pi_tuple *)p;
		dif_swap(phys, virt, pi);
		p += ts;
	}
	kunmap_atomic(pmap);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static void nvme_dif_remap(struct request *req,
			void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
{
}
static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
{
}
static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
{
}
#endif

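/*
 * Build the PRP entries for iod->sg.  PRP1 in the command covers the first
 * (possibly unaligned) chunk of the transfer.  If the rest fits in one more
 * controller page, iod->first_dma (PRP2) points at it directly; otherwise it
 * points at a chained PRP list allocated from one of the two DMA pools,
 * where the last slot of each list page holds the address of the next page.
 */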
static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	int length = blk_rq_payload_bytes(req);
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	u32 page_size = dev->ctrl.page_size;
	int offset = dma_addr & (page_size - 1);
	__le64 *prp_list;
	__le64 **list = iod_list(req);
	dma_addr_t prp_dma;
	int nprps, i;

	length -= (page_size - offset);
	if (length <= 0)
		return true;

	dma_len -= (page_size - offset);
	if (dma_len) {
		dma_addr += (page_size - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= page_size) {
		iod->first_dma = dma_addr;
		return true;
	}

	nprps = DIV_ROUND_UP(length, page_size);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list) {
		iod->first_dma = dma_addr;
		iod->npages = -1;
		return false;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	i = 0;
	for (;;) {
		if (i == page_size >> 3) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			if (!prp_list)
				return false;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= page_size;
		dma_addr += page_size;
		length -= page_size;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		BUG_ON(dma_len < 0);
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	return true;
}

static int nvme_map_data(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct request_queue *q = req->q;
	enum dma_data_direction dma_dir = rq_data_dir(req) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;
	int ret = BLK_MQ_RQ_QUEUE_ERROR;

	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
	iod->nents = blk_rq_map_sg(q, req, iod->sg);
	if (!iod->nents)
		goto out;

	ret = BLK_MQ_RQ_QUEUE_BUSY;
	if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
				DMA_ATTR_NO_WARN))
		goto out;

	if (!nvme_setup_prps(dev, req))
		goto out_unmap;

	ret = BLK_MQ_RQ_QUEUE_ERROR;
	if (blk_integrity_rq(req)) {
		if (blk_rq_count_integrity_sg(q, req->bio) != 1)
			goto out_unmap;

		sg_init_table(&iod->meta_sg, 1);
		if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1)
			goto out_unmap;

		if (rq_data_dir(req))
			nvme_dif_remap(req, nvme_dif_prep);

		if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
			goto out_unmap;
	}

	cmnd->rw.dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	cmnd->rw.dptr.prp2 = cpu_to_le64(iod->first_dma);
	if (blk_integrity_rq(req))
		cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
	return BLK_MQ_RQ_QUEUE_OK;

out_unmap:
	dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
out:
	return ret;
}

static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	enum dma_data_direction dma_dir = rq_data_dir(req) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;

	if (iod->nents) {
		dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
		if (blk_integrity_rq(req)) {
			if (!rq_data_dir(req))
				nvme_dif_remap(req, nvme_dif_complete);
			dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
		}
	}

	nvme_cleanup_cmd(req);
	nvme_free_iod(dev, req);
}

/*
 * NOTE: ns is NULL when called on the admin queue.
 */
static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_queue *nvmeq = hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *req = bd->rq;
	struct nvme_command cmnd;
	int ret = BLK_MQ_RQ_QUEUE_OK;

	/*
	 * If formatted with metadata, require the block layer provide a buffer
	 * unless this namespace is formatted such that the metadata can be
	 * stripped/generated by the controller with PRACT=1.
	 */
	if (ns && ns->ms && !blk_integrity_rq(req)) {
		if (!(ns->pi_type && ns->ms == 8) &&
		    !blk_rq_is_passthrough(req)) {
			blk_mq_end_request(req, -EFAULT);
			return BLK_MQ_RQ_QUEUE_OK;
		}
	}

	ret = nvme_setup_cmd(ns, req, &cmnd);
	if (ret != BLK_MQ_RQ_QUEUE_OK)
		return ret;

	ret = nvme_init_iod(req, dev);
	if (ret != BLK_MQ_RQ_QUEUE_OK)
		goto out_free_cmd;

	if (blk_rq_nr_phys_segments(req))
		ret = nvme_map_data(dev, req, &cmnd);

	if (ret != BLK_MQ_RQ_QUEUE_OK)
		goto out_cleanup_iod;

	blk_mq_start_request(req);

	spin_lock_irq(&nvmeq->q_lock);
	if (unlikely(nvmeq->cq_vector < 0)) {
		ret = BLK_MQ_RQ_QUEUE_ERROR;
		spin_unlock_irq(&nvmeq->q_lock);
		goto out_cleanup_iod;
	}
	__nvme_submit_cmd(nvmeq, &cmnd);
	nvme_process_cq(nvmeq);
	spin_unlock_irq(&nvmeq->q_lock);
	return BLK_MQ_RQ_QUEUE_OK;
out_cleanup_iod:
	nvme_free_iod(dev, req);
out_free_cmd:
	nvme_cleanup_cmd(req);
	return ret;
}

static void nvme_pci_complete_rq(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_unmap_data(iod->nvmeq->dev, req);
	nvme_complete_rq(req);
}

/* We read the CQE phase first to check if the rest of the entry is valid */
static inline bool nvme_cqe_valid(struct nvme_queue *nvmeq, u16 head,
		u16 phase)
{
	return (le16_to_cpu(nvmeq->cqes[head].status) & 1) == phase;
}

static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
{
	u16 head, phase;

	head = nvmeq->cq_head;
	phase = nvmeq->cq_phase;

	while (nvme_cqe_valid(nvmeq, head, phase)) {
		struct nvme_completion cqe = nvmeq->cqes[head];
		struct request *req;

		if (++head == nvmeq->q_depth) {
			head = 0;
			phase = !phase;
		}

		if (tag && *tag == cqe.command_id)
			*tag = -1;

		if (unlikely(cqe.command_id >= nvmeq->q_depth)) {
			dev_warn(nvmeq->dev->ctrl.device,
				"invalid id %d completed on queue %d\n",
				cqe.command_id, le16_to_cpu(cqe.sq_id));
			continue;
		}

		/*
		 * AEN requests are special as they don't time out and can
		 * survive any kind of queue freeze and often don't respond to
		 * aborts.  We don't even bother to allocate a struct request
		 * for them but rather special case them here.
		 */
		if (unlikely(nvmeq->qid == 0 &&
				cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
			nvme_complete_async_event(&nvmeq->dev->ctrl,
					cqe.status, &cqe.result);
			continue;
		}

		req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
		nvme_end_request(req, cqe.status, cqe.result);
	}

	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
		return;

	if (likely(nvmeq->cq_vector >= 0))
		if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
						      nvmeq->dbbuf_cq_ei))
			writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
	nvmeq->cq_head = head;
	nvmeq->cq_phase = phase;

	nvmeq->cqe_seen = 1;
}

static void nvme_process_cq(struct nvme_queue *nvmeq)
{
	__nvme_process_cq(nvmeq, NULL);
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	irqreturn_t result;
	struct nvme_queue *nvmeq = data;
	spin_lock(&nvmeq->q_lock);
	nvme_process_cq(nvmeq);
	result = nvmeq->cqe_seen ? IRQ_HANDLED : IRQ_NONE;
	nvmeq->cqe_seen = 0;
	spin_unlock(&nvmeq->q_lock);
	return result;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
		return IRQ_WAKE_THREAD;
	return IRQ_NONE;
}

static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
{
	if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) {
		spin_lock_irq(&nvmeq->q_lock);
		__nvme_process_cq(nvmeq, &tag);
		spin_unlock_irq(&nvmeq->q_lock);

		if (tag == -1)
			return 1;
	}

	return 0;
}

static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	return __nvme_poll(nvmeq, tag);
}

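/*
 * AEN commands are submitted directly on the admin queue with command IDs
 * at or above NVME_AQ_BLKMQ_DEPTH, outside the tag space blk-mq manages;
 * their completions are special-cased in __nvme_process_cq().
 */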
static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl, int aer_idx)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);
	struct nvme_queue *nvmeq = dev->queues[0];
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_async_event;
	c.common.command_id = NVME_AQ_BLKMQ_DEPTH + aer_idx;

	spin_lock_irq(&nvmeq->q_lock);
	__nvme_submit_cmd(nvmeq, &c);
	spin_unlock_irq(&nvmeq->q_lock);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	memset(&c, 0, sizeof(c));
	c.create_cq.opcode = nvme_admin_create_cq;
	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
	c.create_cq.cqid = cpu_to_le16(qid);
	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_cq.cq_flags = cpu_to_le16(flags);
	c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
						struct nvme_queue *nvmeq)
{
	struct nvme_command c;
	int flags = NVME_QUEUE_PHYS_CONTIG;

	/*
	 * Note: we (ab)use the fact that the prp fields survive if no data
	 * is attached to the request.
	 */
	memset(&c, 0, sizeof(c));
	c.create_sq.opcode = nvme_admin_create_sq;
	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
	c.create_sq.sqid = cpu_to_le16(qid);
	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
	c.create_sq.sq_flags = cpu_to_le16(flags);
	c.create_sq.cqid = cpu_to_le16(qid);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
}

static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
{
	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
}

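/*
 * Completion handler for the Abort command sent from the timeout handler:
 * log the abort status and give back the abort slot taken in nvme_timeout().
 */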
static void abort_endio(struct request *req, int error)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = iod->nvmeq;

	dev_warn(nvmeq->dev->ctrl.device,
		 "Abort status: 0x%x", nvme_req(req)->status);
	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
	blk_mq_free_request(req);
}

static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = iod->nvmeq;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *abort_req;
	struct nvme_command cmd;

	/*
	 * Did we miss an interrupt?
	 */
	if (__nvme_poll(nvmeq, req->tag)) {
		dev_warn(dev->ctrl.device,
			 "I/O %d QID %d timeout, completion polled\n",
			 req->tag, nvmeq->qid);
		return BLK_EH_HANDLED;
	}

	/*
	 * Shutdown immediately if controller times out while starting. The
	 * reset work will see the pci device disabled when it gets the forced
	 * cancellation error. All outstanding requests are completed on
	 * shutdown, so we return BLK_EH_HANDLED.
	 */
	if (dev->ctrl.state == NVME_CTRL_RESETTING) {
		dev_warn(dev->ctrl.device,
			 "I/O %d QID %d timeout, disable controller\n",
			 req->tag, nvmeq->qid);
		nvme_dev_disable(dev, false);
		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
		return BLK_EH_HANDLED;
	}

	/*
	 * Shutdown the controller immediately and schedule a reset if the
	 * command was already aborted once before and still hasn't been
	 * returned to the driver, or if this is the admin queue.
	 */
	if (!nvmeq->qid || iod->aborted) {
		dev_warn(dev->ctrl.device,
			 "I/O %d QID %d timeout, reset controller\n",
			 req->tag, nvmeq->qid);
		nvme_dev_disable(dev, false);
		nvme_reset(dev);

		/*
		 * Mark the request as handled, since the inline shutdown
		 * forces all outstanding requests to complete.
		 */
		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
		return BLK_EH_HANDLED;
	}

	if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}
	iod->aborted = 1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.abort.opcode = nvme_admin_abort_cmd;
	cmd.abort.cid = req->tag;
	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);

	dev_warn(nvmeq->dev->ctrl.device,
		"I/O %d QID %d timeout, aborting\n",
		 req->tag, nvmeq->qid);

	abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
			BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
	if (IS_ERR(abort_req)) {
		atomic_inc(&dev->ctrl.abort_limit);
		return BLK_EH_RESET_TIMER;
	}

	abort_req->timeout = ADMIN_TIMEOUT;
	abort_req->end_io_data = NULL;
	blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);

	/*
	 * The aborted req will be completed on receiving the abort req.
	 * We enable the timer again. If hit twice, it'll cause a device reset,
	 * as the device then is in a faulty state.
	 */
	return BLK_EH_RESET_TIMER;
}

static void nvme_free_queue(struct nvme_queue *nvmeq)
{
	dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
	if (nvmeq->sq_cmds)
		dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
					nvmeq->sq_cmds, nvmeq->sq_dma_addr);
	kfree(nvmeq);
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
	int i;

	for (i = dev->queue_count - 1; i >= lowest; i--) {
		struct nvme_queue *nvmeq = dev->queues[i];
		dev->queue_count--;
		dev->queues[i] = NULL;
		nvme_free_queue(nvmeq);
	}
}

/**
 * nvme_suspend_queue - put queue into suspended state
 * @nvmeq - queue to suspend
 */
static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
	int vector;

	spin_lock_irq(&nvmeq->q_lock);
	if (nvmeq->cq_vector == -1) {
		spin_unlock_irq(&nvmeq->q_lock);
		return 1;
	}
	vector = nvmeq->cq_vector;
	nvmeq->dev->online_queues--;
	nvmeq->cq_vector = -1;
	spin_unlock_irq(&nvmeq->q_lock);

	if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
		blk_mq_stop_hw_queues(nvmeq->dev->ctrl.admin_q);

	pci_free_irq(to_pci_dev(nvmeq->dev->dev), vector, nvmeq);

	return 0;
}

static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
{
	struct nvme_queue *nvmeq = dev->queues[0];

	if (!nvmeq)
		return;
	if (nvme_suspend_queue(nvmeq))
		return;

	if (shutdown)
		nvme_shutdown_ctrl(&dev->ctrl);
	else
		nvme_disable_ctrl(&dev->ctrl, lo_hi_readq(
						dev->bar + NVME_REG_CAP));

	spin_lock_irq(&nvmeq->q_lock);
	nvme_process_cq(nvmeq);
	spin_unlock_irq(&nvmeq->q_lock);
}

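/*
 * When submission queues live in the controller memory buffer, shrink the
 * per-queue depth so that all I/O queues fit in the CMB, or return -ENOMEM
 * if that would leave fewer than 64 entries per queue (in which case mapping
 * the queues in host memory at the original depth is preferable).
 */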
static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
				int entry_size)
{
	int q_depth = dev->q_depth;
	unsigned q_size_aligned = roundup(q_depth * entry_size,
					  dev->ctrl.page_size);

	if (q_size_aligned * nr_io_queues > dev->cmb_size) {
		u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues);
		mem_per_q = round_down(mem_per_q, dev->ctrl.page_size);
		q_depth = div_u64(mem_per_q, entry_size);

		/*
		 * Ensure the reduced q_depth is above some threshold where it
		 * would be better to map queues in system memory with the
		 * original depth
		 */
		if (q_depth < 64)
			return -ENOMEM;
	}

	return q_depth;
}

static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
				int qid, int depth)
{
	if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
		unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
						      dev->ctrl.page_size);
		nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset;
		nvmeq->sq_cmds_io = dev->cmb + offset;
	} else {
		nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
					&nvmeq->sq_dma_addr, GFP_KERNEL);
		if (!nvmeq->sq_cmds)
			return -ENOMEM;
	}

	return 0;
}

static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
							int depth, int node)
{
	struct nvme_queue *nvmeq = kzalloc_node(sizeof(*nvmeq), GFP_KERNEL,
							node);
	if (!nvmeq)
		return NULL;

	nvmeq->cqes = dma_zalloc_coherent(dev->dev, CQ_SIZE(depth),
					  &nvmeq->cq_dma_addr, GFP_KERNEL);
	if (!nvmeq->cqes)
		goto free_nvmeq;

	if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
		goto free_cqdma;

	nvmeq->q_dmadev = dev->dev;
	nvmeq->dev = dev;
	spin_lock_init(&nvmeq->q_lock);
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	nvmeq->q_depth = depth;
	nvmeq->qid = qid;
	nvmeq->cq_vector = -1;
	dev->queues[qid] = nvmeq;
	dev->queue_count++;

	return nvmeq;

 free_cqdma:
	dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
							nvmeq->cq_dma_addr);
 free_nvmeq:
	kfree(nvmeq);
	return NULL;
}

static int queue_request_irq(struct nvme_queue *nvmeq)
{
	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
	int nr = nvmeq->dev->ctrl.instance;

	if (use_threaded_interrupts) {
		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
				nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
	} else {
		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
				NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
	}
}

static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
	struct nvme_dev *dev = nvmeq->dev;

	spin_lock_irq(&nvmeq->q_lock);
	nvmeq->sq_tail = 0;
	nvmeq->cq_head = 0;
	nvmeq->cq_phase = 1;
	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
	nvme_dbbuf_init(dev, nvmeq, qid);
	dev->online_queues++;