/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched/sysctl.h>

#include "blk.h"
#include "blk-mq-sched.h"

/**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
17
 * @error: end I/O status of the request
18
 */
19
static void blk_end_sync_rq(struct request *rq, blk_status_t error)
20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = NULL;

	/*
	 * complete last, if this is a stack request the process (and thus
	 * the rq pointer) could be invalid right after this complete()
	 */
	complete(waiting);
}

/**
 * blk_execute_rq_nowait - insert a request into queue for execution
 * @q:		queue to insert the request in
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:    insert request at head or tail of queue
 * @done:	I/O completion handler
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution.  Don't wait for completion.
 *
 * Note:
 *    This function will invoke @done directly if the queue is dead.
 */
void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
			   struct request *rq, int at_head,
			   rq_end_io_fn *done)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;

	/* insertion may sleep/block; only passthrough requests go this way */
	WARN_ON(irqs_disabled());
	WARN_ON(!blk_rq_is_passthrough(rq));

	rq->rq_disk = bd_disk;
	rq->end_io = done;

	/*
	 * don't check dying flag for MQ because the request won't
	 * be reused after dying flag is set
	 */
	if (q->mq_ops) {
		blk_mq_sched_insert_request(rq, at_head, true, false);
		return;
	}

	spin_lock_irq(q->queue_lock);

	if (unlikely(blk_queue_dying(q))) {
		/* queue is going away: fail the request quietly via @done */
		rq->rq_flags |= RQF_QUIET;
		__blk_end_request_all(rq, BLK_STS_IOERR);
		spin_unlock_irq(q->queue_lock);
		return;
	}

	__elv_add_request(q, rq, where);
	__blk_run_queue(q);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);

/**
 * blk_execute_rq - insert a request into queue for execution
 * @q:		queue to insert the request in
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:    insert request at head or tail of queue
 *
 * Description:
91
 *    Insert a fully prepared request at the back of the I/O scheduler queue
92 93
 *    for execution and wait for completion.
 */
94
void blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
95 96 97
		   struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);
98
	unsigned long hang_check;
99 100 101

	rq->end_io_data = &wait;
	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
102 103 104 105

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
106
		while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
107
	else
108
		wait_for_completion_io(&wait);
109 110
}
EXPORT_SYMBOL(blk_execute_rq);