Commit 4e4cbee9 authored by Christoph Hellwig, committed by Jens Axboe

block: switch bios to blk_status_t

Replace bi_error with a new bi_status to allow for a clear conversion.
Note that device mapper overloaded bi_error with a private value, which
we'll have to keep around at least for now and thus propagate to a
proper blk_status_t value.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent fc17b653
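
For drivers touched by this series, the completion-side pattern is to read a blk_status_t from bio->bi_status instead of an errno from bio->bi_error, converting with blk_status_to_errno()/errno_to_blk_status() only at boundaries that still traffic in errnos (both helpers appear in the hunks below). A minimal sketch of an end_io callback under the new scheme; the example_ctx structure and function names are made up for illustration and are not part of this patch:

	#include <linux/bio.h>
	#include <linux/completion.h>

	/* hypothetical per-I/O context, mirroring the submit_bio_wait_endio() change below */
	struct example_ctx {
		int error;			/* errno handed back to upper layers */
		struct completion done;
	};

	static void example_end_io(struct bio *bio)
	{
		struct example_ctx *ctx = bio->bi_private;

		/* was: ctx->error = bio->bi_error; */
		ctx->error = blk_status_to_errno(bio->bi_status);
		complete(&ctx->done);
	}

The device-mapper private value mentioned above is carried as BLK_STS_DM_REQUEUE, which the status table below maps to -EREMCHG and marks as a value that should not leak out of DM.
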
@@ -221,7 +221,7 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
  * @bio:	bio to generate/verify integrity metadata for
  * @proc_fn:	Pointer to the relevant processing function
  */
-static int bio_integrity_process(struct bio *bio,
+static blk_status_t bio_integrity_process(struct bio *bio,
 		integrity_processing_fn *proc_fn)
 {
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
@@ -229,7 +229,7 @@ static int bio_integrity_process(struct bio *bio,
 	struct bvec_iter bviter;
 	struct bio_vec bv;
 	struct bio_integrity_payload *bip = bio_integrity(bio);
-	unsigned int ret = 0;
+	blk_status_t ret = BLK_STS_OK;
 	void *prot_buf = page_address(bip->bip_vec->bv_page) +
 		bip->bip_vec->bv_offset;
@@ -366,7 +366,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
 	struct bio *bio = bip->bip_bio;
 	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
-	bio->bi_error = bio_integrity_process(bio, bi->profile->verify_fn);
+	bio->bi_status = bio_integrity_process(bio, bi->profile->verify_fn);
 	/* Restore original bio completion handler */
 	bio->bi_end_io = bip->bip_end_io;
@@ -395,7 +395,7 @@ void bio_integrity_endio(struct bio *bio)
 	 * integrity metadata. Restore original bio end_io handler
 	 * and run it.
 	 */
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		bio->bi_end_io = bip->bip_end_io;
 		bio_endio(bio);
...
@@ -309,8 +309,8 @@ static struct bio *__bio_chain_endio(struct bio *bio)
 {
 	struct bio *parent = bio->bi_private;
-	if (!parent->bi_error)
-		parent->bi_error = bio->bi_error;
+	if (!parent->bi_status)
+		parent->bi_status = bio->bi_status;
 	bio_put(bio);
 	return parent;
 }
@@ -918,7 +918,7 @@ static void submit_bio_wait_endio(struct bio *bio)
 {
 	struct submit_bio_ret *ret = bio->bi_private;
-	ret->error = bio->bi_error;
+	ret->error = blk_status_to_errno(bio->bi_status);
 	complete(&ret->event);
 }
@@ -1818,7 +1818,7 @@ void bio_endio(struct bio *bio)
 	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
 		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev),
-					 bio, bio->bi_error);
+					 bio, bio->bi_status);
 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
 	}
...
@@ -144,6 +144,9 @@ static const struct {
 	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
 	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
+	/* device mapper special case, should not leak out: */
+	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },
 	/* everything else not covered above: */
 	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
 };
@@ -188,7 +191,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 			  unsigned int nbytes, blk_status_t error)
 {
 	if (error)
-		bio->bi_error = blk_status_to_errno(error);
+		bio->bi_status = error;
 	if (unlikely(rq->rq_flags & RQF_QUIET))
 		bio_set_flag(bio, BIO_QUIET);
@@ -1717,7 +1720,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 	blk_queue_split(q, &bio, q->bio_split);
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-		bio->bi_error = -EIO;
+		bio->bi_status = BLK_STS_IOERR;
 		bio_endio(bio);
 		return BLK_QC_T_NONE;
 	}
@@ -1775,7 +1778,10 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 	req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
 	if (IS_ERR(req)) {
 		__wbt_done(q->rq_wb, wb_acct);
-		bio->bi_error = PTR_ERR(req);
+		if (PTR_ERR(req) == -ENOMEM)
+			bio->bi_status = BLK_STS_RESOURCE;
+		else
+			bio->bi_status = BLK_STS_IOERR;
 		bio_endio(bio);
 		goto out_unlock;
 	}
@@ -1930,7 +1936,7 @@ generic_make_request_checks(struct bio *bio)
 {
 	struct request_queue *q;
 	int nr_sectors = bio_sectors(bio);
-	int err = -EIO;
+	blk_status_t status = BLK_STS_IOERR;
 	char b[BDEVNAME_SIZE];
 	struct hd_struct *part;
@@ -1973,7 +1979,7 @@ generic_make_request_checks(struct bio *bio)
 	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
 		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
 		if (!nr_sectors) {
-			err = 0;
+			status = BLK_STS_OK;
 			goto end_io;
 		}
 	}
@@ -2025,9 +2031,9 @@ generic_make_request_checks(struct bio *bio)
 	return true;
 not_supported:
-	err = -EOPNOTSUPP;
+	status = BLK_STS_NOTSUPP;
 end_io:
-	bio->bi_error = err;
+	bio->bi_status = status;
 	bio_endio(bio);
 	return false;
 }
...
@@ -384,9 +384,9 @@ static struct kobj_type integrity_ktype = {
 	.sysfs_ops	= &integrity_ops,
 };
-static int blk_integrity_nop_fn(struct blk_integrity_iter *iter)
+static blk_status_t blk_integrity_nop_fn(struct blk_integrity_iter *iter)
 {
-	return 0;
+	return BLK_STS_OK;
 }
 static const struct blk_integrity_profile nop_profile = {
...
@@ -143,7 +143,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool)
 		mempool_free(bvec->bv_page, pool);
 	}
-	bio_orig->bi_error = bio->bi_error;
+	bio_orig->bi_status = bio->bi_status;
 	bio_endio(bio_orig);
 	bio_put(bio);
 }
@@ -163,7 +163,7 @@ static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
 {
 	struct bio *bio_orig = bio->bi_private;
-	if (!bio->bi_error)
+	if (!bio->bi_status)
 		copy_to_high_bio_irq(bio_orig, bio);
 	bounce_end_io(bio, pool);
...
@@ -46,8 +46,8 @@ static __be16 t10_pi_ip_fn(void *data, unsigned int len)
  * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
  * tag.
  */
-static int t10_pi_generate(struct blk_integrity_iter *iter, csum_fn *fn,
-		unsigned int type)
+static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
+		csum_fn *fn, unsigned int type)
 {
 	unsigned int i;
@@ -67,11 +67,11 @@ static int t10_pi_generate(struct blk_integrity_iter *iter, csum_fn *fn,
 		iter->seed++;
 	}
-	return 0;
+	return BLK_STS_OK;
 }
-static int t10_pi_verify(struct blk_integrity_iter *iter, csum_fn *fn,
-		unsigned int type)
+static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
+		csum_fn *fn, unsigned int type)
 {
 	unsigned int i;
@@ -108,7 +108,7 @@ static int t10_pi_verify(struct blk_integrity_iter *iter, csum_fn *fn,
 				"(rcvd %04x, want %04x)\n", iter->disk_name,
 				(unsigned long long)iter->seed,
 				be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
-			return -EILSEQ;
+			return BLK_STS_PROTECTION;
 		}
 next:
@@ -117,45 +117,45 @@ static int t10_pi_verify(struct blk_integrity_iter *iter, csum_fn *fn,
 		iter->seed++;
 	}
-	return 0;
+	return BLK_STS_OK;
 }
-static int t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
 {
 	return t10_pi_generate(iter, t10_pi_crc_fn, 1);
 }
-static int t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
 {
 	return t10_pi_generate(iter, t10_pi_ip_fn, 1);
 }
-static int t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
 {
 	return t10_pi_verify(iter, t10_pi_crc_fn, 1);
 }
-static int t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
 {
 	return t10_pi_verify(iter, t10_pi_ip_fn, 1);
 }
-static int t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
 {
 	return t10_pi_generate(iter, t10_pi_crc_fn, 3);
 }
-static int t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
 {
 	return t10_pi_generate(iter, t10_pi_ip_fn, 3);
 }
-static int t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
 {
 	return t10_pi_verify(iter, t10_pi_crc_fn, 3);
 }
-static int t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
 {
 	return t10_pi_verify(iter, t10_pi_ip_fn, 3);
 }
...
@@ -1070,7 +1070,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
 	d->ip.rq = NULL;
 	do {
 		bio = rq->bio;
-		bok = !fastfail && !bio->bi_error;
+		bok = !fastfail && !bio->bi_status;
 	} while (__blk_end_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));
 	/* cf. http://lkml.org/lkml/2006/10/31/28 */
@@ -1131,7 +1131,7 @@ ktiocomplete(struct frame *f)
 			ahout->cmdstat, ahin->cmdstat,
 			d->aoemajor, d->aoeminor);
 noskb:		if (buf)
-			buf->bio->bi_error = -EIO;
+			buf->bio->bi_status = BLK_STS_IOERR;
 		goto out;
 	}
@@ -1144,7 +1144,7 @@ noskb: if (buf)
 			"aoe: runt data size in read from",
 			(long) d->aoemajor, d->aoeminor,
 			skb->len, n);
-		buf->bio->bi_error = -EIO;
+		buf->bio->bi_status = BLK_STS_IOERR;
 		break;
 	}
 	if (n > f->iter.bi_size) {
@@ -1152,7 +1152,7 @@ noskb: if (buf)
 			"aoe: too-large data size in read from",
 			(long) d->aoemajor, d->aoeminor,
 			n, f->iter.bi_size);
-		buf->bio->bi_error = -EIO;
+		buf->bio->bi_status = BLK_STS_IOERR;
 		break;
 	}
 	bvcpy(skb, f->buf->bio, f->iter, n);
@@ -1654,7 +1654,7 @@ aoe_failbuf(struct aoedev *d, struct buf *buf)
 	if (buf == NULL)
 		return;
 	buf->iter.bi_size = 0;
-	buf->bio->bi_error = -EIO;
+	buf->bio->bi_status = BLK_STS_IOERR;
 	if (buf->nframesout == 0)
 		aoe_end_buf(d, buf);
 }
...
@@ -170,7 +170,7 @@ aoe_failip(struct aoedev *d)
 	if (rq == NULL)
 		return;
 	while ((bio = d->ip.nxbio)) {
-		bio->bi_error = -EIO;
+		bio->bi_status = BLK_STS_IOERR;
 		d->ip.nxbio = bio->bi_next;
 		n = (unsigned long) rq->special;
 		rq->special = (void *) --n;
...
@@ -178,7 +178,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
 	else
 		submit_bio(bio);
 	wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
-	if (!bio->bi_error)
+	if (!bio->bi_status)
 		err = device->md_io.error;
 out:
...
@@ -959,16 +959,16 @@ static void drbd_bm_endio(struct bio *bio)
 	    !bm_test_page_unchanged(b->bm_pages[idx]))
 		drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx);
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		/* ctx error will hold the completed-last non-zero error code,
 		 * in case error codes differ. */
-		ctx->error = bio->bi_error;
+		ctx->error = blk_status_to_errno(bio->bi_status);
 		bm_set_page_io_err(b->bm_pages[idx]);
 		/* Not identical to on disk version of it.
 		 * Is BM_PAGE_IO_ERROR enough? */
 		if (__ratelimit(&drbd_ratelimit_state))
 			drbd_err(device, "IO ERROR %d on bitmap page idx %u\n",
-					bio->bi_error, idx);
+					bio->bi_status, idx);
 	} else {
 		bm_clear_page_io_err(b->bm_pages[idx]);
 		dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);
...
@@ -1627,7 +1627,7 @@ static inline void drbd_generic_make_request(struct drbd_device *device,
 	__release(local);
 	if (!bio->bi_bdev) {
 		drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n");
-		bio->bi_error = -ENODEV;
+		bio->bi_status = BLK_STS_IOERR;
 		bio_endio(bio);
 		return;
 	}
...
@@ -1229,9 +1229,9 @@ void one_flush_endio(struct bio *bio)
 	struct drbd_device *device = octx->device;
 	struct issue_flush_context *ctx = octx->ctx;
-	if (bio->bi_error) {
-		ctx->error = bio->bi_error;
-		drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_error);
+	if (bio->bi_status) {
+		ctx->error = blk_status_to_errno(bio->bi_status);
+		drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_status);
 	}
 	kfree(octx);
 	bio_put(bio);
...
@@ -203,7 +203,7 @@ void start_new_tl_epoch(struct drbd_connection *connection)
 void complete_master_bio(struct drbd_device *device,
 		struct bio_and_error *m)
 {
-	m->bio->bi_error = m->error;
+	m->bio->bi_status = errno_to_blk_status(m->error);
 	bio_endio(m->bio);
 	dec_ap_bio(device);
 }
@@ -1157,7 +1157,7 @@ static void drbd_process_discard_req(struct drbd_request *req)
 	if (blkdev_issue_zeroout(bdev, req->i.sector, req->i.size >> 9,
 			GFP_NOIO, 0))
-		req->private_bio->bi_error = -EIO;
+		req->private_bio->bi_status = BLK_STS_IOERR;
 	bio_endio(req->private_bio);
 }
@@ -1225,7 +1225,7 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
 		/* only pass the error to the upper layers.
 		 * if user cannot handle io errors, that's not our business. */
 		drbd_err(device, "could not kmalloc() req\n");
-		bio->bi_error = -ENOMEM;
+		bio->bi_status = BLK_STS_RESOURCE;
 		bio_endio(bio);
 		return ERR_PTR(-ENOMEM);
 	}
...
@@ -63,7 +63,7 @@ void drbd_md_endio(struct bio *bio)
 	struct drbd_device *device;
 	device = bio->bi_private;
-	device->md_io.error = bio->bi_error;
+	device->md_io.error = blk_status_to_errno(bio->bi_status);
 	/* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
 	 * to timeout on the lower level device, and eventually detach from it.
@@ -177,13 +177,13 @@ void drbd_peer_request_endio(struct bio *bio)
 	bool is_discard = bio_op(bio) == REQ_OP_WRITE_ZEROES ||
 			  bio_op(bio) == REQ_OP_DISCARD;
-	if (bio->bi_error && __ratelimit(&drbd_ratelimit_state))
+	if (bio->bi_status && __ratelimit(&drbd_ratelimit_state))
 		drbd_warn(device, "%s: error=%d s=%llus\n",
 				is_write ? (is_discard ? "discard" : "write")
-					 : "read", bio->bi_error,
+					 : "read", bio->bi_status,
 				(unsigned long long)peer_req->i.sector);
-	if (bio->bi_error)
+	if (bio->bi_status)
 		set_bit(__EE_WAS_ERROR, &peer_req->flags);
 	bio_put(bio); /* no need for the bio anymore */
@@ -243,16 +243,16 @@ void drbd_request_endio(struct bio *bio)
 		if (__ratelimit(&drbd_ratelimit_state))
 			drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");
-		if (!bio->bi_error)
+		if (!bio->bi_status)
 			drbd_panic_after_delayed_completion_of_aborted_request(device);
 	}
 	/* to avoid recursion in __req_mod */
-	if (unlikely(bio->bi_error)) {
+	if (unlikely(bio->bi_status)) {
 		switch (bio_op(bio)) {
 		case REQ_OP_WRITE_ZEROES:
 		case REQ_OP_DISCARD:
-			if (bio->bi_error == -EOPNOTSUPP)
+			if (bio->bi_status == BLK_STS_NOTSUPP)
 				what = DISCARD_COMPLETED_NOTSUPP;
 			else
 				what = DISCARD_COMPLETED_WITH_ERROR;
@@ -272,7 +272,7 @@ void drbd_request_endio(struct bio *bio)
 	}
 	bio_put(req->private_bio);
-	req->private_bio = ERR_PTR(bio->bi_error);
+	req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));
 	/* not req_mod(), we need irqsave here! */
 	spin_lock_irqsave(&device->resource->req_lock, flags);
...
@@ -3780,9 +3780,9 @@ static void floppy_rb0_cb(struct bio *bio)
 	struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private;
 	int drive = cbdata->drive;
-	if (bio->bi_error) {
+	if (bio->bi_status) {
 		pr_info("floppy: error %d while reading block 0\n",
-			bio->bi_error);
+			bio->bi_status);
 		set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
 	}
 	complete(&cbdata->complete);
...
@@ -952,9 +952,9 @@ static void pkt_end_io_read(struct bio *bio)
 	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
 		bio, (unsigned long long)pkt->sector,
-		(unsigned long long)bio->bi_iter.bi_sector, bio->bi_error);
+		(unsigned long long)bio->bi_iter.bi_sector, bio->bi_status);