// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	-  July2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-rq-qos.h"

#ifdef CONFIG_DEBUG_FS
struct dentry *blk_debugfs_root;
#endif

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);

DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);

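/*
 * blk_rq_init - initialize @rq to a clean state
 *
 * Zero the request and reset the fields every request carries (list heads,
 * tags, start time, reference count) to their initial values.
 */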
void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = -1;
	rq->internal_tag = -1;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	refcount_set(&rq->ref, 1);
}
EXPORT_SYMBOL(blk_rq_init);

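/* Human-readable names for REQ_OP_* values, indexed by opcode. */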
#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(SCSI_IN),
	REQ_OP_NAME(SCSI_OUT),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - Return string XXX in the REQ_OP_XXX.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralize block layer function to convert REQ_OP_XXX into
 * string format. Useful in the debugging and tracing bio or request. For
 * invalid REQ_OP_XXX it returns string "UNKNOWN".
 */
inline const char *blk_op_str(unsigned int op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

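/*
 * Map a Linux errno to the matching blk_status_t; values without an
 * explicit entry in blk_errors[] fall back to BLK_STS_IOERR.
 */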
blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);

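/* Ratelimited report of a failed request: disk, sector, opcode and flags. */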
static void print_req_error(struct request *req, blk_status_t status,
		const char *caller)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return;

	printk_ratelimited(KERN_ERR
		"%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		caller, blk_errors[idx].name,
		req->rq_disk ? req->rq_disk->disk_name : "?",
		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
		req->cmd_flags & ~REQ_OP_MASK,
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}

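/*
 * Complete @nbytes of @bio on behalf of @rq.  The bio is only ended once it
 * has no bytes left and is not part of a flush sequence.
 */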
static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (error)
		bio->bi_status = error;

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->make_request_fn will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 *
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

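/*
 * Mark @q as dying: set QUEUE_FLAG_DYING, start a queue freeze so no new
 * I/O can enter the queue, and wake up anyone blocked in blk_queue_enter().
 */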
void blk_set_queue_dying(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);

	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);

	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);

	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 */
void blk_cleanup_queue(struct request_queue *q)
{
	WARN_ON_ONCE(blk_queue_registered(q));

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	blk_set_queue_dying(q);

	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);

	/*
	 * Drain all requests queued before DYING marking. Set DEAD flag to
	 * prevent that blk_mq_run_hw_queues() accesses the hardware queues
	 * after draining finished.
	 */
	blk_freeze_queue(q);

	rq_qos_exit(q);

	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

	/* for synchronous bio-based driver finish in-flight integrity i/o */
	blk_flush_integrity();

	/* @q won't process any more request, flush async actions */
	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
	blk_sync_queue(q);

	if (queue_is_mq(q))
		blk_mq_exit_queue(q);

	/*
	 * In theory, request pool of sched_tags belongs to request queue.
	 * However, the current implementation requires tag_set for freeing
	 * requests, so free the pool now.
	 *
	 * Queue has become frozen, there can't be any in-queue requests, so
	 * it is safe to free requests now.
	 */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		blk_mq_sched_free_requests(q);
	mutex_unlock(&q->sysfs_lock);

	percpu_ref_exit(&q->q_usage_counter);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_alloc_queue);

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PREEMPT;

	while (true) {
		bool success = false;

		rcu_read_lock();
		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
			/*
			 * The code that increments the pm_only counter is
			 * responsible for ensuring that that counter is
			 * globally visible before the queue is unfrozen.
			 */
			if (pm || !blk_queue_pm_only(q)) {
				success = true;
			} else {
				percpu_ref_put(&q->q_usage_counter);
			}
		}
		rcu_read_unlock();

		if (success)
			return 0;

		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * read pair of barrier in blk_freeze_queue_start(),
		 * we need to order reading __PERCPU_REF_DEAD flag of
		 * .q_usage_counter and reading .mq_freeze_depth or
		 * queue dying flag, otherwise the following wait may
		 * never return if the two reads are reordered.
		 */
		smp_rmb();

		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    (pm || (blk_pm_request_resume(q),
				    !blk_queue_pm_only(q)))) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}
}

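/* Drop the q_usage_counter reference taken by a successful blk_queue_enter(). */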
void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

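/* The queue timeout timer expired; defer the actual handling to kblockd. */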
static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

/**
 * blk_alloc_queue_node - allocate a request queue
 * @gfp_mask: memory allocation flags
 * @node_id: NUMA node to allocate memory from
 */
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->last_merge = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
	if (q->id < 0)
		goto fail_q;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto fail_id;

	q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
	if (!q->backing_dev_info)
		goto fail_split;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_stats;

	q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES;
	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
	q->backing_dev_info->name = "block";
	q->node = node_id;

	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
		    laptop_mode_timer_fn, 0);
	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif

	kobject_init(&q->kobj, &blk_queue_ktype);

#ifdef CONFIG_BLK_DEV_IO_TRACE
	mutex_init(&q->blk_trace_mutex);
#endif
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_bdi;

	if (blkcg_init_queue(q))
		goto fail_ref;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
	blk_free_queue_stats(q->stats);
fail_stats:
	bdi_put(q->backing_dev_info);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

/**
 * blk_get_request - allocate a request
 * @q: request queue to allocate a request for
 * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
 * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
 */
struct request *blk_get_request(struct request_queue *q, unsigned int op,
				blk_mq_req_flags_t flags)
{
	struct request *req;

	WARN_ON_ONCE(op & REQ_NOWAIT);
	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));

	req = blk_mq_alloc_request(q, op, flags);
	if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
		q->mq_ops->initialize_rq_fn(req);

	return req;
}
EXPORT_SYMBOL(blk_get_request);
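/*
 * Illustrative only: a typical caller pairs blk_get_request() with
 * blk_put_request(), e.g. (assuming a live request queue @q):
 *
 *	struct request *rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	...
 *	blk_put_request(rq);
 */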

void blk_put_request(struct request *req)
{
	blk_mq_free_request(req);
}
EXPORT_SYMBOL(blk_put_request);

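/*
 * Try to append @bio to the tail of @req.  On success the request's size and
 * failfast state are updated and true is returned; false means the merge was
 * not allowed.
 */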
bool bio_attempt_back_merge(struct request *req, struct bio *bio,
		unsigned int nr_segs)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(req, bio, nr_segs))
		return false;

	trace_block_bio_backmerge(req->q, req, bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;

	blk_account_io_start(req, false);
	return true;
}

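/*
 * Try to add @bio in front of @req, updating the request's start sector and
 * size on success.
 */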
bool bio_attempt_front_merge(struct request *req, struct bio *bio,
		unsigned int nr_segs)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(req, bio, nr_segs))
		return false;

	trace_block_bio_frontmerge(req->q, req, bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;

	blk_account_io_start(req, false);
	return true;
}

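/*
 * Merge a discard @bio into @req as an additional discard segment, provided
 * the queue's discard segment count and maximum size limits are not exceeded.
 */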
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct bio *bio)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	rq_qos_merge(q, req, bio);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->nr_phys_segments = segments + 1;

	blk_account_io_start(req, false);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @nr_segs: number of segments in @bio
 * @same_queue_rq: pointer to &struct request that gets filled in when
 * another request associated with @q is found on the plug list
 * (optional, may be %NULL)
 *
 * Determine whether @bio being queued on @q can be merged with a request
 * on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock.  As such it's more of an issuing mechanism
 * than scheduling, and the request, while may have elvpriv data, is not
 * added on the elevator at this point.  In addition, we don't have
 * reliable access to the elevator outside queue lock.  Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **same_queue_rq)
{
	struct blk_plug *plug;
	struct request *rq;
	struct list_head *plug_list;

	plug = blk_mq_plug(q, bio);
	if (!plug)
		return false;

	plug_list = &plug->mq_list;

	list_for_each_entry_reverse(rq, plug_list, queuelist) {
		bool merged = false;

		if (rq->q == q && same_queue_rq) {
			/*
			 * Only blk-mq multiple hardware queues case checks the
			 * rq in the same queue, there should be only one such
			 * rq in a queue
			 **/
			*same_queue_rq = rq;
		}

		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			merged = bio_attempt_back_merge(rq, bio, nr_segs);
			break;
		case ELEVATOR_FRONT_MERGE:
			merged = bio_attempt_front_merge(rq, bio, nr_segs);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			break;
		}

		if (merged)
			return true;
	}

	return false;
}

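/* Report an attempt to access a sector beyond @maxsector of @bio's device. */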
static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "attempt to access beyond end of device\n");
	printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
			bio_devname(bio, b), bio->bi_opf,
			(unsigned long long)bio_end_sector(bio),
			(long long)maxsector);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
{
	return part->make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool should_fail_request(struct hd_struct *part,
					unsigned int bytes)
{
	return false;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */

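/*
 * Warn (once) about a write to a read-only partition.  Flush bios without a
 * payload are allowed through silently.
 */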
static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
{
	const int op = bio_op(bio);

	if (part->policy && op_is_write(op)) {
		char b[BDEVNAME_SIZE];

		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return false;

		WARN_ONCE(1,
		       "generic_make_request: Trying to write "
			"to read-only block-device %s (partno %d)\n",
			bio_devname(bio, b), part->partno);
		/* Older lvm-tools actually trigger this */
		return false;
	}

	return false;
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
{
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		handle_bad_sector(bio, maxsector);
		return -EIO;
	}
	return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static inline int blk_partition_remap(struct bio *bio)
{
	struct hd_struct *p;
	int ret = -EIO;

	rcu_read_lock();
	p = __disk_get_part(bio->bi_disk, bio->bi_partno);
	if (unlikely(!p))
		goto out;
	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		goto out;
	if (unlikely(bio_check_ro(bio, p)))
		goto out;

	if (bio_sectors(bio)) {
		if (bio_check_eod(bio, part_nr_sects_read(p)))
			goto out;
		bio->bi_iter.bi_sector += p->start_sect;
		trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
				      bio->bi_iter.bi_sector - p->start_sect);
	}
	bio->bi_partno = 0;
	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

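/*
 * Validate a bio before it is queued: remap partition sectors, enforce
 * read-only and end-of-device limits, and reject operations that the target
 * queue does not support.
 */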
static noinline_for_stack bool
generic_make_request_checks(struct bio *bio)
{
	struct request_queue *q;
	int nr_sectors = bio_sectors(bio);
	blk_status_t status = BLK_STS_IOERR;
	char b[BDEVNAME_SIZE];

	might_sleep();

	q = bio->bi_disk->queue;
	if (unlikely(!q)) {
		printk(KERN_ERR
		       "generic_make_request: Trying to access "
			"nonexistent block-device %s (%Lu)\n",
			bio_devname(bio, b), (long long)bio->bi_iter.bi_sector);
		goto end_io;
	}

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if queue is not a request based queue.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;

	if (bio->bi_partno) {
		if (unlikely(blk_partition_remap(bio)))
			goto end_io;
	} else {
		if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
			goto end_io;
		if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
			goto end_io;
	}

	/*
	 * Filter flush bio's early so that make_request based
	 * drivers without flush support don't have to worry
	 * about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!nr_sectors) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio->bi_opf &= ~REQ_HIPRI;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_SAME:
		if (!q->limits.max_write_same_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!blk_queue_is_zoned(q))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;