/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <xen/xen.h>
#include "blk-mq.h"
#include "blk-mq-sched.h"

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

#ifdef CONFIG_DEBUG_FS
extern struct dentry *blk_debugfs_root;
#endif

struct blk_flush_queue {
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t 		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	/*
	 * flush_rq shares tag with this rq, both can't be active
	 * at the same time
	 */
	struct request		*orig_rq;
	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

static inline bool
is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
{
	return hctx->fq->flush_rq == req;
}

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size, gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);

static inline void blk_queue_enter_live(struct request_queue *q)
{
	/*
	 * Given that running in generic_make_request() context
	 * guarantees that a live reference against q_usage_counter has
	 * been established, further references under that same context
	 * need not check that the queue has been frozen (marked dead).
	 */
	percpu_ref_get(&q->q_usage_counter);
}

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
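
/*
 * Illustrative example (added note, not part of the original header): with
 * queue_segment_boundary(q) == 0xffff (64 KiB), a vec at physical address
 * 0x1f000 with bv_len 0x1000 is physically contiguous with a following vec
 * at 0x20000, but the combined segment would cross the 64 KiB boundary:
 * (0x1f000 | 0xffff) == 0x1ffff while ((0x20000 + 0x1000 - 1) | 0xffff) ==
 * 0x2ffff, so biovec_phys_mergeable() refuses the merge.
 */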

static inline bool __bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & queue_virt_boundary(q)) ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}
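
/*
 * Illustrative example (added note, not part of the original header): with
 * queue_virt_boundary(q) == 0xfff (4 KiB), a previous bvec with bv_offset 0
 * and bv_len 0x1000 ends exactly on the boundary, so a following bvec that
 * starts at offset 0 creates no gap.  If the previous bvec ended at 0x800,
 * or the new offset were non-zero, one of the two terms in
 * __bvec_gap_to_prev() would be non-zero and the vecs could not share an
 * SG segment.
 */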

static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	rq->nr_phys_segments = nr_segs;
	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;
	rq->ioprio = bio_prio(bio);

	if (bio->bi_disk)
		rq->rq_disk = bio->bi_disk;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

bool bio_attempt_front_merge(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool bio_attempt_back_merge(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **same_queue_rq);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req, u64 now);

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

void elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
			      struct elevator_type *new_e);
void __elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

static inline void elevator_exit(struct request_queue *q,
		struct elevator_queue *e)
{
	lockdep_assert_held(&q->sysfs_lock);

	blk_mq_sched_free_requests(q);
	__elevator_exit(q, e);
}

struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

void __blk_queue_split(struct request_queue *q, struct bio **bio,
		unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
int ll_front_merge_fn(struct request *req,  struct bio *bio,
		unsigned int nr_segs);
struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->rq_flags & RQF_IO_STAT) &&
		!blk_rq_is_passthrough(rq);
}

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int'; meanwhile it also has to be aligned to the
 * logical block size, which is the minimum unit accepted by hardware.
 */
static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
{
	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
}
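
/*
 * Illustrative example (added note, not part of the original header): with a
 * 4096-byte logical block size, round_down(UINT_MAX, 4096) is 4294963200
 * bytes, so bio_allowed_max_sectors() returns 8388600 512-byte sectors; with
 * 512-byte blocks the result is 8388607 sectors.
 */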

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it.  Returns the current %current->io_context which may be %NULL if
 * allocation failed.
 *
 * Note that this function can't be called with IRQ disabled because
 * task_lock which protects %current->io_context is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);

#ifdef CONFIG_BLK_DEV_ZONED
void blk_queue_free_zone_bitmaps(struct request_queue *q);
#else
static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
#endif

#endif /* BLK_INTERNAL_H */