block: split bios to max possible length
block/blk-merge.c (sagit-ice-cold/kernel_xiaomi_msm8998.git)

/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

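/*
 * Split a discard bio so that it fits the queue's discard limits: cap it at
 * max_discard_sectors and end the front part on a discard_granularity
 * boundary so that the remainder starts aligned.
 */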
static struct bio *blk_bio_discard_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
                                         unsigned *nsegs)
{
        unsigned int max_discard_sectors, granularity;
        int alignment;
        sector_t tmp;
        unsigned split_sectors;

        *nsegs = 1;

        /* Zero-sector (unknown) and one-sector granularities are the same.  */
        granularity = max(q->limits.discard_granularity >> 9, 1U);

        max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
        max_discard_sectors -= max_discard_sectors % granularity;

        if (unlikely(!max_discard_sectors)) {
                /* XXX: warn */
                return NULL;
        }

        if (bio_sectors(bio) <= max_discard_sectors)
                return NULL;

        split_sectors = max_discard_sectors;

        /*
         * If the next starting sector would be misaligned, stop the discard at
         * the previous aligned sector.
         */
        alignment = (q->limits.discard_alignment >> 9) % granularity;

        tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
        tmp = sector_div(tmp, granularity);

        if (split_sectors > tmp)
                split_sectors -= tmp;

        return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

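/*
 * Split a WRITE SAME bio at the queue's max_write_same_sectors limit.
 */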
static struct bio *blk_bio_write_same_split(struct request_queue *q,
                                            struct bio *bio,
                                            struct bio_set *bs,
                                            unsigned *nsegs)
{
        *nsegs = 1;

        if (!q->limits.max_write_same_sectors)
                return NULL;

        if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
                return NULL;

        return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

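/*
 * Walk the bio's segments and split it wherever it would exceed the queue's
 * size, segment-count, segment-size or SG-gap limits.  Returns the front
 * part when a split is needed, NULL when the bio already fits, and stores
 * the number of physical segments of the front part in *segs.
 */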
static struct bio *blk_bio_segment_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
                                         unsigned *segs)
{
        struct bio_vec bv, bvprv, *bvprvp = NULL;
        struct bvec_iter iter;
        unsigned seg_size = 0, nsegs = 0, sectors = 0;
        unsigned front_seg_size = bio->bi_seg_front_size;
        bool do_split = true;
        struct bio *new = NULL;

        bio_for_each_segment(bv, bio, iter) {
                /*
                 * If the queue doesn't support SG gaps and adding this
                 * offset would create a gap, disallow it.
                 */
                if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
                        goto split;

                if (sectors + (bv.bv_len >> 9) >
                                blk_max_size_offset(q, bio->bi_iter.bi_sector)) {
                        /*
                         * Consider this a new segment if we're splitting in
                         * the middle of this vector.
                         */
                        if (nsegs < queue_max_segments(q) &&
                            sectors < blk_max_size_offset(q,
                                                bio->bi_iter.bi_sector)) {
                                nsegs++;
                                sectors = blk_max_size_offset(q,
                                                bio->bi_iter.bi_sector);
                        }
                        goto split;
                }

                if (bvprvp && blk_queue_cluster(q)) {
                        if (seg_size + bv.bv_len > queue_max_segment_size(q))
                                goto new_segment;
                        if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
                                goto new_segment;
                        if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
                                goto new_segment;

                        seg_size += bv.bv_len;
                        bvprv = bv;
                        bvprvp = &bvprv;
                        sectors += bv.bv_len >> 9;

                        if (nsegs == 1 && seg_size > front_seg_size)
                                front_seg_size = seg_size;
                        continue;
                }
new_segment:
                if (nsegs == queue_max_segments(q))
                        goto split;

                nsegs++;
                bvprv = bv;
                bvprvp = &bvprv;
                seg_size = bv.bv_len;
                sectors += bv.bv_len >> 9;

                if (nsegs == 1 && seg_size > front_seg_size)
                        front_seg_size = seg_size;
        }

        do_split = false;
split:
        *segs = nsegs;

        if (do_split) {
                new = bio_split(bio, sectors, GFP_NOIO, bs);
                if (new)
                        bio = new;
        }

        bio->bi_seg_front_size = front_seg_size;
        if (seg_size > bio->bi_seg_back_size)
                bio->bi_seg_back_size = seg_size;

        return do_split ? new : NULL;
}

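/*
 * Split *bio if it is too big for the queue's limits and submit the
 * remainder.  On return *bio points to a bio that fits the limits; the
 * chained remainder has already been resubmitted via generic_make_request().
 */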
void blk_queue_split(struct request_queue *q, struct bio **bio,
                     struct bio_set *bs)
{
        struct bio *split, *res;
        unsigned nsegs;

        if ((*bio)->bi_rw & REQ_DISCARD)
                split = blk_bio_discard_split(q, *bio, bs, &nsegs);
        else if ((*bio)->bi_rw & REQ_WRITE_SAME)
                split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
        else
                split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);

        /* physical segments can be figured out during splitting */
        res = split ? split : *bio;
        res->bi_phys_segments = nsegs;
        bio_set_flag(res, BIO_SEG_VALID);

        if (split) {
                /* there is no chance to merge the split bio */
                split->bi_rw |= REQ_NOMERGE;

                bio_chain(split, *bio);
                generic_make_request(*bio);
                *bio = split;
        }
}
EXPORT_SYMBOL(blk_queue_split);

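/*
 * Count the physical segments of a bio chain, honouring the queue's
 * clustering and segment-size limits, and record the front/back segment
 * sizes that later merge decisions rely on.
 */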
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio,
                                             bool no_sg_merge)
{
        struct bio_vec bv, bvprv = { NULL };
        int cluster, prev = 0;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio, *bbio;
        struct bvec_iter iter;

        if (!bio)
                return 0;

        /*
         * This should probably be returning 0, but blk_add_request_payload()
         * (Christoph!!!!)
         */
        if (bio->bi_rw & REQ_DISCARD)
                return 1;

        if (bio->bi_rw & REQ_WRITE_SAME)
                return 1;

        fbio = bio;
        cluster = blk_queue_cluster(q);
        seg_size = 0;
        nr_phys_segs = 0;
        for_each_bio(bio) {
                bio_for_each_segment(bv, bio, iter) {
                        /*
                         * If SG merging is disabled, each bio vector is
                         * a segment
                         */
                        if (no_sg_merge)
                                goto new_segment;

                        if (prev && cluster) {
                                if (seg_size + bv.bv_len
                                    > queue_max_segment_size(q))
                                        goto new_segment;
                                if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
                                        goto new_segment;
                                if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
                                        goto new_segment;

                                seg_size += bv.bv_len;
                                bvprv = bv;
                                continue;
                        }
new_segment:
                        if (nr_phys_segs == 1 && seg_size >
                            fbio->bi_seg_front_size)
                                fbio->bi_seg_front_size = seg_size;

                        nr_phys_segs++;
                        bvprv = bv;
                        prev = 1;
                        seg_size = bv.bv_len;
                }
                bbio = bio;
        }

        if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
                fbio->bi_seg_front_size = seg_size;
        if (seg_size > bbio->bi_seg_back_size)
                bbio->bi_seg_back_size = seg_size;

        return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
        bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
                        &rq->q->queue_flags);

        rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
                        no_sg_merge);
}

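/*
 * Recompute and cache bio->bi_phys_segments.  With NO_SG_MERGE set, the
 * bvec count is used directly as long as it stays below the queue's segment
 * limit; otherwise the segments are recounted from scratch.
 */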
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
        unsigned short seg_cnt;

        /* estimate segment number by bi_vcnt for non-cloned bio */
        if (bio_flagged(bio, BIO_CLONED))
                seg_cnt = bio_segments(bio);
        else
                seg_cnt = bio->bi_vcnt;

        if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
                        (seg_cnt < queue_max_segments(q)))
                bio->bi_phys_segments = seg_cnt;
        else {
                struct bio *nxt = bio->bi_next;

                bio->bi_next = NULL;
                bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
                bio->bi_next = nxt;
        }

        bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

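/*
 * Check whether the last segment of @bio and the first segment of @nxt can
 * be folded into a single physical segment without violating the queue's
 * clustering, segment-size and segment-boundary limits.
 */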
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
{
        struct bio_vec end_bv = { NULL }, nxt_bv;
        struct bvec_iter iter;

        if (!blk_queue_cluster(q))
                return 0;

        if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
            queue_max_segment_size(q))
                return 0;

        if (!bio_has_data(bio))
                return 1;

        bio_for_each_segment(end_bv, bio, iter)
                if (end_bv.bv_len == iter.bi_size)
                        break;

        nxt_bv = bio_iovec(nxt);

        if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
                return 0;

        /*
         * bio and nxt are contiguous in memory; check if the queue allows
         * these two to be merged into one
         */
        if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
                return 1;

        return 0;
}

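/*
 * Add one bio_vec to the scatterlist being built, either by extending the
 * current scatterlist entry (when clustering allows it) or by starting a
 * new entry.
 */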
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
                     struct scatterlist *sglist, struct bio_vec *bvprv,
                     struct scatterlist **sg, int *nsegs, int *cluster)
{
        int nbytes = bvec->bv_len;

        if (*sg && *cluster) {
                if ((*sg)->length + nbytes > queue_max_segment_size(q))
                        goto new_segment;

                if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
                        goto new_segment;
                if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
                        goto new_segment;

                (*sg)->length += nbytes;
        } else {
new_segment:
                if (!*sg)
                        *sg = sglist;
                else {
                        /*
                         * If the driver previously mapped a shorter
                         * list, we could see a termination bit
                         * prematurely unless it fully inits the sg
                         * table on each mapping. We KNOW that there
                         * must be more entries here or the driver
                         * would be buggy, so force clear the
                         * termination bit to avoid doing a full
                         * sg_init_table() in drivers for each command.
                         */
                        sg_unmark_end(*sg);
                        *sg = sg_next(*sg);
                }

                sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
                (*nsegs)++;
        }
        *bvprv = *bvec;
}

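/*
 * Map all bios of a request to a scatterlist.  Discard and WRITE SAME bios
 * are special-cased because they carry at most one payload segment.
 */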
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
                             struct scatterlist *sglist,
                             struct scatterlist **sg)
{
        struct bio_vec bvec, bvprv = { NULL };
        struct bvec_iter iter;
        int nsegs, cluster;

        nsegs = 0;
        cluster = blk_queue_cluster(q);

        if (bio->bi_rw & REQ_DISCARD) {
                /*
                 * This is a hack - drivers should be neither modifying the
                 * biovec, nor relying on bi_vcnt - but because of
                 * blk_add_request_payload(), a discard bio may or may not have
                 * a payload we need to set up here (thank you Christoph) and
                 * bi_vcnt is really the only way of telling if we need to.
                 */

                if (bio->bi_vcnt)
                        goto single_segment;

                return 0;
        }

        if (bio->bi_rw & REQ_WRITE_SAME) {
single_segment:
                *sg = sglist;
                bvec = bio_iovec(bio);
                sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
                return 1;
        }

        for_each_bio(bio)
                bio_for_each_segment(bvec, bio, iter)
                        __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
                                             &nsegs, &cluster);

        return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                  struct scatterlist *sglist)
{
        struct scatterlist *sg = NULL;
        int nsegs = 0;

        if (rq->bio)
                nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

        if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
            (blk_rq_bytes(rq) & q->dma_pad_mask)) {
                unsigned int pad_len =
                        (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

                sg->length += pad_len;
                rq->extra_len += pad_len;
        }

        if (q->dma_drain_size && q->dma_drain_needed(rq)) {
                if (rq->cmd_flags & REQ_WRITE)
                        memset(q->dma_drain_buffer, 0, q->dma_drain_size);

                sg_unmark_end(sg);
                sg = sg_next(sg);
                sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
                            q->dma_drain_size,
                            ((unsigned long)q->dma_drain_buffer) &
                            (PAGE_SIZE - 1));
                nsegs++;
                rq->extra_len += q->dma_drain_size;
        }

        if (sg)
                sg_mark_end(sg);

        /*
         * Something must have gone wrong if the computed number of segments
         * is bigger than the number of the request's physical segments.
         */
        WARN_ON(nsegs > rq->nr_phys_segments);

        return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

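/*
 * Check whether @bio can be added to @req as one or more new hardware
 * segments, and update the request's segment count if it can.
 */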
static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
{
        int nr_phys_segs = bio_phys_segments(q, bio);

        if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
                goto no_merge;

        if (blk_integrity_merge_bio(q, req, bio) == false)
                goto no_merge;

        /*
         * This will form the start of a new hw segment.  Bump the
         * physical segment counter.
         */
        req->nr_phys_segments += nr_phys_segs;
        return 1;

no_merge:
        req->cmd_flags |= REQ_NOMERGE;
        if (req == q->last_merge)
                q->last_merge = NULL;
        return 0;
}

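/*
 * Can @bio be appended to the back of @req?
 */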
int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio)
{
        if (req_gap_back_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_back_merge(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
        }
        if (!bio_flagged(req->biotail, BIO_SEG_VALID))
                blk_recount_segments(q, req->biotail);
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);

        return ll_new_hw_segment(q, req, bio);
}

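/*
 * Can @bio be merged in front of @req?
 */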
int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio)
{

        if (req_gap_front_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_front_merge(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
        }
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
        if (!bio_flagged(req->bio, BIO_SEG_VALID))
                blk_recount_segments(q, req->bio);

        return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload; it
 * does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
        struct request_queue *q = req->q;

        return !q->mq_ops && req->special;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
{
        int total_phys_segments;
        unsigned int seg_size =
                req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

        /*
         * First check whether either of the requests is a re-queued
         * request.  We can't merge them if they are.
         */
        if (req_no_special_merge(req) || req_no_special_merge(next))
                return 0;

        if (req_gap_back_merge(req, next->bio))
                return 0;

        /*
         * Will it become too large?
         */
        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
            blk_rq_get_max_sectors(req))
                return 0;

        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
        if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
                if (req->nr_phys_segments == 1)
                        req->bio->bi_seg_front_size = seg_size;
                if (next->nr_phys_segments == 1)
                        next->biotail->bi_seg_back_size = seg_size;
                total_phys_segments--;
        }

        if (total_phys_segments > queue_max_segments(q))
                return 0;

        if (blk_integrity_merge_rq(q, req, next) == false)
                return 0;

        /* Merge is OK... */
        req->nr_phys_segments = total_phys_segments;
        return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
        struct bio *bio;

        if (rq->cmd_flags & REQ_MIXED_MERGE)
                return;

        /*
         * @rq will no longer represent mixable attributes for all the
         * contained bios.  It will just track those of the first one.
         * Distribute the attributes to each bio.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
                             (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
                bio->bi_rw |= ff;
        }
        rq->cmd_flags |= REQ_MIXED_MERGE;
}

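/*
 * Update the per-partition I/O statistics for a request that is going away
 * because it has been merged into another request.
 */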
static void blk_account_io_merge(struct request *req)
{
        if (blk_do_io_stat(req)) {
                struct hd_struct *part;
                int cpu;

                cpu = part_stat_lock();
                part = req->part;

                part_round_stats(cpu, part);
                part_dec_in_flight(part, rq_data_dir(req));

                hd_struct_put(part);
                part_stat_unlock();
        }
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
                          struct request *next)
{
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return 0;

        if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
                return 0;

        /*
         * not contiguous
         */
        if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
                return 0;

        if (rq_data_dir(req) != rq_data_dir(next)
            || req->rq_disk != next->rq_disk
            || req_no_special_merge(next))
                return 0;

        if (req->cmd_flags & REQ_WRITE_SAME &&
            !blk_write_same_mergeable(req->bio, next->bio))
                return 0;

        /*
         * If we are allowed to merge, then append bio list
         * from next to rq and release next. merge_requests_fn
         * will have updated segment counts, update sector
         * counts here.
         */
        if (!ll_merge_requests_fn(q, req, next))
                return 0;

        /*
         * If failfast settings disagree or any of the two is already
         * a mixed merge, mark both as mixed before proceeding.  This
         * makes sure that all involved bios have mixable attributes
         * set properly.
         */
        if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
            (req->cmd_flags & REQ_FAILFAST_MASK) !=
            (next->cmd_flags & REQ_FAILFAST_MASK)) {
                blk_rq_set_mixed_merge(req);
                blk_rq_set_mixed_merge(next);
        }

        /*
         * At this point we have either done a back merge
         * or front merge. We need the smaller start_time of
         * the merged requests to be the current request
         * for accounting purposes.
         */
        if (time_after(req->start_time, next->start_time))
                req->start_time = next->start_time;

        req->biotail->bi_next = next->bio;
        req->biotail = next->biotail;

        req->__data_len += blk_rq_bytes(next);

        elv_merge_requests(q, req, next);

        /*
         * 'next' is going away, so update stats accordingly
         */
        blk_account_io_merge(next);

        req->ioprio = ioprio_best(req->ioprio, next->ioprio);
        if (blk_rq_cpu_valid(next))
                req->cpu = next->cpu;

        /* ownership of bio passed from next to req */
        next->bio = NULL;
        __blk_put_request(q, next);
        return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
        struct request *next = elv_latter_request(q, rq);

        if (next)
                return attempt_merge(q, rq, next);

        return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
        struct request *prev = elv_former_request(q, rq);

        if (prev)
                return attempt_merge(q, prev, rq);

        return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                          struct request *next)
{
        return attempt_merge(q, rq, next);
}

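/*
 * Check the basic conditions under which @bio may be merged into @rq at
 * all: mergeability, flags, data direction, device, integrity profile and,
 * for WRITE SAME, the data buffer must all match.
 */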
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq) || !bio_mergeable(bio))
                return false;

        if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
                return false;

        /* different data direction or already started, don't merge */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return false;

        /* must be same device and not a special request */
        if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
                return false;

        /* only merge integrity protected bio into ditto rq */
        if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
                return false;

        /* must be using the same buffer */
        if (rq->cmd_flags & REQ_WRITE_SAME &&
            !blk_write_same_mergeable(rq->bio, bio))
                return false;

        return true;
}

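/*
 * Decide whether @bio would be a back merge, a front merge or no merge at
 * all for @rq, based purely on the sector positions.
 */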
int blk_try_merge(struct request *rq, struct bio *bio)
{
        if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
                return ELEVATOR_BACK_MERGE;
        else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
                return ELEVATOR_FRONT_MERGE;
        return ELEVATOR_NO_MERGE;
}