Skip to content

Commit cfd4730

Browse files
committed
Merge tag 'block-6.13-20242901' of git://git.kernel.dk/linux
Pull more block updates from Jens Axboe:

 - NVMe pull request via Keith:
     - Use correct srcu list traversal (Breno)
     - Scatter-gather support for metadata (Keith)
     - Fabrics shutdown race condition fix (Nilay)
     - Persistent reservations updates (Guixin)

 - Add the required bits for MD atomic write support for raid0/1/10

 - Correct return value for unknown opcode in ublk

 - Fix deadlock with zone revalidation

 - Fix for the io priority request vs bio cleanups

 - Use the correct unsigned int type for various limit helpers

 - Fix for a race in loop

 - Cleanup blk_rq_prep_clone() to prevent uninit-value warning and make
   it easier for actual humans to read

 - Fix potential UAF when iterating tags

 - A few fixes for bfq-iosched UAF issues

 - Fix for brd discard not decrementing the allocated page count

 - Various little fixes and cleanups

* tag 'block-6.13-20242901' of git://git.kernel.dk/linux: (36 commits)
  brd: decrease the number of allocated pages which discarded
  block, bfq: fix bfqq uaf in bfq_limit_depth()
  block: Don't allow an atomic write be truncated in blkdev_write_iter()
  mq-deadline: don't call req_get_ioprio from the I/O completion handler
  block: Prevent potential deadlock in blk_revalidate_disk_zones()
  block: Remove extra part pointer NULLify in blk_rq_init()
  nvme: tuning pr code by using defined structs and macros
  nvme: introduce change ptpl and iekey definition
  block: return bool from get_disk_ro and bdev_read_only
  block: remove a duplicate definition for bdev_read_only
  block: return bool from blk_rq_aligned
  block: return unsigned int from blk_lim_dma_alignment_and_pad
  block: return unsigned int from queue_dma_alignment
  block: return unsigned int from bdev_io_opt
  block: req->bio is always set in the merge code
  block: don't bother checking the data direction for merges
  block: blk-mq: fix uninit-value in blk_rq_prep_clone and refactor
  Revert "block, bfq: merge bfq_release_process_ref() into bfq_put_cooperator()"
  md/raid10: Atomic write support
  md/raid1: Atomic write support
...
2 parents dd54fcc + 8273420 commit cfd4730

27 files changed

+547
-192
lines changed

block/bfq-cgroup.c

+1
Original file line numberDiff line numberDiff line change
@@ -736,6 +736,7 @@ static void bfq_sync_bfqq_move(struct bfq_data *bfqd,
736736
*/
737737
bfq_put_cooperator(sync_bfqq);
738738
bic_set_bfqq(bic, NULL, true, act_idx);
739+
bfq_release_process_ref(bfqd, sync_bfqq);
739740
}
740741
}
741742

block/bfq-iosched.c

+28-15
Original file line numberDiff line numberDiff line change
@@ -582,23 +582,31 @@ static struct request *bfq_choose_req(struct bfq_data *bfqd,
582582
#define BFQ_LIMIT_INLINE_DEPTH 16
583583

584584
#ifdef CONFIG_BFQ_GROUP_IOSCHED
585-
static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
585+
static bool bfqq_request_over_limit(struct bfq_data *bfqd,
586+
struct bfq_io_cq *bic, blk_opf_t opf,
587+
unsigned int act_idx, int limit)
586588
{
587-
struct bfq_data *bfqd = bfqq->bfqd;
588-
struct bfq_entity *entity = &bfqq->entity;
589589
struct bfq_entity *inline_entities[BFQ_LIMIT_INLINE_DEPTH];
590590
struct bfq_entity **entities = inline_entities;
591-
int depth, level, alloc_depth = BFQ_LIMIT_INLINE_DEPTH;
592-
int class_idx = bfqq->ioprio_class - 1;
591+
int alloc_depth = BFQ_LIMIT_INLINE_DEPTH;
593592
struct bfq_sched_data *sched_data;
593+
struct bfq_entity *entity;
594+
struct bfq_queue *bfqq;
594595
unsigned long wsum;
595596
bool ret = false;
596-
597-
if (!entity->on_st_or_in_serv)
598-
return false;
597+
int depth;
598+
int level;
599599

600600
retry:
601601
spin_lock_irq(&bfqd->lock);
602+
bfqq = bic_to_bfqq(bic, op_is_sync(opf), act_idx);
603+
if (!bfqq)
604+
goto out;
605+
606+
entity = &bfqq->entity;
607+
if (!entity->on_st_or_in_serv)
608+
goto out;
609+
602610
/* +1 for bfqq entity, root cgroup not included */
603611
depth = bfqg_to_blkg(bfqq_group(bfqq))->blkcg->css.cgroup->level + 1;
604612
if (depth > alloc_depth) {
@@ -643,7 +651,7 @@ static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
643651
* class.
644652
*/
645653
wsum = 0;
646-
for (i = 0; i <= class_idx; i++) {
654+
for (i = 0; i <= bfqq->ioprio_class - 1; i++) {
647655
wsum = wsum * IOPRIO_BE_NR +
648656
sched_data->service_tree[i].wsum;
649657
}
@@ -666,7 +674,9 @@ static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
666674
return ret;
667675
}
668676
#else
669-
static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
677+
static bool bfqq_request_over_limit(struct bfq_data *bfqd,
678+
struct bfq_io_cq *bic, blk_opf_t opf,
679+
unsigned int act_idx, int limit)
670680
{
671681
return false;
672682
}
@@ -704,16 +714,17 @@ static void bfq_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
704714
}
705715

706716
for (act_idx = 0; bic && act_idx < bfqd->num_actuators; act_idx++) {
707-
struct bfq_queue *bfqq =
708-
bic_to_bfqq(bic, op_is_sync(opf), act_idx);
717+
/* Fast path to check if bfqq is already allocated. */
718+
if (!bic_to_bfqq(bic, op_is_sync(opf), act_idx))
719+
continue;
709720

710721
/*
711722
* Does queue (or any parent entity) exceed number of
712723
* requests that should be available to it? Heavily
713724
* limit depth so that it cannot consume more
714725
* available requests and thus starve other entities.
715726
*/
716-
if (bfqq && bfqq_request_over_limit(bfqq, limit)) {
727+
if (bfqq_request_over_limit(bfqd, bic, opf, act_idx, limit)) {
717728
depth = 1;
718729
break;
719730
}
@@ -5434,8 +5445,6 @@ void bfq_put_cooperator(struct bfq_queue *bfqq)
54345445
bfq_put_queue(__bfqq);
54355446
__bfqq = next;
54365447
}
5437-
5438-
bfq_release_process_ref(bfqq->bfqd, bfqq);
54395448
}
54405449

54415450
static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
@@ -5448,6 +5457,8 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
54485457
bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
54495458

54505459
bfq_put_cooperator(bfqq);
5460+
5461+
bfq_release_process_ref(bfqd, bfqq);
54515462
}
54525463

54535464
static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync,
@@ -6734,6 +6745,8 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
67346745
bic_set_bfqq(bic, NULL, true, bfqq->actuator_idx);
67356746

67366747
bfq_put_cooperator(bfqq);
6748+
6749+
bfq_release_process_ref(bfqq->bfqd, bfqq);
67376750
return NULL;
67386751
}
67396752

block/blk-merge.c

+7-28
Original file line numberDiff line numberDiff line change
@@ -864,17 +864,10 @@ static struct request *attempt_merge(struct request_queue *q,
864864
if (req_op(req) != req_op(next))
865865
return NULL;
866866

867-
if (rq_data_dir(req) != rq_data_dir(next))
867+
if (req->bio->bi_write_hint != next->bio->bi_write_hint)
868+
return NULL;
869+
if (req->bio->bi_ioprio != next->bio->bi_ioprio)
868870
return NULL;
869-
870-
if (req->bio && next->bio) {
871-
/* Don't merge requests with different write hints. */
872-
if (req->bio->bi_write_hint != next->bio->bi_write_hint)
873-
return NULL;
874-
if (req->bio->bi_ioprio != next->bio->bi_ioprio)
875-
return NULL;
876-
}
877-
878871
if (!blk_atomic_write_mergeable_rqs(req, next))
879872
return NULL;
880873

@@ -986,30 +979,16 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
986979
if (req_op(rq) != bio_op(bio))
987980
return false;
988981

989-
/* different data direction or already started, don't merge */
990-
if (bio_data_dir(bio) != rq_data_dir(rq))
991-
return false;
992-
993-
/* don't merge across cgroup boundaries */
994982
if (!blk_cgroup_mergeable(rq, bio))
995983
return false;
996-
997-
/* only merge integrity protected bio into ditto rq */
998984
if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
999985
return false;
1000-
1001-
/* Only merge if the crypt contexts are compatible */
1002986
if (!bio_crypt_rq_ctx_compatible(rq, bio))
1003987
return false;
1004-
1005-
if (rq->bio) {
1006-
/* Don't merge requests with different write hints. */
1007-
if (rq->bio->bi_write_hint != bio->bi_write_hint)
1008-
return false;
1009-
if (rq->bio->bi_ioprio != bio->bi_ioprio)
1010-
return false;
1011-
}
1012-
988+
if (rq->bio->bi_write_hint != bio->bi_write_hint)
989+
return false;
990+
if (rq->bio->bi_ioprio != bio->bi_ioprio)
991+
return false;
1013992
if (blk_atomic_write_mergeable_rq_bio(rq, bio) == false)
1014993
return false;
1015994

block/blk-mq.c

+6-8
Original file line numberDiff line numberDiff line change
@@ -388,7 +388,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
388388
rq->tag = BLK_MQ_NO_TAG;
389389
rq->internal_tag = BLK_MQ_NO_TAG;
390390
rq->start_time_ns = blk_time_get_ns();
391-
rq->part = NULL;
392391
blk_crypto_rq_set_defaults(rq);
393392
}
394393
EXPORT_SYMBOL(blk_rq_init);
@@ -3273,27 +3272,28 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
32733272
int (*bio_ctr)(struct bio *, struct bio *, void *),
32743273
void *data)
32753274
{
3276-
struct bio *bio, *bio_src;
3275+
struct bio *bio_src;
32773276

32783277
if (!bs)
32793278
bs = &fs_bio_set;
32803279

32813280
__rq_for_each_bio(bio_src, rq_src) {
3282-
bio = bio_alloc_clone(rq->q->disk->part0, bio_src, gfp_mask,
3283-
bs);
3281+
struct bio *bio = bio_alloc_clone(rq->q->disk->part0, bio_src,
3282+
gfp_mask, bs);
32843283
if (!bio)
32853284
goto free_and_out;
32863285

3287-
if (bio_ctr && bio_ctr(bio, bio_src, data))
3286+
if (bio_ctr && bio_ctr(bio, bio_src, data)) {
3287+
bio_put(bio);
32883288
goto free_and_out;
3289+
}
32893290

32903291
if (rq->bio) {
32913292
rq->biotail->bi_next = bio;
32923293
rq->biotail = bio;
32933294
} else {
32943295
rq->bio = rq->biotail = bio;
32953296
}
3296-
bio = NULL;
32973297
}
32983298

32993299
/* Copy attributes of the original request to the clone request. */
@@ -3311,8 +3311,6 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
33113311
return 0;
33123312

33133313
free_and_out:
3314-
if (bio)
3315-
bio_put(bio);
33163314
blk_rq_unprep_clone(rq);
33173315

33183316
return -ENOMEM;

0 commit comments

Comments
 (0)