author      Bruce Ashfield <bruce.ashfield@gmail.com>   2019-07-22 11:15:13 -0400
committer   Bruce Ashfield <bruce.ashfield@gmail.com>   2019-07-22 11:15:13 -0400
commit      c7f55a28286aeebf0d03d7a95852aa788b1485f1 (patch)
tree        b4a80cfd4193bd255c8949c668abb6aae1e15bcc /block
parent      113a670ce2eb3daca9fa9c4f43d064d002db7c9b (diff)
parent      e9b75c60f91a359cb0e1b2d0a9ed1c81485215e2 (diff)
Merge branch 'master-5.2'
Diffstat (limited to 'block')
-rw-r--r--  block/Kconfig                  |   25
-rw-r--r--  block/badblocks.c              |   10
-rw-r--r--  block/bfq-cgroup.c             |   16
-rw-r--r--  block/bfq-iosched.c            | 1390
-rw-r--r--  block/bfq-iosched.h            |  120
-rw-r--r--  block/bfq-wf2q.c               |   60
-rw-r--r--  block/bio-integrity.c          |   19
-rw-r--r--  block/bio.c                    |  331
-rw-r--r--  block/blk-cgroup.c             |   14
-rw-r--r--  block/blk-core.c               |  126
-rw-r--r--  block/blk-exec.c               |    1
-rw-r--r--  block/blk-flush.c              |    7
-rw-r--r--  block/blk-integrity.c          |   19
-rw-r--r--  block/blk-iolatency.c          |    2
-rw-r--r--  block/blk-merge.c              |  351
-rw-r--r--  block/blk-mq-cpumap.c          |   11
-rw-r--r--  block/blk-mq-debugfs.c         |  165
-rw-r--r--  block/blk-mq-debugfs.h         |   36
-rw-r--r--  block/blk-mq-pci.c             |   12
-rw-r--r--  block/blk-mq-rdma.c            |   14
-rw-r--r--  block/blk-mq-sched.c           |   50
-rw-r--r--  block/blk-mq-sched.h           |    1
-rw-r--r--  block/blk-mq-sysfs.c           |   17
-rw-r--r--  block/blk-mq-tag.c             |    3
-rw-r--r--  block/blk-mq-virtio.c          |   14
-rw-r--r--  block/blk-mq.c                 |  413
-rw-r--r--  block/blk-mq.h                 |   39
-rw-r--r--  block/blk-rq-qos.c             |    9
-rw-r--r--  block/blk-rq-qos.h             |    1
-rw-r--r--  block/blk-settings.c           |   37
-rw-r--r--  block/blk-stat.c               |    1
-rw-r--r--  block/blk-sysfs.c              |  111
-rw-r--r--  block/blk-throttle.c           |    2
-rw-r--r--  block/blk-timeout.c            |    1
-rw-r--r--  block/blk-wbt.c                |    1
-rw-r--r--  block/blk-zoned.c              |    1
-rw-r--r--  block/blk.h                    |   15
-rw-r--r--  block/bounce.c                 |   11
-rw-r--r--  block/bsg-lib.c                |   61
-rw-r--r--  block/bsg.c                    |  199
-rw-r--r--  block/elevator.c               |   14
-rw-r--r--  block/genhd.c                  |   86
-rw-r--r--  block/ioctl.c                  |    1
-rw-r--r--  block/ioprio.c                 |    1
-rw-r--r--  block/kyber-iosched.c          |   13
-rw-r--r--  block/mq-deadline.c            |    1
-rw-r--r--  block/opal_proto.h             |   12
-rw-r--r--  block/partition-generic.c      |    7
-rw-r--r--  block/partitions/acorn.c       |    7
-rw-r--r--  block/partitions/aix.h         |    1
-rw-r--r--  block/partitions/amiga.h       |    1
-rw-r--r--  block/partitions/efi.c         |   16
-rw-r--r--  block/partitions/efi.h         |   16
-rw-r--r--  block/partitions/ibm.h         |    1
-rw-r--r--  block/partitions/karma.h       |    1
-rw-r--r--  block/partitions/ldm.c         |   18
-rw-r--r--  block/partitions/ldm.h         |   16
-rw-r--r--  block/partitions/msdos.h       |    1
-rw-r--r--  block/partitions/osf.h         |    1
-rw-r--r--  block/partitions/sgi.h         |    1
-rw-r--r--  block/partitions/sun.h         |    1
-rw-r--r--  block/partitions/sysv68.h      |    1
-rw-r--r--  block/partitions/ultrix.h      |    1
-rw-r--r--  block/scsi_ioctl.c             |   16
-rw-r--r--  block/sed-opal.c               |  726
-rw-r--r--  block/t10-pi.c                 |   19
66 files changed, 2491 insertions, 2204 deletions
diff --git a/block/Kconfig b/block/Kconfig
index 028bc085dac8..2466dcc3ef1d 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -26,30 +26,6 @@ menuconfig BLOCK
if BLOCK
-config LBDAF
- bool "Support for large (2TB+) block devices and files"
- depends on !64BIT
- default y
- help
- Enable block devices or files of size 2TB and larger.
-
- This option is required to support the full capacity of large
- (2TB+) block devices, including RAID, disk, Network Block Device,
- Logical Volume Manager (LVM) and loopback.
-
- This option also enables support for single files larger than
- 2TB.
-
- The ext4 filesystem requires that this feature be enabled in
- order to support filesystems that have the huge_file feature
- enabled. Otherwise, it will refuse to mount in the read-write
- mode any filesystems that use the huge_file feature, which is
- enabled by default by mke2fs.ext4.
-
- The GFS2 filesystem also requires this feature.
-
- If unsure, say Y.
-
config BLK_SCSI_REQUEST
bool
@@ -97,6 +73,7 @@ config BLK_DEV_INTEGRITY
config BLK_DEV_ZONED
bool "Zoned block device support"
+ select MQ_IOSCHED_DEADLINE
---help---
Block layer zoned block device support. This option enables
support for ZAC/ZBC host-managed and host-aware zoned block devices.
diff --git a/block/badblocks.c b/block/badblocks.c
index 91f7bcf979d3..2e5f5697db35 100644
--- a/block/badblocks.c
+++ b/block/badblocks.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Bad block management
*
* - Heavily based on MD badblocks code from Neil Brown
*
* Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/badblocks.h>
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index c6113af31960..b3796a40a61a 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* cgroups support for the BFQ I/O scheduler.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include <linux/module.h>
#include <linux/slab.h>
@@ -578,7 +569,8 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqg_and_blkg_get(bfqg);
if (bfq_bfqq_busy(bfqq)) {
- bfq_pos_tree_add_move(bfqd, bfqq);
+ if (unlikely(!bfqd->nonrot_with_queueing))
+ bfq_pos_tree_add_move(bfqd, bfqq);
bfq_activate_bfqq(bfqd, bfqq);
}
@@ -1102,7 +1094,7 @@ struct cftype bfq_blkcg_legacy_files[] = {
},
#endif /* CONFIG_DEBUG_BLK_CGROUP */
- /* the same statictics which cover the bfqg and its descendants */
+ /* the same statistics which cover the bfqg and its descendants */
{
.name = "bfq.io_service_bytes_recursive",
.private = (unsigned long)&blkcg_policy_bfq,
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index cd307767a134..e5db3856b194 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Budget Fair Queueing (BFQ) I/O scheduler.
*
@@ -12,16 +13,6 @@
*
* Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
* BFQ is a proportional-share I/O scheduler, with some extra
* low-latency capabilities. BFQ also supports full hierarchical
* scheduling through cgroups. Next paragraphs provide an introduction
@@ -189,7 +180,7 @@ static const int bfq_default_max_budget = 16 * 1024;
/*
* When a sync request is dispatched, the queue that contains that
* request, and all the ancestor entities of that queue, are charged
- * with the number of sectors of the request. In constrast, if the
+ * with the number of sectors of the request. In contrast, if the
* request is async, then the queue and its ancestor entities are
* charged with the number of sectors of the request, multiplied by
* the factor below. This throttles the bandwidth for async I/O,
@@ -217,7 +208,7 @@ const int bfq_timeout = HZ / 8;
* queue merging.
*
* As can be deduced from the low time limit below, queue merging, if
- * successful, happens at the very beggining of the I/O of the involved
+ * successful, happens at the very beginning of the I/O of the involved
* cooperating processes, as a consequence of the arrival of the very
* first requests from each cooperator. After that, there is very
* little chance to find cooperators.
@@ -230,13 +221,26 @@ static struct kmem_cache *bfq_pool;
#define BFQ_MIN_TT (2 * NSEC_PER_MSEC)
/* hw_tag detection: parallel requests threshold and min samples needed. */
-#define BFQ_HW_QUEUE_THRESHOLD 4
+#define BFQ_HW_QUEUE_THRESHOLD 3
#define BFQ_HW_QUEUE_SAMPLES 32
#define BFQQ_SEEK_THR (sector_t)(8 * 100)
#define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
+#define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \
+ (get_sdist(last_pos, rq) > \
+ BFQQ_SEEK_THR && \
+ (!blk_queue_nonrot(bfqd->queue) || \
+ blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT))
#define BFQQ_CLOSE_THR (sector_t)(8 * 1024)
#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19)
+/*
+ * Sync random I/O is likely to be confused with soft real-time I/O,
+ * because it is characterized by limited throughput and apparently
+ * isochronous arrival pattern. To avoid false positives, queues
+ * containing only random (seeky) I/O are prevented from being tagged
+ * as soft real-time.
+ */
+#define BFQQ_TOTALLY_SEEKY(bfqq) (bfqq->seek_history == -1)
/* Min number of samples required to perform peak-rate update */
#define BFQ_RATE_MIN_SAMPLES 32
@@ -428,7 +432,7 @@ void bfq_schedule_dispatch(struct bfq_data *bfqd)
/*
* Lifted from AS - choose which of rq1 and rq2 that is best served now.
- * We choose the request that is closesr to the head right now. Distance
+ * We choose the request that is closer to the head right now. Distance
* behind the head is penalized and only allowed to a certain extent.
*/
static struct request *bfq_choose_req(struct bfq_data *bfqd,
@@ -590,7 +594,16 @@ static bool bfq_too_late_for_merging(struct bfq_queue *bfqq)
bfq_merge_time_limit);
}
-void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+/*
+ * The following function is not marked as __cold because it is
+ * actually cold, but for the same performance goal described in the
+ * comments on the likely() at the beginning of
+ * bfq_setup_cooperator(). Unexpectedly, to reach an even lower
+ * execution time for the case where this function is not invoked, we
+ * had to add an unlikely() in each involved if().
+ */
+void __cold
+bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
struct rb_node **p, *parent;
struct bfq_queue *__bfqq;
@@ -624,52 +637,68 @@ void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
}
/*
- * Tell whether there are active queues with different weights or
- * active groups.
- */
-static bool bfq_varied_queue_weights_or_active_groups(struct bfq_data *bfqd)
-{
- /*
- * For queue weights to differ, queue_weights_tree must contain
- * at least two nodes.
- */
- return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
- (bfqd->queue_weights_tree.rb_node->rb_left ||
- bfqd->queue_weights_tree.rb_node->rb_right)
-#ifdef CONFIG_BFQ_GROUP_IOSCHED
- ) ||
- (bfqd->num_groups_with_pending_reqs > 0
-#endif
- );
-}
-
-/*
- * The following function returns true if every queue must receive the
- * same share of the throughput (this condition is used when deciding
- * whether idling may be disabled, see the comments in the function
- * bfq_better_to_idle()).
+ * The following function returns false either if every active queue
+ * must receive the same share of the throughput (symmetric scenario),
+ * or, as a special case, if bfqq must receive a share of the
+ * throughput lower than or equal to the share that every other active
+ * queue must receive. If bfqq does sync I/O, then these are the only
+ * two cases where bfqq happens to be guaranteed its share of the
+ * throughput even if I/O dispatching is not plugged when bfqq remains
+ * temporarily empty (for more details, see the comments in the
+ * function bfq_better_to_idle()). For this reason, the return value
+ * of this function is used to check whether I/O-dispatch plugging can
+ * be avoided.
*
- * Such a scenario occurs when:
+ * The above first case (symmetric scenario) occurs when:
* 1) all active queues have the same weight,
- * 2) all active groups at the same level in the groups tree have the same
- * weight,
+ * 2) all active queues belong to the same I/O-priority class,
* 3) all active groups at the same level in the groups tree have the same
+ * weight,
+ * 4) all active groups at the same level in the groups tree have the same
* number of children.
*
* Unfortunately, keeping the necessary state for evaluating exactly
* the last two symmetry sub-conditions above would be quite complex
- * and time consuming. Therefore this function evaluates, instead,
- * only the following stronger two sub-conditions, for which it is
+ * and time consuming. Therefore this function evaluates, instead,
+ * only the following stronger three sub-conditions, for which it is
* much easier to maintain the needed state:
* 1) all active queues have the same weight,
- * 2) there are no active groups.
+ * 2) all active queues belong to the same I/O-priority class,
+ * 3) there are no active groups.
* In particular, the last condition is always true if hierarchical
* support or the cgroups interface are not enabled, thus no state
* needs to be maintained in this case.
*/
-static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
+static bool bfq_asymmetric_scenario(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
{
- return !bfq_varied_queue_weights_or_active_groups(bfqd);
+ bool smallest_weight = bfqq &&
+ bfqq->weight_counter &&
+ bfqq->weight_counter ==
+ container_of(
+ rb_first_cached(&bfqd->queue_weights_tree),
+ struct bfq_weight_counter,
+ weights_node);
+
+ /*
+ * For queue weights to differ, queue_weights_tree must contain
+ * at least two nodes.
+ */
+ bool varied_queue_weights = !smallest_weight &&
+ !RB_EMPTY_ROOT(&bfqd->queue_weights_tree.rb_root) &&
+ (bfqd->queue_weights_tree.rb_root.rb_node->rb_left ||
+ bfqd->queue_weights_tree.rb_root.rb_node->rb_right);
+
+ bool multiple_classes_busy =
+ (bfqd->busy_queues[0] && bfqd->busy_queues[1]) ||
+ (bfqd->busy_queues[0] && bfqd->busy_queues[2]) ||
+ (bfqd->busy_queues[1] && bfqd->busy_queues[2]);
+
+ return varied_queue_weights || multiple_classes_busy
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
+ || bfqd->num_groups_with_pending_reqs > 0
+#endif
+ ;
}
/*
@@ -686,10 +715,11 @@ static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
* should be low too.
*/
void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
- struct rb_root *root)
+ struct rb_root_cached *root)
{
struct bfq_entity *entity = &bfqq->entity;
- struct rb_node **new = &(root->rb_node), *parent = NULL;
+ struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
+ bool leftmost = true;
/*
* Do not insert if the queue is already associated with a
@@ -718,8 +748,10 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
}
if (entity->weight < __counter->weight)
new = &((*new)->rb_left);
- else
+ else {
new = &((*new)->rb_right);
+ leftmost = false;
+ }
}
bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
@@ -728,25 +760,26 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
/*
* In the unlucky event of an allocation failure, we just
* exit. This will cause the weight of queue to not be
- * considered in bfq_varied_queue_weights_or_active_groups,
- * which, in its turn, causes the scenario to be deemed
- * wrongly symmetric in case bfqq's weight would have been
- * the only weight making the scenario asymmetric. On the
- * bright side, no unbalance will however occur when bfqq
- * becomes inactive again (the invocation of this function
- * is triggered by an activation of queue). In fact,
- * bfq_weights_tree_remove does nothing if
- * !bfqq->weight_counter.
+ * considered in bfq_asymmetric_scenario, which, in its turn,
+ * causes the scenario to be deemed wrongly symmetric in case
+ * bfqq's weight would have been the only weight making the
+ * scenario asymmetric. On the bright side, no unbalance will
+ * however occur when bfqq becomes inactive again (the
+ * invocation of this function is triggered by an activation
+ * of queue). In fact, bfq_weights_tree_remove does nothing
+ * if !bfqq->weight_counter.
*/
if (unlikely(!bfqq->weight_counter))
return;
bfqq->weight_counter->weight = entity->weight;
rb_link_node(&bfqq->weight_counter->weights_node, parent, new);
- rb_insert_color(&bfqq->weight_counter->weights_node, root);
+ rb_insert_color_cached(&bfqq->weight_counter->weights_node, root,
+ leftmost);
inc_counter:
bfqq->weight_counter->num_active++;
+ bfqq->ref++;
}
/*
@@ -757,7 +790,7 @@ inc_counter:
*/
void __bfq_weights_tree_remove(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
- struct rb_root *root)
+ struct rb_root_cached *root)
{
if (!bfqq->weight_counter)
return;
@@ -766,11 +799,12 @@ void __bfq_weights_tree_remove(struct bfq_data *bfqd,
if (bfqq->weight_counter->num_active > 0)
goto reset_entity_pointer;
- rb_erase(&bfqq->weight_counter->weights_node, root);
+ rb_erase_cached(&bfqq->weight_counter->weights_node, root);
kfree(bfqq->weight_counter);
reset_entity_pointer:
bfqq->weight_counter = NULL;
+ bfq_put_queue(bfqq);
}
/*
@@ -782,9 +816,6 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
{
struct bfq_entity *entity = bfqq->entity.parent;
- __bfq_weights_tree_remove(bfqd, bfqq,
- &bfqd->queue_weights_tree);
-
for_each_entity(entity) {
struct bfq_sched_data *sd = entity->my_sched_data;
@@ -818,6 +849,15 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
bfqd->num_groups_with_pending_reqs--;
}
}
+
+ /*
+ * Next function is invoked last, because it causes bfqq to be
+ * freed if the following holds: bfqq is not in service and
+ * has no dispatched request. DO NOT use bfqq after the next
+ * function invocation.
+ */
+ __bfq_weights_tree_remove(bfqd, bfqq,
+ &bfqd->queue_weights_tree);
}
/*
@@ -873,7 +913,8 @@ static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
static unsigned long bfq_serv_to_charge(struct request *rq,
struct bfq_queue *bfqq)
{
- if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
+ if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1 ||
+ bfq_asymmetric_scenario(bfqq->bfqd, bfqq))
return blk_rq_sectors(rq);
return blk_rq_sectors(rq) * bfq_async_charge_factor;
@@ -907,8 +948,10 @@ static void bfq_updated_next_req(struct bfq_data *bfqd,
*/
return;
- new_budget = max_t(unsigned long, bfqq->max_budget,
- bfq_serv_to_charge(next_rq, bfqq));
+ new_budget = max_t(unsigned long,
+ max_t(unsigned long, bfqq->max_budget,
+ bfq_serv_to_charge(next_rq, bfqq)),
+ entity->service);
if (entity->budget != new_budget) {
entity->budget = new_budget;
bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
@@ -937,7 +980,7 @@ static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
* of several files
* mplayer took 23 seconds to start, if constantly weight-raised.
*
- * As for higher values than that accomodating the above bad
+ * As for higher values than that accommodating the above bad
* scenario, tests show that higher values would often yield
* the opposite of the desired result, i.e., would worsen
* responsiveness by allowing non-interactive applications to
@@ -976,6 +1019,7 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
else
bfq_clear_bfqq_IO_bound(bfqq);
+ bfqq->entity.new_weight = bic->saved_weight;
bfqq->ttime = bic->saved_ttime;
bfqq->wr_coeff = bic->saved_wr_coeff;
bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt;
@@ -1011,7 +1055,8 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
static int bfqq_process_refs(struct bfq_queue *bfqq)
{
- return bfqq->ref - bfqq->allocated - bfqq->entity.on_st;
+ return bfqq->ref - bfqq->allocated - bfqq->entity.on_st -
+ (bfqq->weight_counter != NULL);
}
/* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
@@ -1022,8 +1067,18 @@ static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
hlist_del_init(&item->burst_list_node);
- hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
- bfqd->burst_size = 1;
+
+ /*
+ * Start the creation of a new burst list only if there is no
+ * active queue. See comments on the conditional invocation of
+ * bfq_handle_burst().
+ */
+ if (bfq_tot_busy_queues(bfqd) == 0) {
+ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
+ bfqd->burst_size = 1;
+ } else
+ bfqd->burst_size = 0;
+
bfqd->burst_parent_entity = bfqq->entity.parent;
}
@@ -1079,7 +1134,8 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
* many parallel threads/processes. Examples are systemd during boot,
* or git grep. To help these processes get their job done as soon as
* possible, it is usually better to not grant either weight-raising
- * or device idling to their queues.
+ * or device idling to their queues, unless these queues must be
+ * protected from the I/O flowing through other active queues.
*
* In this comment we describe, firstly, the reasons why this fact
* holds, and, secondly, the next function, which implements the main
@@ -1091,7 +1147,10 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
* cumulatively served, the sooner the target job of these queues gets
* completed. As a consequence, weight-raising any of these queues,
* which also implies idling the device for it, is almost always
- * counterproductive. In most cases it just lowers throughput.
+ * counterproductive, unless there are other active queues to isolate
+ * these new queues from. If there no other active queues, then
+ * weight-raising these new queues just lowers throughput in most
+ * cases.
*
* On the other hand, a burst of queue creations may be caused also by
* the start of an application that does not consist of a lot of
@@ -1125,14 +1184,16 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
* are very rare. They typically occur if some service happens to
* start doing I/O exactly when the interactive task starts.
*
- * Turning back to the next function, it implements all the steps
- * needed to detect the occurrence of a large burst and to properly
- * mark all the queues belonging to it (so that they can then be
- * treated in a different way). This goal is achieved by maintaining a
- * "burst list" that holds, temporarily, the queues that belong to the
- * burst in progress. The list is then used to mark these queues as
- * belonging to a large burst if the burst does become large. The main
- * steps are the following.
+ * Turning back to the next function, it is invoked only if there are
+ * no active queues (apart from active queues that would belong to the
+ * same, possible burst bfqq would belong to), and it implements all
+ * the steps needed to detect the occurrence of a large burst and to
+ * properly mark all the queues belonging to it (so that they can then
+ * be treated in a different way). This goal is achieved by
+ * maintaining a "burst list" that holds, temporarily, the queues that
+ * belong to the burst in progress. The list is then used to mark
+ * these queues as belonging to a large burst if the burst does become
+ * large. The main steps are the following.
*
* . when the very first queue is created, the queue is inserted into the
* list (as it could be the first queue in a possible burst)
@@ -1380,7 +1441,15 @@ static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd,
{
struct bfq_entity *entity = &bfqq->entity;
- if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time) {
+ /*
+ * In the next compound condition, we check also whether there
+ * is some budget left, because otherwise there is no point in
+ * trying to go on serving bfqq with this same budget: bfqq
+ * would be expired immediately after being selected for
+ * service. This would only cause useless overhead.
+ */
+ if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time &&
+ bfq_bfqq_budget_left(bfqq) > 0) {
/*
* We do not clear the flag non_blocking_wait_rq here, as
* the latter is used in bfq_activate_bfqq to signal
@@ -1569,6 +1638,7 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
*/
in_burst = bfq_bfqq_in_large_burst(bfqq);
soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
+ !BFQQ_TOTALLY_SEEKY(bfqq) &&
!in_burst &&
time_is_before_jiffies(bfqq->soft_rt_next_start) &&
bfqq->dispatched == 0;
@@ -1677,6 +1747,123 @@ static void bfq_add_request(struct request *rq)
bfqq->queued[rq_is_sync(rq)]++;
bfqd->queued++;
+ if (RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_bfqq_sync(bfqq)) {
+ /*
+ * Periodically reset inject limit, to make sure that
+ * the latter eventually drops in case workload
+ * changes, see step (3) in the comments on
+ * bfq_update_inject_limit().
+ */
+ if (time_is_before_eq_jiffies(bfqq->decrease_time_jif +
+ msecs_to_jiffies(1000))) {
+ /* invalidate baseline total service time */
+ bfqq->last_serv_time_ns = 0;
+
+ /*
+ * Reset pointer in case we are waiting for
+ * some request completion.
+ */
+ bfqd->waited_rq = NULL;
+
+ /*
+ * If bfqq has a short think time, then start
+ * by setting the inject limit to 0
+ * prudentially, because the service time of
+ * an injected I/O request may be higher than
+ * the think time of bfqq, and therefore, if
+ * one request was injected when bfqq remains
+ * empty, this injected request might delay
+ * the service of the next I/O request for
+ * bfqq significantly. In case bfqq can
+ * actually tolerate some injection, then the
+ * adaptive update will however raise the
+ * limit soon. This lucky circumstance holds
+ * exactly because bfqq has a short think
+ * time, and thus, after remaining empty, is
+ * likely to get new I/O enqueued---and then
+ * completed---before being expired. This is
+ * the very pattern that gives the
+ * limit-update algorithm the chance to
+ * measure the effect of injection on request
+ * service times, and then to update the limit
+ * accordingly.
+ *
+ * On the opposite end, if bfqq has a long
+ * think time, then start directly by 1,
+ * because:
+ * a) on the bright side, keeping at most one
+ * request in service in the drive is unlikely
+ * to cause any harm to the latency of bfqq's
+ * requests, as the service time of a single
+ * request is likely to be lower than the
+ * think time of bfqq;
+ * b) on the downside, after becoming empty,
+ * bfqq is likely to expire before getting its
+ * next request. With this request arrival
+ * pattern, it is very hard to sample total
+ * service times and update the inject limit
+ * accordingly (see comments on
+ * bfq_update_inject_limit()). So the limit is
+ * likely to be never, or at least seldom,
+ * updated. As a consequence, by setting the
+ * limit to 1, we avoid that no injection ever
+ * occurs with bfqq. On the downside, this
+ * proactive step further reduces chances to
+ * actually compute the baseline total service
+ * time. Thus it reduces chances to execute the
+ * limit-update algorithm and possibly raise the
+ * limit to more than 1.
+ */
+ if (bfq_bfqq_has_short_ttime(bfqq))
+ bfqq->inject_limit = 0;
+ else
+ bfqq->inject_limit = 1;
+ bfqq->decrease_time_jif = jiffies;
+ }
+
+ /*
+ * The following conditions must hold to setup a new
+ * sampling of total service time, and then a new
+ * update of the inject limit:
+ * - bfqq is in service, because the total service
+ * time is evaluated only for the I/O requests of
+ * the queues in service;
+ * - this is the right occasion to compute or to
+ * lower the baseline total service time, because
+ * there are actually no requests in the drive,
+ * or
+ * the baseline total service time is available, and
+ * this is the right occasion to compute the other
+ * quantity needed to update the inject limit, i.e.,
+ * the total service time caused by the amount of
+ * injection allowed by the current value of the
+ * limit. It is the right occasion because injection
+ * has actually been performed during the service
+ * hole, and there are still in-flight requests,
+ * which are very likely to be exactly the injected
+ * requests, or part of them;
+ * - the minimum interval for sampling the total
+ * service time and updating the inject limit has
+ * elapsed.
+ */
+ if (bfqq == bfqd->in_service_queue &&
+ (bfqd->rq_in_driver == 0 ||
+ (bfqq->last_serv_time_ns > 0 &&
+ bfqd->rqs_injected && bfqd->rq_in_driver > 0)) &&
+ time_is_before_eq_jiffies(bfqq->decrease_time_jif +
+ msecs_to_jiffies(100))) {
+ bfqd->last_empty_occupied_ns = ktime_get_ns();
+ /*
+ * Start the state machine for measuring the
+ * total service time of rq: setting
+ * wait_dispatch will cause bfqd->waited_rq to
+ * be set when rq will be dispatched.
+ */
+ bfqd->wait_dispatch = true;
+ bfqd->rqs_injected = false;
+ }
+ }
+
elv_rb_add(&bfqq->sort_list, rq);
/*
@@ -1688,8 +1875,9 @@ static void bfq_add_request(struct request *rq)
/*
* Adjust priority tree position, if next_rq changes.
+ * See comments on bfq_pos_tree_add_move() for the unlikely().
*/
- if (prev != bfqq->next_rq)
+ if (unlikely(!bfqd->nonrot_with_queueing && prev != bfqq->next_rq))
bfq_pos_tree_add_move(bfqd, bfqq);
if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */
@@ -1829,7 +2017,9 @@ static void bfq_remove_request(struct request_queue *q,
bfqq->pos_root = NULL;
}
} else {
- bfq_pos_tree_add_move(bfqd, bfqq);
+ /* see comments on bfq_pos_tree_add_move() for the unlikely() */
+ if (unlikely(!bfqd->nonrot_with_queueing))
+ bfq_pos_tree_add_move(bfqd, bfqq);
}
if (rq->cmd_flags & REQ_META)
@@ -1914,7 +2104,12 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
*/
if (prev != bfqq->next_rq) {
bfq_updated_next_req(bfqd, bfqq);
- bfq_pos_tree_add_move(bfqd, bfqq);
+ /*
+ * See comments on bfq_pos_tree_add_move() for
+ * the unlikely().
+ */
+ if (unlikely(!bfqd->nonrot_with_queueing))
+ bfq_pos_tree_add_move(bfqd, bfqq);
}
}
}
@@ -2197,6 +2392,46 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct bfq_queue *in_service_bfqq, *new_bfqq;
/*
+ * Do not perform queue merging if the device is non
+ * rotational and performs internal queueing. In fact, such a
+ * device reaches a high speed through internal parallelism
+ * and pipelining. This means that, to reach a high
+ * throughput, it must have many requests enqueued at the same
+ * time. But, in this configuration, the internal scheduling
+ * algorithm of the device does exactly the job of queue
+ * merging: it reorders requests so as to obtain as much as
+ * possible a sequential I/O pattern. As a consequence, with
+ * the workload generated by processes doing interleaved I/O,
+ * the throughput reached by the device is likely to be the
+ * same, with and without queue merging.
+ *
+ * Disabling merging also provides a remarkable benefit in
+ * terms of throughput. Merging tends to make many workloads
+ * artificially more uneven, because of shared queues
+ * remaining non empty for incomparably more time than
+ * non-merged queues. This may accentuate workload
+ * asymmetries. For example, if one of the queues in a set of
+ * merged queues has a higher weight than a normal queue, then
+ * the shared queue may inherit such a high weight and, by
+ * staying almost always active, may force BFQ to perform I/O
+ * plugging most of the time. This evidently makes it harder
+ * for BFQ to let the device reach a high throughput.
+ *
+ * Finally, the likely() macro below is not used because one
+ * of the two branches is more likely than the other, but to
+ * have the code path after the following if() executed as
+ * fast as possible for the case of a non rotational device
+ * with queueing. We want it because this is the fastest kind
+ * of device. On the opposite end, the likely() may lengthen
+ * the execution time of BFQ for the case of slower devices
+ * (rotational or at least without queueing). But in this case
+ * the execution time of BFQ matters very little, if not at
+ * all.
+ */
+ if (likely(bfqd->nonrot_with_queueing))
+ return NULL;
+
+ /*
* Prevent bfqq from being merged if it has been created too
* long ago. The idea is that true cooperating processes, and
* thus their associated bfq_queues, are supposed to be
@@ -2217,14 +2452,15 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
return NULL;
/* If there is only one backlogged queue, don't search. */
- if (bfqd->busy_queues == 1)
+ if (bfq_tot_busy_queues(bfqd) == 1)
return NULL;
in_service_bfqq = bfqd->in_service_queue;
if (in_service_bfqq && in_service_bfqq != bfqq &&
likely(in_service_bfqq != &bfqd->oom_bfqq) &&
- bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
+ bfq_rq_close_to_sector(io_struct, request,
+ bfqd->in_serv_last_pos) &&
bfqq->entity.parent == in_service_bfqq->entity.parent &&
bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
@@ -2258,6 +2494,7 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq)
if (!bic)
return;
+ bic->saved_weight = bfqq->entity.orig_weight;
bic->saved_ttime = bfqq->ttime;
bic->saved_has_short_ttime = bfq_bfqq_has_short_ttime(bfqq);
bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
@@ -2346,6 +2583,16 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
* assignment causes no harm).
*/
new_bfqq->bic = NULL;
+ /*
+ * If the queue is shared, the pid is the pid of one of the associated
+ * processes. Which pid depends on the exact sequence of merge events
+ * the queue underwent. So printing such a pid is useless and confusing
+ * because it reports a random pid between those of the associated
+ * processes.
+ * We mark such a queue with a pid -1, and then print SHARED instead of
+ * a pid in logging messages.
+ */
+ new_bfqq->pid = -1;
bfqq->bic = NULL;
/* release process reference to bfqq */
bfq_put_queue(bfqq);
@@ -2380,8 +2627,8 @@ static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
/*
* bic still points to bfqq, then it has not yet been
* redirected to some other bfq_queue, and a queue
- * merge beween bfqq and new_bfqq can be safely
- * fulfillled, i.e., bic can be redirected to new_bfqq
+ * merge between bfqq and new_bfqq can be safely
+ * fulfilled, i.e., bic can be redirected to new_bfqq
* and bfqq can be put.
*/
bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
@@ -2515,10 +2762,14 @@ static void bfq_arm_slice_timer(struct bfq_data *bfqd)
* queue).
*/
if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
- bfq_symmetric_scenario(bfqd))
+ !bfq_asymmetric_scenario(bfqd, bfqq))
sl = min_t(u64, sl, BFQ_MIN_TT);
+ else if (bfqq->wr_coeff > 1)
+ sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC);
bfqd->last_idling_start = ktime_get();
+ bfqd->last_idling_start_jiffies = jiffies;
+
hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
HRTIMER_MODE_REL);
bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
@@ -2742,7 +2993,7 @@ static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
if ((bfqd->rq_in_driver > 0 ||
now_ns - bfqd->last_completion < BFQ_MIN_TT)
- && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR)
+ && !BFQ_RQ_SEEKY(bfqd, bfqd->last_position, rq))
bfqd->sequential_samples++;
bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);
@@ -2764,6 +3015,8 @@ update_rate_and_reset:
bfq_update_rate_reset(bfqd, rq);
update_last_values:
bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
+ if (RQ_BFQQ(rq) == bfqd->in_service_queue)
+ bfqd->in_serv_last_pos = bfqd->last_position;
bfqd->last_dispatch = now_ns;
}
@@ -2792,7 +3045,7 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
bfq_remove_request(q, rq);
}
-static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
/*
* If this bfqq is shared between multiple processes, check
@@ -2818,16 +3071,20 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfq_requeue_bfqq(bfqd, bfqq, true);
/*
* Resort priority tree of potential close cooperators.
+ * See comments on bfq_pos_tree_add_move() for the unlikely().
*/
- bfq_pos_tree_add_move(bfqd, bfqq);
+ if (unlikely(!bfqd->nonrot_with_queueing))
+ bfq_pos_tree_add_move(bfqd, bfqq);
}
/*
* All in-service entities must have been properly deactivated
* or requeued before executing the next function, which
- * resets all in-service entites as no more in service.
+ * resets all in-service entities as no more in service. This
+ * may cause bfqq to be freed. If this happens, the next
+ * function returns true.
*/
- __bfq_bfqd_reset_in_service(bfqd);
+ return __bfq_bfqd_reset_in_service(bfqd);
}
/**
@@ -3191,13 +3448,6 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
}
-static bool bfq_bfqq_injectable(struct bfq_queue *bfqq)
-{
- return BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
- blk_queue_nonrot(bfqq->bfqd->queue) &&
- bfqq->bfqd->hw_tag;
-}
-
/**
* bfq_bfqq_expire - expire a queue.
* @bfqd: device owning the queue.
@@ -3232,7 +3482,6 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
bool slow;
unsigned long delta = 0;
struct bfq_entity *entity = &bfqq->entity;
- int ref;
/*
* Check whether the process is slow (see bfq_bfqq_is_slow).
@@ -3274,16 +3523,32 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
* requests, then the request pattern is isochronous
* (see the comments on the function
* bfq_bfqq_softrt_next_start()). Thus we can compute
- * soft_rt_next_start. If, instead, the queue still
- * has outstanding requests, then we have to wait for
- * the completion of all the outstanding requests to
- * discover whether the request pattern is actually
- * isochronous.
+ * soft_rt_next_start. And we do it, unless bfqq is in
+ * interactive weight raising. We do not do it in the
+ * latter subcase, for the following reason. bfqq may
+ * be conveying the I/O needed to load a soft
+ * real-time application. Such an application will
+ * actually exhibit a soft real-time I/O pattern after
+ * it finally starts doing its job. But, if
+ * soft_rt_next_start is computed here for an
+ * interactive bfqq, and bfqq had received a lot of
+ * service before remaining with no outstanding
+ * request (likely to happen on a fast device), then
+ * soft_rt_next_start would be assigned such a high
+ * value that, for a very long time, bfqq would be
+ * prevented from being possibly considered as soft
+ * real time.
+ *
+ * If, instead, the queue still has outstanding
+ * requests, then we have to wait for the completion
+ * of all the outstanding requests to discover whether
+ * the request pattern is actually isochronous.
*/
- if (bfqq->dispatched == 0)
+ if (bfqq->dispatched == 0 &&
+ bfqq->wr_coeff != bfqd->bfq_wr_coeff)
bfqq->soft_rt_next_start =
bfq_bfqq_softrt_next_start(bfqd, bfqq);
- else {
+ else if (bfqq->dispatched > 0) {
/*
* Schedule an update of soft_rt_next_start to when
* the task may be discovered to be isochronous.
@@ -3297,18 +3562,22 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq));
/*
+ * bfqq expired, so no total service time needs to be computed
+ * any longer: reset state machine for measuring total service
+ * times.
+ */
+ bfqd->rqs_injected = bfqd->wait_dispatch = false;
+ bfqd->waited_rq = NULL;
+
+ /*
* Increase, decrease or leave budget unchanged according to
* reason.
*/
__bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
- ref = bfqq->ref;
- __bfq_bfqq_expire(bfqd, bfqq);
-
- if (ref == 1) /* bfqq is gone, no more actions on it */
+ if (__bfq_bfqq_expire(bfqd, bfqq))
+ /* bfqq is gone, no more actions on it */
return;
- bfqq->injected_service = 0;
-
/* mark bfqq as waiting a request only if a bic still points to it */
if (!bfq_bfqq_busy(bfqq) &&
reason != BFQQE_BUDGET_TIMEOUT &&
@@ -3376,53 +3645,13 @@ static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
bfq_bfqq_budget_timeout(bfqq);
}
-/*
- * For a queue that becomes empty, device idling is allowed only if
- * this function returns true for the queue. As a consequence, since
- * device idling plays a critical role in both throughput boosting and
- * service guarantees, the return value of this function plays a
- * critical role in both these aspects as well.
- *
- * In a nutshell, this function returns true only if idling is
- * beneficial for throughput or, even if detrimental for throughput,
- * idling is however necessary to preserve service guarantees (low
- * latency, desired throughput distribution, ...). In particular, on
- * NCQ-capable devices, this function tries to return false, so as to
- * help keep the drives' internal queues full, whenever this helps the
- * device boost the throughput without causing any service-guarantee
- * issue.
- *
- * In more detail, the return value of this function is obtained by,
- * first, computing a number of boolean variables that take into
- * account throughput and service-guarantee issues, and, then,
- * combining these variables in a logical expression. Most of the
- * issues taken into account are not trivial. We discuss these issues
- * individually while introducing the variables.
- */
-static bool bfq_better_to_idle(struct bfq_queue *bfqq)
+static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
{
- struct bfq_data *bfqd = bfqq->bfqd;
bool rot_without_queueing =
!blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag,
bfqq_sequential_and_IO_bound,
- idling_boosts_thr, idling_boosts_thr_without_issues,
- idling_needed_for_service_guarantees,
- asymmetric_scenario;
-
- if (bfqd->strict_guarantees)
- return true;
-
- /*
- * Idling is performed only if slice_idle > 0. In addition, we
- * do not idle if
- * (a) bfqq is async
- * (b) bfqq is in the idle io prio class: in this case we do
- * not idle because we want to minimize the bandwidth that
- * queues in this class can steal to higher-priority queues
- */
- if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
- bfq_class_idle(bfqq))
- return false;
+ idling_boosts_thr;
bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) &&
bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq);
@@ -3454,8 +3683,7 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
bfqq_sequential_and_IO_bound);
/*
- * The value of the next variable,
- * idling_boosts_thr_without_issues, is equal to that of
+ * The return value of this function is equal to that of
* idling_boosts_thr, unless a special case holds. In this
* special case, described below, idling may cause problems to
* weight-raised queues.
@@ -3472,217 +3700,259 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
* which enqueue several requests in advance, and further
* reorder internally-queued requests.
*
- * For this reason, we force to false the value of
- * idling_boosts_thr_without_issues if there are weight-raised
- * busy queues. In this case, and if bfqq is not weight-raised,
- * this guarantees that the device is not idled for bfqq (if,
- * instead, bfqq is weight-raised, then idling will be
- * guaranteed by another variable, see below). Combined with
- * the timestamping rules of BFQ (see [1] for details), this
- * behavior causes bfqq, and hence any sync non-weight-raised
- * queue, to get a lower number of requests served, and thus
- * to ask for a lower number of requests from the request
- * pool, before the busy weight-raised queues get served
- * again. This often mitigates starvation problems in the
- * presence of heavy write workloads and NCQ, thereby
- * guaranteeing a higher application and system responsiveness
- * in these hostile scenarios.
+ * For this reason, we force to false the return value if
+ * there are weight-raised busy queues. In this case, and if
+ * bfqq is not weight-raised, this guarantees that the device
+ * is not idled for bfqq (if, instead, bfqq is weight-raised,
+ * then idling will be guaranteed by another variable, see
+ * below). Combined with the timestamping rules of BFQ (see
+ * [1] for details), this behavior causes bfqq, and hence any
+ * sync non-weight-raised queue, to get a lower number of
+ * requests served, and thus to ask for a lower number of
+ * requests from the request pool, before the busy
+ * weight-raised queues get served again. This often mitigates
+ * starvation problems in the presence of heavy write
+ * workloads and NCQ, thereby guaranteeing a higher
+ * application and system responsiveness in these hostile
+ * scenarios.
*/
- idling_boosts_thr_without_issues = idling_boosts_thr &&
+ return idling_boosts_thr &&
bfqd->wr_busy_queues == 0;
+}
- /*
- * There is then a case where idling must be performed not
- * for throughput concerns, but to preserve service
- * guarantees.
- *
- * To introduce this case, we can note that allowing the drive
- * to enqueue more than one request at a time, and hence
- * delegating de facto final scheduling decisions to the
- * drive's internal scheduler, entails loss of control on the
- * actual request service order. In particular, the critical
- * situation is when requests from different processes happen
- * to be present, at the same time, in the internal queue(s)
- * of the drive. In such a situation, the drive, by deciding
- * the service order of the internally-queued requests, does
- * determine also the actual throughput distribution among
- * these processes. But the drive typically has no notion or
- * concern about per-process throughput distribution, and
- * makes its decisions only on a per-request basis. Therefore,
- * the service distribution enforced by the drive's internal
- * scheduler is likely to coincide with the desired
- * device-throughput distribution only in a completely
- * symmetric scenario where:
- * (i) each of these processes must get the same throughput as
- * the others;
- * (ii) the I/O of each process has the same properties, in
- * terms of locality (sequential or random), direction
- * (reads or writes), request sizes, greediness
- * (from I/O-bound to sporadic), and so on.
- * In fact, in such a scenario, the drive tends to treat
- * the requests of each of these processes in about the same
- * way as the requests of the others, and thus to provide
- * each of these processes with about the same throughput
- * (which is exactly the desired throughput distribution). In
- * contrast, in any asymmetric scenario, device idling is
- * certainly needed to guarantee that bfqq receives its
- * assigned fraction of the device throughput (see [1] for
- * details).
- * The problem is that idling may significantly reduce
- * throughput with certain combinations of types of I/O and
- * devices. An important example is sync random I/O, on flash
- * storage with command queueing. So, unless bfqq falls in the
- * above cases where idling also boosts throughput, it would
- * be important to check conditions (i) and (ii) accurately,
- * so as to avoid idling when not strictly needed for service
- * guarantees.
- *
- * Unfortunately, it is extremely difficult to thoroughly
- * check condition (ii). And, in case there are active groups,
- * it becomes very difficult to check condition (i) too. In
- * fact, if there are active groups, then, for condition (i)
- * to become false, it is enough that an active group contains
- * more active processes or sub-groups than some other active
- * group. More precisely, for condition (i) to hold because of
- * such a group, it is not even necessary that the group is
- * (still) active: it is sufficient that, even if the group
- * has become inactive, some of its descendant processes still
- * have some request already dispatched but still waiting for
- * completion. In fact, requests have still to be guaranteed
- * their share of the throughput even after being
- * dispatched. In this respect, it is easy to show that, if a
- * group frequently becomes inactive while still having
- * in-flight requests, and if, when this happens, the group is
- * not considered in the calculation of whether the scenario
- * is asymmetric, then the group may fail to be guaranteed its
- * fair share of the throughput (basically because idling may
- * not be performed for the descendant processes of the group,
- * but it had to be). We address this issue with the
- * following bi-modal behavior, implemented in the function
- * bfq_symmetric_scenario().
- *
- * If there are groups with requests waiting for completion
- * (as commented above, some of these groups may even be
- * already inactive), then the scenario is tagged as
- * asymmetric, conservatively, without checking any of the
- * conditions (i) and (ii). So the device is idled for bfqq.
- * This behavior matches also the fact that groups are created
- * exactly if controlling I/O is a primary concern (to
- * preserve bandwidth and latency guarantees).
- *
- * On the opposite end, if there are no groups with requests
- * waiting for completion, then only condition (i) is actually
- * controlled, i.e., provided that condition (i) holds, idling
- * is not performed, regardless of whether condition (ii)
- * holds. In other words, only if condition (i) does not hold,
- * then idling is allowed, and the device tends to be
- * prevented from queueing many requests, possibly of several
- * processes. Since there are no groups with requests waiting
- * for completion, then, to control condition (i) it is enough
- * to check just whether all the queues with requests waiting
- * for completion also have the same weight.
- *
- * Not checking condition (ii) evidently exposes bfqq to the
- * risk of getting less throughput than its fair share.
- * However, for queues with the same weight, a further
- * mechanism, preemption, mitigates or even eliminates this
- * problem. And it does so without consequences on overall
- * throughput. This mechanism and its benefits are explained
- * in the next three paragraphs.
- *
- * Even if a queue, say Q, is expired when it remains idle, Q
- * can still preempt the new in-service queue if the next
- * request of Q arrives soon (see the comments on
- * bfq_bfqq_update_budg_for_activation). If all queues and
- * groups have the same weight, this form of preemption,
- * combined with the hole-recovery heuristic described in the
- * comments on function bfq_bfqq_update_budg_for_activation,
- * are enough to preserve a correct bandwidth distribution in
- * the mid term, even without idling. In fact, even if not
- * idling allows the internal queues of the device to contain
- * many requests, and thus to reorder requests, we can rather
- * safely assume that the internal scheduler still preserves a
- * minimum of mid-term fairness.
- *
- * More precisely, this preemption-based, idleless approach
- * provides fairness in terms of IOPS, and not sectors per
- * second. This can be seen with a simple example. Suppose
- * that there are two queues with the same weight, but that
- * the first queue receives requests of 8 sectors, while the
- * second queue receives requests of 1024 sectors. In
- * addition, suppose that each of the two queues contains at
- * most one request at a time, which implies that each queue
- * always remains idle after it is served. Finally, after
- * remaining idle, each queue receives very quickly a new
- * request. It follows that the two queues are served
- * alternatively, preempting each other if needed. This
- * implies that, although both queues have the same weight,
- * the queue with large requests receives a service that is
- * 1024/8 times as high as the service received by the other
- * queue.
- *
- * The motivation for using preemption instead of idling (for
- * queues with the same weight) is that, by not idling,
- * service guarantees are preserved (completely or at least in
- * part) without minimally sacrificing throughput. And, if
- * there is no active group, then the primary expectation for
- * this device is probably a high throughput.
- *
- * We are now left only with explaining the additional
- * compound condition that is checked below for deciding
- * whether the scenario is asymmetric. To explain this
- * compound condition, we need to add that the function
- * bfq_symmetric_scenario checks the weights of only
- * non-weight-raised queues, for efficiency reasons (see
- * comments on bfq_weights_tree_add()). Then the fact that
- * bfqq is weight-raised is checked explicitly here. More
- * precisely, the compound condition below takes into account
- * also the fact that, even if bfqq is being weight-raised,
- * the scenario is still symmetric if all queues with requests
- * waiting for completion happen to be
- * weight-raised. Actually, we should be even more precise
- * here, and differentiate between interactive weight raising
- * and soft real-time weight raising.
- *
- * As a side note, it is worth considering that the above
- * device-idling countermeasures may however fail in the
- * following unlucky scenario: if idling is (correctly)
- * disabled in a time period during which all symmetry
- * sub-conditions hold, and hence the device is allowed to
- * enqueue many requests, but at some later point in time some
- * sub-condition stops to hold, then it may become impossible
- * to let requests be served in the desired order until all
- * the requests already queued in the device have been served.
- */
- asymmetric_scenario = (bfqq->wr_coeff > 1 &&
- bfqd->wr_busy_queues < bfqd->busy_queues) ||
- !bfq_symmetric_scenario(bfqd);
+/*
+ * There is a case where idling does not have to be performed for
+ * throughput concerns, but to preserve the throughput share of
+ * the process associated with bfqq.
+ *
+ * To introduce this case, we can note that allowing the drive
+ * to enqueue more than one request at a time, and hence
+ * delegating de facto final scheduling decisions to the
+ * drive's internal scheduler, entails loss of control on the
+ * actual request service order. In particular, the critical
+ * situation is when requests from different processes happen
+ * to be present, at the same time, in the internal queue(s)
+ * of the drive. In such a situation, the drive, by deciding
+ * the service order of the internally-queued requests, does
+ * determine also the actual throughput distribution among
+ * these processes. But the drive typically has no notion or
+ * concern about per-process throughput distribution, and
+ * makes its decisions only on a per-request basis. Therefore,
+ * the service distribution enforced by the drive's internal
+ * scheduler is likely to coincide with the desired throughput
+ * distribution only in a completely symmetric, or favorably
+ * skewed scenario where:
+ * (i-a) each of these processes must get the same throughput as
+ * the others,
+ * (i-b) in case (i-a) does not hold, it holds that the process
+ * associated with bfqq must receive a lower or equal
+ * throughput than any of the other processes;
+ * (ii) the I/O of each process has the same properties, in
+ * terms of locality (sequential or random), direction
+ * (reads or writes), request sizes, greediness
+ * (from I/O-bound to sporadic), and so on;
+
+ * In fact, in such a scenario, the drive tends to treat the requests
+ * of each process in about the same way as the requests of the
+ * others, and thus to provide each of these processes with about the
+ * same throughput. This is exactly the desired throughput
+ * distribution if (i-a) holds, or, if (i-b) holds instead, this is an
+ * even more convenient distribution for (the process associated with)
+ * bfqq.
+ *
+ * In contrast, in any asymmetric or unfavorable scenario, device
+ * idling (I/O-dispatch plugging) is certainly needed to guarantee
+ * that bfqq receives its assigned fraction of the device throughput
+ * (see [1] for details).
+ *
+ * The problem is that idling may significantly reduce throughput with
+ * certain combinations of types of I/O and devices. An important
+ * example is sync random I/O on flash storage with command
+ * queueing. So, unless bfqq falls in cases where idling also boosts
+ * throughput, it is important to check conditions (i-a), i(-b) and
+ * (ii) accurately, so as to avoid idling when not strictly needed for
+ * service guarantees.
+ *
+ * Unfortunately, it is extremely difficult to thoroughly check
+ * condition (ii). And, in case there are active groups, it becomes
+ * very difficult to check conditions (i-a) and (i-b) too. In fact,
+ * if there are active groups, then, for conditions (i-a) or (i-b) to
+ * become false 'indirectly', it is enough that an active group
+ * contains more active processes or sub-groups than some other active
+ * group. More precisely, for conditions (i-a) or (i-b) to become
+ * false because of such a group, it is not even necessary that the
+ * group is (still) active: it is sufficient that, even if the group
+ * has become inactive, some of its descendant processes still have
+ * some request already dispatched but still waiting for
+ * completion. In fact, requests have still to be guaranteed their
+ * share of the throughput even after being dispatched. In this
+ * respect, it is easy to show that, if a group frequently becomes
+ * inactive while still having in-flight requests, and if, when this
+ * happens, the group is not considered in the calculation of whether
+ * the scenario is asymmetric, then the group may fail to be
+ * guaranteed its fair share of the throughput (basically because
+ * idling may not be performed for the descendant processes of the
+ * group, even though it should have been). We address this issue with the following
+ * bi-modal behavior, implemented in the function
+ * bfq_asymmetric_scenario().
+ *
+ * If there are groups with requests waiting for completion
+ * (as commented above, some of these groups may even be
+ * already inactive), then the scenario is tagged as
+ * asymmetric, conservatively, without checking any of the
+ * conditions (i-a), (i-b) or (ii). So the device is idled for bfqq.
+ * This behavior matches also the fact that groups are created
+ * exactly if controlling I/O is a primary concern (to
+ * preserve bandwidth and latency guarantees).
+ *
+ * On the opposite end, if there are no groups with requests waiting
+ * for completion, then only conditions (i-a) and (i-b) are actually
+ * controlled, i.e., provided that condition (i-a) or (i-b) holds,
+ * idling is not performed, regardless of whether condition (ii)
+ * holds. In other words, only if conditions (i-a) and (i-b) do not
+ * hold, then idling is allowed, and the device tends to be prevented
+ * from queueing many requests, possibly of several processes. Since
+ * there are no groups with requests waiting for completion, then, to
+ * control conditions (i-a) and (i-b) it is enough to check just
+ * whether all the queues with requests waiting for completion also
+ * have the same weight.
+ *
+ * Not checking condition (ii) evidently exposes bfqq to the
+ * risk of getting less throughput than its fair share.
+ * However, for queues with the same weight, a further
+ * mechanism, preemption, mitigates or even eliminates this
+ * problem. And it does so without consequences on overall
+ * throughput. This mechanism and its benefits are explained
+ * in the next three paragraphs.
+ *
+ * Even if a queue, say Q, is expired when it remains idle, Q
+ * can still preempt the new in-service queue if the next
+ * request of Q arrives soon (see the comments on
+ * bfq_bfqq_update_budg_for_activation). If all queues and
+ * groups have the same weight, this form of preemption,
+ * combined with the hole-recovery heuristic described in the
+ * comments on function bfq_bfqq_update_budg_for_activation,
+ * is enough to preserve a correct bandwidth distribution in
+ * the mid term, even without idling. In fact, even if not
+ * idling allows the internal queues of the device to contain
+ * many requests, and thus to reorder requests, we can rather
+ * safely assume that the internal scheduler still preserves a
+ * minimum of mid-term fairness.
+ *
+ * More precisely, this preemption-based, idleless approach
+ * provides fairness in terms of IOPS, and not sectors per
+ * second. This can be seen with a simple example. Suppose
+ * that there are two queues with the same weight, but that
+ * the first queue receives requests of 8 sectors, while the
+ * second queue receives requests of 1024 sectors. In
+ * addition, suppose that each of the two queues contains at
+ * most one request at a time, which implies that each queue
+ * always remains idle after it is served. Finally, after
+ * remaining idle, each queue receives very quickly a new
+ * request. It follows that the two queues are served
+ * alternately, preempting each other if needed. This
+ * implies that, although both queues have the same weight,
+ * the queue with large requests receives a service that is
+ * 1024/8 times as high as the service received by the other
+ * queue.
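+ * For instance, assuming the standard 512-byte sector unit and both
+ * queues sustaining 100 IOPS, the first queue would receive about
+ * 0.4 MB/s (8 * 512 * 100 bytes/s) while the second would receive
+ * about 52 MB/s (1024 * 512 * 100 bytes/s): equal IOPS, but a 128x
+ * gap in bandwidth.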
+ *
+ * The motivation for using preemption instead of idling (for
+ * queues with the same weight) is that, by not idling,
+ * service guarantees are preserved (completely or at least in
+ * part) without sacrificing throughput at all. And, if
+ * there is no active group, then the primary expectation for
+ * this device is probably a high throughput.
+ *
+ * We are now left only with explaining the additional
+ * compound condition that is checked below for deciding
+ * whether the scenario is asymmetric. To explain this
+ * compound condition, we need to add that the function
+ * bfq_asymmetric_scenario checks the weights of only
+ * non-weight-raised queues, for efficiency reasons (see
+ * comments on bfq_weights_tree_add()). Then the fact that
+ * bfqq is weight-raised is checked explicitly here. More
+ * precisely, the compound condition below takes into account
+ * also the fact that, even if bfqq is being weight-raised,
+ * the scenario is still symmetric if all queues with requests
+ * waiting for completion happen to be
+ * weight-raised. Actually, we should be even more precise
+ * here, and differentiate between interactive weight raising
+ * and soft real-time weight raising.
+ *
+ * As a side note, it is worth considering that the above
+ * device-idling countermeasures may however fail in the
+ * following unlucky scenario: if idling is (correctly)
+ * disabled in a time period during which all symmetry
+ * sub-conditions hold, and hence the device is allowed to
+ * enqueue many requests, but at some later point in time some
+ * sub-condition stops to hold, then it may become impossible
+ * to let requests be served in the desired order until all
+ * the requests already queued in the device have been served.
+ */
+static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
+{
+ return (bfqq->wr_coeff > 1 &&
+ bfqd->wr_busy_queues <
+ bfq_tot_busy_queues(bfqd)) ||
+ bfq_asymmetric_scenario(bfqd, bfqq);
+}
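+
+/*
+ * A rough sketch of the bi-modal check described above, not the
+ * in-tree bfq_asymmetric_scenario() (which, for efficiency, skips
+ * weight-raised queues and consults the cached queue weights tree);
+ * the helper all_busy_queues_have_same_weight() is hypothetical:
+ *
+ *	if (bfqd->num_groups_with_pending_reqs > 0)
+ *		return true;	(conservative: plug dispatch for bfqq)
+ *	return !all_busy_queues_have_same_weight(bfqd);
+ */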
+
+/*
+ * For a queue that becomes empty, device idling is allowed only if
+ * this function returns true for that queue. As a consequence, since
+ * device idling plays a critical role for both throughput boosting
+ * and service guarantees, the return value of this function plays a
+ * critical role as well.
+ *
+ * In a nutshell, this function returns true only if idling is
+ * beneficial for throughput or, even if detrimental for throughput,
+ * idling is however necessary to preserve service guarantees (low
+ * latency, desired throughput distribution, ...). In particular, on
+ * NCQ-capable devices, this function tries to return false, so as to
+ * help keep the drive's internal queues full, whenever this helps the
+ * device boost the throughput without causing any service-guarantee
+ * issue.
+ *
+ * Most of the issues taken into account to get the return value of
+ * this function are not trivial. We discuss these issues in the two
+ * functions providing the main pieces of information needed by this
+ * function.
+ */
+static bool bfq_better_to_idle(struct bfq_queue *bfqq)
+{
+ struct bfq_data *bfqd = bfqq->bfqd;
+ bool idling_boosts_thr_with_no_issue, idling_needed_for_service_guar;
+
+ if (unlikely(bfqd->strict_guarantees))
+ return true;
/*
- * Finally, there is a case where maximizing throughput is the
- * best choice even if it may cause unfairness toward
- * bfqq. Such a case is when bfqq became active in a burst of
- * queue activations. Queues that became active during a large
- * burst benefit only from throughput, as discussed in the
- * comments on bfq_handle_burst. Thus, if bfqq became active
- * in a burst and not idling the device maximizes throughput,
- * then the device must no be idled, because not idling the
- * device provides bfqq and all other queues in the burst with
- * maximum benefit. Combining this and the above case, we can
- * now establish when idling is actually needed to preserve
- * service guarantees.
+ * Idling is performed only if slice_idle > 0. In addition, we
+ * do not idle if
+ * (a) bfqq is async
+ * (b) bfqq is in the idle io prio class: in this case we do
+ * not idle because we want to minimize the bandwidth that
+ * queues in this class can steal from higher-priority queues
*/
- idling_needed_for_service_guarantees =
- asymmetric_scenario && !bfq_bfqq_in_large_burst(bfqq);
+ if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) ||
+ bfq_class_idle(bfqq))
+ return false;
+
+ idling_boosts_thr_with_no_issue =
+ idling_boosts_thr_without_issues(bfqd, bfqq);
+
+ idling_needed_for_service_guar =
+ idling_needed_for_service_guarantees(bfqd, bfqq);
/*
- * We have now all the components we need to compute the
+ * We have now the two components we need to compute the
* return value of the function, which is true only if idling
* either boosts the throughput (without issues), or is
* necessary to preserve service guarantees.
*/
- return idling_boosts_thr_without_issues ||
- idling_needed_for_service_guarantees;
+ return idling_boosts_thr_with_no_issue ||
+ idling_needed_for_service_guar;
}
/*
@@ -3701,26 +3971,98 @@ static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq);
}
-static struct bfq_queue *bfq_choose_bfqq_for_injection(struct bfq_data *bfqd)
+/*
+ * This function chooses the queue from which to pick the next extra
+ * I/O request to inject, if it finds a compatible queue. See the
+ * comments on bfq_update_inject_limit() for details on the injection
+ * mechanism, and for the definitions of the quantities mentioned
+ * below.
+ */
+static struct bfq_queue *
+bfq_choose_bfqq_for_injection(struct bfq_data *bfqd)
{
- struct bfq_queue *bfqq;
+ struct bfq_queue *bfqq, *in_serv_bfqq = bfqd->in_service_queue;
+ unsigned int limit = in_serv_bfqq->inject_limit;
+ /*
+ * If
+ * - bfqq is not weight-raised and therefore does not carry
+ * time-critical I/O,
+ * or
+ * - regardless of whether bfqq is weight-raised, bfqq has
+ * however a long think time, during which it can absorb the
+ * effect of an appropriate number of extra I/O requests
+ * from other queues (see bfq_update_inject_limit for
+ * details on the computation of this number);
+ * then injection can be performed without restrictions.
+ */
+ bool in_serv_always_inject = in_serv_bfqq->wr_coeff == 1 ||
+ !bfq_bfqq_has_short_ttime(in_serv_bfqq);
/*
- * A linear search; but, with a high probability, very few
- * steps are needed to find a candidate queue, i.e., a queue
- * with enough budget left for its next request. In fact:
+ * If
+ * - the baseline total service time could not be sampled yet,
+ * so the inject limit happens to be still 0, and
+ * - a lot of time has elapsed since the plugging of I/O
+ * dispatching started, so drive speed is being wasted
+ * significantly;
+ * then temporarily raise inject limit to one request.
+ */
+ if (limit == 0 && in_serv_bfqq->last_serv_time_ns == 0 &&
+ bfq_bfqq_wait_request(in_serv_bfqq) &&
+ time_is_before_eq_jiffies(bfqd->last_idling_start_jiffies +
+ bfqd->bfq_slice_idle)
+ )
+ limit = 1;
+
+ if (bfqd->rq_in_driver >= limit)
+ return NULL;
+
+ /*
+ * Linear search of the source queue for injection; but, with
+ * a high probability, very few steps are needed to find a
+ * candidate queue, i.e., a queue with enough budget left for
+ * its next request. In fact:
* - BFQ dynamically updates the budget of every queue so as
* to accommodate the expected backlog of the queue;
* - if a queue gets all its requests dispatched as injected
* service, then the queue is removed from the active list
- * (and re-added only if it gets new requests, but with
- * enough budget for its new backlog).
+ * (and re-added only if it gets new requests, but then it
+ * is assigned again enough budget for its new backlog).
*/
list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
if (!RB_EMPTY_ROOT(&bfqq->sort_list) &&
+ (in_serv_always_inject || bfqq->wr_coeff > 1) &&
bfq_serv_to_charge(bfqq->next_rq, bfqq) <=
- bfq_bfqq_budget_left(bfqq))
- return bfqq;
+ bfq_bfqq_budget_left(bfqq)) {
+ /*
+ * Allow for only one large in-flight request
+ * on non-rotational devices, for the
+ * following reason. On non-rotational drives,
+ * large requests take much longer than
+ * smaller requests to be served. In addition,
+ * the drive prefers to serve large requests
+ * over small ones, if it can choose. So,
+ * having more than one large request queued
+ * in the drive may easily make the next first
+ * request of the in-service queue wait so long
+ * that bfqq's service guarantees are broken. On
+ * the bright side, large requests let the
+ * drive reach a very high throughput, even if
+ * there is only one in-flight large request
+ * at a time.
+ */
+ if (blk_queue_nonrot(bfqd->queue) &&
+ blk_rq_sectors(bfqq->next_rq) >=
+ BFQQ_SECT_THR_NONROT)
+ limit = min_t(unsigned int, 1, limit);
+ else
+ limit = in_serv_bfqq->inject_limit;
+
+ if (bfqd->rq_in_driver < limit) {
+ bfqd->rqs_injected = true;
+ return bfqq;
+ }
+ }
return NULL;
}
@@ -3807,14 +4149,32 @@ check_queue:
* for a new request, or has requests waiting for a completion and
* may idle after their completion, then keep it anyway.
*
- * Yet, to boost throughput, inject service from other queues if
- * possible.
+ * Yet, inject service from other queues if it boosts
+ * throughput and is possible.
*/
if (bfq_bfqq_wait_request(bfqq) ||
(bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) {
- if (bfq_bfqq_injectable(bfqq) &&
- bfqq->injected_service * bfqq->inject_coeff <
- bfqq->entity.service * 10)
+ struct bfq_queue *async_bfqq =
+ bfqq->bic && bfqq->bic->bfqq[0] &&
+ bfq_bfqq_busy(bfqq->bic->bfqq[0]) ?
+ bfqq->bic->bfqq[0] : NULL;
+
+ /*
+ * If the process associated with bfqq also has async
+ * I/O pending, then inject it
+ * unconditionally. Injecting I/O from the same
+ * process can cause no harm to the process. On the
+ * contrary, it can only increase bandwidth and reduce
+ * latency for the process.
+ */
+ if (async_bfqq &&
+ icq_to_bic(async_bfqq->next_rq->elv.icq) == bfqq->bic &&
+ bfq_serv_to_charge(async_bfqq->next_rq, async_bfqq) <=
+ bfq_bfqq_budget_left(async_bfqq))
+ bfqq = bfqq->bic->bfqq[0];
+ else if (!idling_boosts_thr_without_issues(bfqd, bfqq) &&
+ (bfqq->wr_coeff == 1 || bfqd->wr_busy_queues > 1 ||
+ !bfq_bfqq_has_short_ttime(bfqq)))
bfqq = bfq_choose_bfqq_for_injection(bfqd);
else
bfqq = NULL;
@@ -3906,15 +4266,15 @@ static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
bfq_bfqq_served(bfqq, service_to_charge);
- bfq_dispatch_remove(bfqd->queue, rq);
+ if (bfqq == bfqd->in_service_queue && bfqd->wait_dispatch) {
+ bfqd->wait_dispatch = false;
+ bfqd->waited_rq = rq;
+ }
- if (bfqq != bfqd->in_service_queue) {
- if (likely(bfqd->in_service_queue))
- bfqd->in_service_queue->injected_service +=
- bfq_serv_to_charge(rq, bfqq);
+ bfq_dispatch_remove(bfqd->queue, rq);
+ if (bfqq != bfqd->in_service_queue)
goto return_rq;
- }
/*
* If weight raising has to terminate for bfqq, then next
@@ -3934,7 +4294,7 @@ static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
* belongs to CLASS_IDLE and other queues are waiting for
* service.
*/
- if (!(bfqd->busy_queues > 1 && bfq_class_idle(bfqq)))
+ if (!(bfq_tot_busy_queues(bfqd) > 1 && bfq_class_idle(bfqq)))
goto return_rq;
bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
@@ -3952,7 +4312,7 @@ static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
* most a call to dispatch for nothing
*/
return !list_empty_careful(&bfqd->dispatch) ||
- bfqd->busy_queues > 0;
+ bfq_tot_busy_queues(bfqd) > 0;
}
static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
@@ -4006,9 +4366,10 @@ static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
goto start_rq;
}
- bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
+ bfq_log(bfqd, "dispatch requests: %d busy queues",
+ bfq_tot_busy_queues(bfqd));
- if (bfqd->busy_queues == 0)
+ if (bfq_tot_busy_queues(bfqd) == 0)
goto exit;
/*
@@ -4223,6 +4584,7 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
unsigned long flags;
spin_lock_irqsave(&bfqd->lock, flags);
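+	/* this bic is being exited: clear bfqq's pointer back to it */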
+ bfqq->bic = NULL;
bfq_exit_bfqq(bfqd, bfqq);
bic_set_bfqq(bic, NULL, is_sync);
spin_unlock_irqrestore(&bfqd->lock, flags);
@@ -4344,13 +4706,6 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfq_mark_bfqq_has_short_ttime(bfqq);
bfq_mark_bfqq_sync(bfqq);
bfq_mark_bfqq_just_created(bfqq);
- /*
- * Aggressively inject a lot of service: up to 90%.
- * This coefficient remains constant during bfqq life,
- * but this behavior might be changed, after enough
- * testing and tuning.
- */
- bfqq->inject_coeff = 1;
} else
bfq_clear_bfqq_sync(bfqq);
@@ -4488,10 +4843,12 @@ bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct request *rq)
{
bfqq->seek_history <<= 1;
- bfqq->seek_history |=
- get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR &&
- (!blk_queue_nonrot(bfqd->queue) ||
- blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT);
+ bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq);
+
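+	/* end soft real-time weight raising if bfqq has become totally seeky */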
+ if (bfqq->wr_coeff > 1 &&
+ bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time &&
+ BFQQ_TOTALLY_SEEKY(bfqq))
+ bfq_bfqq_end_wr(bfqq);
}
static void bfq_update_has_short_ttime(struct bfq_data *bfqd,
@@ -4560,28 +4917,31 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bool budget_timeout = bfq_bfqq_budget_timeout(bfqq);
/*
- * There is just this request queued: if the request
- * is small and the queue is not to be expired, then
- * just exit.
+ * There is just this request queued: if
+ * - the request is small, and
+ * - we are idling to boost throughput, and
+ * - the queue is not to be expired,
+ * then just exit.
*
* In this way, if the device is being idled to wait
* for a new request from the in-service queue, we
* avoid unplugging the device and committing the
- * device to serve just a small request. On the
- * contrary, we wait for the block layer to decide
- * when to unplug the device: hopefully, new requests
- * will be merged to this one quickly, then the device
- * will be unplugged and larger requests will be
- * dispatched.
+ * device to serve just a small request. In contrast
+ * we wait for the block layer to decide when to
+ * unplug the device: hopefully, new requests will be
+ * merged to this one quickly, then the device will be
+ * unplugged and larger requests will be dispatched.
*/
- if (small_req && !budget_timeout)
+ if (small_req && idling_boosts_thr_without_issues(bfqd, bfqq) &&
+ !budget_timeout)
return;
/*
- * A large enough request arrived, or the queue is to
- * be expired: in both cases disk idling is to be
- * stopped, so clear wait_request flag and reset
- * timer.
+ * A large enough request arrived, or idling is being
+ * performed to preserve service guarantees, or
+ * finally the queue is to be expired: in all these
+ * cases disk idling is to be stopped, so clear
+ * wait_request flag and reset timer.
*/
bfq_clear_bfqq_wait_request(bfqq);
hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
@@ -4607,8 +4967,6 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
bool waiting, idle_timer_disabled = false;
if (new_bfqq) {
- if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
- new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
/*
* Release the request's reference to the old bfqq
* and make sure one is taken to the shared queue.
@@ -4751,6 +5109,8 @@ static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
static void bfq_update_hw_tag(struct bfq_data *bfqd)
{
+ struct bfq_queue *bfqq = bfqd->in_service_queue;
+
bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver,
bfqd->rq_in_driver);
@@ -4763,7 +5123,18 @@ static void bfq_update_hw_tag(struct bfq_data *bfqd)
* sum is not exact, as it's not taking into account deactivated
* requests.
*/
- if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
+ if (bfqd->rq_in_driver + bfqd->queued <= BFQ_HW_QUEUE_THRESHOLD)
+ return;
+
+ /*
+ * If the active queue does not have enough requests and can idle, bfq
+ * might not dispatch enough requests to the hardware. Don't zero
+ * hw_tag in this case.
+ */
+ if (bfqq && bfq_bfqq_has_short_ttime(bfqq) &&
+ bfqq->dispatched + bfqq->queued[0] + bfqq->queued[1] <
+ BFQ_HW_QUEUE_THRESHOLD &&
+ bfqd->rq_in_driver < BFQ_HW_QUEUE_THRESHOLD)
return;
if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
@@ -4772,6 +5143,9 @@ static void bfq_update_hw_tag(struct bfq_data *bfqd)
bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
bfqd->max_rq_in_driver = 0;
bfqd->hw_tag_samples = 0;
+
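+	/* remember whether the device is non-rotational and does internal queueing */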
+ bfqd->nonrot_with_queueing =
+ blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag;
}
static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
@@ -4834,11 +5208,14 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
* isochronous, and both requisites for this condition to hold
* are now satisfied, then compute soft_rt_next_start (see the
* comments on the function bfq_bfqq_softrt_next_start()). We
- * schedule this delayed check when bfqq expires, if it still
- * has in-flight requests.
+ * do not compute soft_rt_next_start if bfqq is in interactive
+ * weight raising (see the comments in bfq_bfqq_expire() for
+ * an explanation). We schedule this delayed update when bfqq
+ * expires, if it still has in-flight requests.
*/
if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
- RB_EMPTY_ROOT(&bfqq->sort_list))
+ RB_EMPTY_ROOT(&bfqq->sort_list) &&
+ bfqq->wr_coeff != bfqd->bfq_wr_coeff)
bfqq->soft_rt_next_start =
bfq_bfqq_softrt_next_start(bfqd, bfqq);
@@ -4896,6 +5273,147 @@ static void bfq_finish_requeue_request_body(struct bfq_queue *bfqq)
}
/*
+ * The processes associated with bfqq may happen to generate their
+ * cumulative I/O at a lower rate than the rate at which the device
+ * could serve the same I/O. This is rather probable, e.g., if only
+ * one process is associated with bfqq and the device is an SSD. It
+ * results in bfqq becoming often empty while in service. In this
+ * respect, if BFQ is allowed to switch to another queue when bfqq
+ * remains empty, then the device goes on being fed with I/O requests,
+ * and the throughput is not affected. In contrast, if BFQ is not
+ * allowed to switch to another queue---because bfqq is sync and
+ * I/O-dispatch needs to be plugged while bfqq is temporarily
+ * empty---then, during the service of bfqq, there will be frequent
+ * "service holes", i.e., time intervals during which bfqq gets empty
+ * and the device can only consume the I/O already queued in its
+ * hardware queues. During service holes, the device may even end up
+ * idle. In the end, during the service of bfqq, the device
+ * is driven at a lower speed than the one it can reach with the kind
+ * of I/O flowing through bfqq.
+ *
+ * To counter this loss of throughput, BFQ implements a "request
+ * injection mechanism", which tries to fill the above service holes
+ * with I/O requests taken from other queues. The hard part in this
+ * mechanism is finding the right amount of I/O to inject, so as to
+ * both boost throughput and not break bfqq's bandwidth and latency
+ * guarantees. In this respect, the mechanism maintains a per-queue
+ * inject limit, computed as below. While bfqq is empty, the injection
+ * mechanism dispatches extra I/O requests only until the total number
+ * of I/O requests in flight---i.e., already dispatched but not yet
+ * completed---remains lower than this limit.
+ *
+ * A first definition comes in handy to introduce the algorithm by
+ * which the inject limit is computed. We define as first request for
+ * bfqq, an I/O request for bfqq that arrives while bfqq is in
+ * service, and causes bfqq to switch from empty to non-empty. The
+ * algorithm updates the limit as a function of the effect of
+ * injection on the service times of only the first requests of
+ * bfqq. The reason for this restriction is that these are the
+ * requests whose service time is affected most, because they are the
+ * first to arrive after injection possibly occurred.
+ *
+ * To evaluate the effect of injection, the algorithm measures the
+ * "total service time" of first requests. We define as total service
+ * time of an I/O request, the time that elapses since when the
+ * request is enqueued into bfqq, to when it is completed. This
+ * quantity allows the whole effect of injection to be measured. It is
+ * easy to see why. Suppose that some requests of other queues are
+ * actually injected while bfqq is empty, and that a new request R
+ * then arrives for bfqq. If the device does start to serve all or
+ * part of the injected requests during the service hole, then,
+ * because of this extra service, it may delay the next invocation of
+ * the dispatch hook of BFQ. Then, even after R gets eventually
+ * dispatched, the device may delay the actual service of R if it is
+ * still busy serving the extra requests, or if it decides to serve,
+ * before R, some extra request still present in its queues. As a
+ * conclusion, the cumulative extra delay caused by injection can be
+ * easily evaluated by just comparing the total service time of first
+ * requests with and without injection.
+ *
+ * The limit-update algorithm works as follows. On the arrival of a
+ * first request of bfqq, the algorithm measures the total service time of the
+ * request only if one of the three cases below holds, and, for each
+ * case, it updates the limit as described below:
+ *
+ * (1) If there is no in-flight request. This gives a baseline for the
+ * total service time of the requests of bfqq. If the baseline has
+ * not been computed yet, then, after computing it, the limit is
+ * set to 1, to start boosting throughput, and to prepare the
+ * ground for the next case. If the baseline has already been
+ * computed, then it is updated, if it turns out to be lower
+ * than the previous value.
+ *
+ * (2) If the limit is higher than 0 and there are in-flight
+ * requests. By comparing the total service time in this case with
+ * the above baseline, it is possible to know to what extent the
+ * current value of the limit is inflating the total service
+ * time. If the inflation is below a certain threshold, then bfqq
+ * is assumed to be suffering from no perceivable loss of its
+ * service guarantees, and the limit is even tentatively
+ * increased. If the inflation is above the threshold, then the
+ * limit is decreased. Due to the lack of any hysteresis, this
+ * logic makes the limit oscillate even in steady workload
+ * conditions. Yet we opted for it, because it is fast in reaching
+ * the best value for the limit, as a function of the current I/O
+ * workload. To reduce oscillations, this step is disabled for a
+ * short time interval after the limit happens to be decreased.
+ *
+ * (3) Periodically, after resetting the limit, to make sure that the
+ * limit eventually drops in case the workload changes. This is
+ * needed because, after the limit has gone safely up for a
+ * certain workload, it is impossible to guess whether the
+ * baseline total service time may have changed, without measuring
+ * it again without injection. A more effective version of this
+ * step might be to just sample the baseline, by interrupting
+ * injection only once, and then to reset/lower the limit only if
+ * the total service time with the current limit does happen to be
+ * too large.
+ *
+ * More details on each step are provided in the comments on the
+ * pieces of code that implement these steps: the branch handling the
+ * transition from empty to non-empty in bfq_add_request(), the branch
+ * handling injection in bfq_select_queue(), and the function
+ * bfq_choose_bfqq_for_injection(). These comments also explain some
+ * exceptions, made by the injection mechanism in some special cases.
+ */
+static void bfq_update_inject_limit(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq)
+{
+ u64 tot_time_ns = ktime_get_ns() - bfqd->last_empty_occupied_ns;
+ unsigned int old_limit = bfqq->inject_limit;
+
+ if (bfqq->last_serv_time_ns > 0) {
+ u64 threshold = (bfqq->last_serv_time_ns * 3)>>1;
+
+ if (tot_time_ns >= threshold && old_limit > 0) {
+ bfqq->inject_limit--;
+ bfqq->decrease_time_jif = jiffies;
+ } else if (tot_time_ns < threshold &&
+ old_limit < bfqd->max_rq_in_driver<<1)
+ bfqq->inject_limit++;
+ }
+
+ /*
+ * Either we still have to compute the base value for the
+ * total service time, and there seem to be the right
+ * conditions to do it, or we can lower the last base value
+ * computed.
+ */
+ if ((bfqq->last_serv_time_ns == 0 && bfqd->rq_in_driver == 0) ||
+ tot_time_ns < bfqq->last_serv_time_ns) {
+ bfqq->last_serv_time_ns = tot_time_ns;
+ /*
+ * Now we certainly have a base value: make sure we
+ * start trying injection.
+ */
+ bfqq->inject_limit = max_t(unsigned int, 1, old_limit);
+ }
+
+ /* update complete, not waiting for any request completion any longer */
+ bfqd->waited_rq = NULL;
+}
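+
+/*
+ * A worked example of the update rule above, with illustrative numbers
+ * only: for a baseline last_serv_time_ns of 2 ms, the threshold is
+ * 3 ms (1.5 times the baseline). A first request whose total service
+ * time is 3.5 ms while the limit is 2 lowers the limit to 1 (and
+ * records decrease_time_jif); one served in 2.5 ms lets the limit
+ * grow, up to twice max_rq_in_driver.
+ */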
+
+/*
* Handle either a requeue or a finish for rq. The things to do are
* the same in both cases: all references to rq are to be dropped. In
* particular, rq is considered completed from the point of view of
@@ -4939,6 +5457,9 @@ static void bfq_finish_requeue_request(struct request *rq)
spin_lock_irqsave(&bfqd->lock, flags);
+ if (rq == bfqd->waited_rq)
+ bfq_update_inject_limit(bfqd, bfqq);
+
bfq_completed_request(bfqq, bfqd);
bfq_finish_requeue_request_body(bfqq);
@@ -5102,7 +5623,7 @@ static void bfq_prepare_request(struct request *rq, struct bio *bio)
* preparation is that, after the prepare_request hook is invoked for
* rq, rq may still be transformed into a request with no icq, i.e., a
* request not associated with any queue. No bfq hook is invoked to
- * signal this tranformation. As a consequence, should these
+ * signal this transformation. As a consequence, should these
* preparation operations be performed when the prepare_request hook
* is invoked, and should rq be transformed one moment later, bfq
* would end up in an inconsistent state, because it would have
@@ -5193,7 +5714,29 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
}
}
- if (unlikely(bfq_bfqq_just_created(bfqq)))
+ /*
+ * Consider bfqq as possibly belonging to a burst of newly
+ * created queues only if:
+ * 1) A burst is actually happening (bfqd->burst_size > 0)
+ * or
+ * 2) There is no other active queue. In fact, if, in
+ * contrast, there are active queues not belonging to the
+ * possible burst bfqq may belong to, then there is no gain
+ * in considering bfqq as belonging to a burst, and
+ * therefore in not weight-raising bfqq. See comments on
+ * bfq_handle_burst().
+ *
+ * This filtering also helps eliminating false positives,
+ * occurring when bfqq does not belong to an actual large
+ * burst, but some background task (e.g., a service) happens
+ * to trigger the creation of new queues very close to when
+ * bfqq and its possible companion queues are created. See
+ * comments on bfq_handle_burst() for further details also on
+ * this issue.
+ */
+ if (unlikely(bfq_bfqq_just_created(bfqq) &&
+ (bfqd->burst_size > 0 ||
+ bfq_tot_busy_queues(bfqd) == 0)))
bfq_handle_burst(bfqd, bfqq);
return bfqq;
@@ -5342,7 +5885,7 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
return min_shallow;
}
-static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
+static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
{
struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
struct blk_mq_tags *tags = hctx->sched_tags;
@@ -5350,6 +5893,11 @@ static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags);
sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow);
+}
+
+static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
+{
+ bfq_depth_updated(hctx);
return 0;
}
@@ -5448,7 +5996,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
HRTIMER_MODE_REL);
bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
- bfqd->queue_weights_tree = RB_ROOT;
+ bfqd->queue_weights_tree = RB_ROOT_CACHED;
bfqd->num_groups_with_pending_reqs = 0;
INIT_LIST_HEAD(&bfqd->active_list);
@@ -5456,6 +6004,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
INIT_HLIST_HEAD(&bfqd->burst_list);
bfqd->hw_tag = -1;
+ bfqd->nonrot_with_queueing = blk_queue_nonrot(bfqd->queue);
bfqd->bfq_max_budget = bfq_default_max_budget;
@@ -5772,6 +6321,7 @@ static struct elevator_type iosched_bfq_mq = {
.requests_merged = bfq_requests_merged,
.request_merged = bfq_request_merged,
.has_work = bfq_has_work,
+ .depth_updated = bfq_depth_updated,
.init_hctx = bfq_init_hctx,
.init_sched = bfq_init_queue,
.exit_sched = bfq_exit_queue,
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 0b02bf302de0..c2faa77824f8 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Header file for the BFQ I/O scheduler: data structures and
* prototypes of interface functions among BFQ components.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#ifndef _BFQ_H
#define _BFQ_H
@@ -32,6 +23,8 @@
#define BFQ_DEFAULT_GRP_IOPRIO 0
#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE
+#define MAX_PID_STR_LENGTH 12
+
/*
* Soft real-time applications are extremely more latency sensitive
* than interactive ones. Over-raise the weight of the former to
@@ -89,7 +82,7 @@ struct bfq_service_tree {
* expiration. This peculiar definition allows for the following
* optimization, not yet exploited: while a given entity is still in
* service, we already know which is the best candidate for next
- * service among the other active entitities in the same parent
+ * service among the other active entities in the same parent
* entity. We can then quickly compare the timestamps of the
* in-service entity with those of such best candidate.
*
@@ -140,7 +133,7 @@ struct bfq_weight_counter {
*
* Unless cgroups are used, the weight value is calculated from the
* ioprio to export the same interface as CFQ. When dealing with
- * ``well-behaved'' queues (i.e., queues that do not spend too much
+ * "well-behaved" queues (i.e., queues that do not spend too much
* time to consume their budget and have true sequential behavior, and
* when there are no external factors breaking anticipation) the
* relative weights at each level of the cgroups hierarchy should be
@@ -240,6 +233,13 @@ struct bfq_queue {
/* next ioprio and ioprio class if a change is in progress */
unsigned short new_ioprio, new_ioprio_class;
+ /* last total-service-time sample, see bfq_update_inject_limit() */
+ u64 last_serv_time_ns;
+ /* limit for request injection */
+ unsigned int inject_limit;
+ /* last time the inject limit has been decreased, in jiffies */
+ unsigned long decrease_time_jif;
+
/*
* Shared bfq_queue if queue is cooperating with one or more
* other queues.
@@ -357,29 +357,6 @@ struct bfq_queue {
/* max service rate measured so far */
u32 max_service_rate;
- /*
- * Ratio between the service received by bfqq while it is in
- * service, and the cumulative service (of requests of other
- * queues) that may be injected while bfqq is empty but still
- * in service. To increase precision, the coefficient is
- * measured in tenths of unit. Here are some example of (1)
- * ratios, (2) resulting percentages of service injected
- * w.r.t. to the total service dispatched while bfqq is in
- * service, and (3) corresponding values of the coefficient:
- * 1 (50%) -> 10
- * 2 (33%) -> 20
- * 10 (9%) -> 100
- * 9.9 (9%) -> 99
- * 1.5 (40%) -> 15
- * 0.5 (66%) -> 5
- * 0.1 (90%) -> 1
- *
- * So, if the coefficient is lower than 10, then
- * injected service is more than bfqq service.
- */
- unsigned int inject_coeff;
- /* amount of service injected in current service slot */
- unsigned int injected_service;
};
/**
@@ -419,6 +396,15 @@ struct bfq_io_cq {
bool was_in_burst_list;
/*
+ * Save the weight when a merge occurs, to be able
+ * to restore it in case of split. If the weight is not
+ * correctly restored when the queue is recycled,
+ * then the weight of the recycled queue could differ
+ * from the weight of the original queue.
+ */
+ unsigned int saved_weight;
+
+ /*
* Similar to previous fields: save wr information.
*/
unsigned long saved_wr_coeff;
@@ -450,7 +436,7 @@ struct bfq_data {
* weight-raised @bfq_queue (see the comments to the functions
* bfq_weights_tree_[add|remove] for further details).
*/
- struct rb_root queue_weights_tree;
+ struct rb_root_cached queue_weights_tree;
/*
* Number of groups with at least one descendant process that
@@ -501,10 +487,11 @@ struct bfq_data {
unsigned int num_groups_with_pending_reqs;
/*
- * Number of bfq_queues containing requests (including the
- * queue in service, even if it is idling).
+ * Per-class (RT, BE, IDLE) number of bfq_queues containing
+ * requests (including the queue in service, even if it is
+ * idling).
*/
- int busy_queues;
+ unsigned int busy_queues[3];
/* number of weight-raised busy @bfq_queues */
int wr_busy_queues;
/* number of queued requests */
@@ -512,6 +499,9 @@ struct bfq_data {
/* number of requests dispatched and waiting for completion */
int rq_in_driver;
+ /* true if the device is non-rotational and performs queueing */
+ bool nonrot_with_queueing;
+
/*
* Maximum number of requests in driver in the last
* @hw_tag_samples completed requests.
@@ -537,9 +527,32 @@ struct bfq_data {
/* on-disk position of the last served request */
sector_t last_position;
+ /* position of the last served request for the in-service queue */
+ sector_t in_serv_last_pos;
+
/* time of last request completion (ns) */
u64 last_completion;
+ /* time of last transition from empty to non-empty (ns) */
+ u64 last_empty_occupied_ns;
+
+ /*
+ * Flag set to activate the sampling of the total service time
+ * of a just-arrived first I/O request (see
+ * bfq_update_inject_limit()). This will cause the setting of
+ * waited_rq when the request is finally dispatched.
+ */
+ bool wait_dispatch;
+ /*
+ * If set, then bfq_update_inject_limit() is invoked when
+ * waited_rq is eventually completed.
+ */
+ struct request *waited_rq;
+ /*
+ * True if some request has been injected during the last service hole.
+ */
+ bool rqs_injected;
+
/* time of first rq dispatch in current observation interval (ns) */
u64 first_dispatch;
/* time of last rq dispatch in current observation interval (ns) */
@@ -549,6 +562,7 @@ struct bfq_data {
ktime_t last_budget_start;
/* beginning of the last idle slice */
ktime_t last_idling_start;
+ unsigned long last_idling_start_jiffies;
/* number of samples in current observation interval */
int peak_rate_samples;
@@ -894,10 +908,10 @@ void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync);
struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic);
void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq);
void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
- struct rb_root *root);
+ struct rb_root_cached *root);
void __bfq_weights_tree_remove(struct bfq_data *bfqd,
struct bfq_queue *bfqq,
- struct rb_root *root);
+ struct rb_root_cached *root);
void bfq_weights_tree_remove(struct bfq_data *bfqd,
struct bfq_queue *bfqq);
void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
@@ -974,6 +988,7 @@ extern struct blkcg_policy blkcg_policy_bfq;
struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq);
struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity);
+unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd);
struct bfq_service_tree *bfq_entity_service_tree(struct bfq_entity *entity);
struct bfq_entity *bfq_entity_of(struct rb_node *node);
unsigned short bfq_ioprio_to_weight(int ioprio);
@@ -990,7 +1005,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity,
bool ins_into_idle_tree);
bool next_queue_may_preempt(struct bfq_data *bfqd);
struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd);
-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
+bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bool ins_into_idle_tree, bool expiration);
void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
@@ -1003,13 +1018,23 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq);
/* --------------- end of interface of B-WF2Q+ ---------------- */
/* Logging facilities. */
+static inline void bfq_pid_to_str(int pid, char *str, int len)
+{
+ if (pid != -1)
+ snprintf(str, len, "%d", pid);
+ else
+ snprintf(str, len, "SHARED-");
+}
+
#ifdef CONFIG_BFQ_GROUP_IOSCHED
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
+ char pid_str[MAX_PID_STR_LENGTH]; \
+ bfq_pid_to_str((bfqq)->pid, pid_str, MAX_PID_STR_LENGTH); \
blk_add_cgroup_trace_msg((bfqd)->queue, \
bfqg_to_blkg(bfqq_group(bfqq))->blkcg, \
- "bfq%d%c " fmt, (bfqq)->pid, \
+ "bfq%s%c " fmt, pid_str, \
bfq_bfqq_sync((bfqq)) ? 'S' : 'A', ##args); \
} while (0)
@@ -1020,10 +1045,13 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
#else /* CONFIG_BFQ_GROUP_IOSCHED */
-#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
- blk_add_trace_msg((bfqd)->queue, "bfq%d%c " fmt, (bfqq)->pid, \
+#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
+ char pid_str[MAX_PID_STR_LENGTH]; \
+ bfq_pid_to_str((bfqq)->pid, pid_str, MAX_PID_STR_LENGTH); \
+ blk_add_trace_msg((bfqd)->queue, "bfq%s%c " fmt, pid_str, \
bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
- ##args)
+ ##args); \
+} while (0)
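+/* Illustrative call site: bfq_log_bfqq(bfqd, bfqq, "budget exhausted"); */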
#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
#endif /* CONFIG_BFQ_GROUP_IOSCHED */
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index 72adbbe975d5..c9ba225081ce 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Hierarchical Budget Worst-case Fair Weighted Fair Queueing
* (B-WF2Q+): hierarchical scheduling algorithm by which the BFQ I/O
* scheduler schedules generic entities. The latter can represent
* either single bfq queues (associated with processes) or groups of
* bfq queues (associated with cgroups).
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
*/
#include "bfq-iosched.h"
@@ -44,6 +35,12 @@ static unsigned int bfq_class_idx(struct bfq_entity *entity)
BFQ_DEFAULT_GRP_CLASS - 1;
}
+unsigned int bfq_tot_busy_queues(struct bfq_data *bfqd)
+{
+ return bfqd->busy_queues[0] + bfqd->busy_queues[1] +
+ bfqd->busy_queues[2];
+}
+
static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
bool expiration);
@@ -53,7 +50,7 @@ static bool bfq_update_parent_budget(struct bfq_entity *next_in_service);
* bfq_update_next_in_service - update sd->next_in_service
* @sd: sched_data for which to perform the update.
* @new_entity: if not NULL, pointer to the entity whose activation,
- * requeueing or repositionig triggered the invocation of
+ * requeueing or repositioning triggered the invocation of
* this function.
* @expiration: if true, this function is being invoked after the
* expiration of the in-service entity
@@ -84,7 +81,7 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
/*
* If this update is triggered by the activation, requeueing
- * or repositiong of an entity that does not coincide with
+ * or repositioning of an entity that does not coincide with
* sd->next_in_service, then a full lookup in the active tree
* can be avoided. In fact, it is enough to check whether the
* just-modified entity has the same priority as
@@ -731,7 +728,7 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
unsigned int prev_weight, new_weight;
struct bfq_data *bfqd = NULL;
- struct rb_root *root;
+ struct rb_root_cached *root;
#ifdef CONFIG_BFQ_GROUP_IOSCHED
struct bfq_sched_data *sd;
struct bfq_group *bfqg;
@@ -1006,7 +1003,7 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
entity->on_st = true;
}
-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
struct bfq_group *bfqg =
container_of(entity, struct bfq_group, entity);
@@ -1390,7 +1387,7 @@ left:
* In this first case, update the virtual time in @st too (see the
* comments on this update inside the function).
*
- * In constrast, if there is an in-service entity, then return the
+ * In contrast, if there is an in-service entity, then return the
* entity that would be set in service if not only the above
* conditions, but also the next one held true: the currently
* in-service entity, on expiration,
@@ -1473,12 +1470,12 @@ static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
* is being invoked as a part of the expiration path
* of the in-service queue. In this case, even if
* sd->in_service_entity is not NULL,
- * sd->in_service_entiy at this point is actually not
+ * sd->in_service_entity at this point is actually not
* in service any more, and, if needed, has already
* been properly queued or requeued into the right
* tree. The reason why sd->in_service_entity is still
* not NULL here, even if expiration is true, is that
- * sd->in_service_entiy is reset as a last step in the
+ * sd->in_service_entity is reset as a last step in the
* expiration path. So, if expiration is true, tell
* __bfq_lookup_next_entity that there is no
* sd->in_service_entity.
@@ -1513,7 +1510,7 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
struct bfq_sched_data *sd;
struct bfq_queue *bfqq;
- if (bfqd->busy_queues == 0)
+ if (bfq_tot_busy_queues(bfqd) == 0)
return NULL;
/*
@@ -1599,7 +1596,8 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
return bfqq;
}
-void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
+/* returns true if the in-service queue gets freed */
+bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
{
struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
@@ -1623,8 +1621,20 @@ void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
* service tree either, then release the service reference to
* the queue it represents (taken with bfq_get_entity).
*/
- if (!in_serv_entity->on_st)
+ if (!in_serv_entity->on_st) {
+ /*
+ * If no process is referencing in_serv_bfqq any
+ * longer, then the service reference may be the only
+ * reference to the queue. If this is the case, then
+ * bfqq gets freed here.
+ */
+ int ref = in_serv_bfqq->ref;
bfq_put_queue(in_serv_bfqq);
+ if (ref == 1)
+ return true;
+ }
+
+ return false;
}
void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
@@ -1665,10 +1675,7 @@ void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfq_clear_bfqq_busy(bfqq);
- bfqd->busy_queues--;
-
- if (!bfqq->dispatched)
- bfq_weights_tree_remove(bfqd, bfqq);
+ bfqd->busy_queues[bfqq->ioprio_class - 1]--;
if (bfqq->wr_coeff > 1)
bfqd->wr_busy_queues--;
@@ -1676,6 +1683,9 @@ void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqg_stats_update_dequeue(bfqq_group(bfqq));
bfq_deactivate_bfqq(bfqd, bfqq, true, expiration);
+
+ if (!bfqq->dispatched)
+ bfq_weights_tree_remove(bfqd, bfqq);
}
/*
@@ -1688,7 +1698,7 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfq_activate_bfqq(bfqd, bfqq);
bfq_mark_bfqq_busy(bfqq);
- bfqd->busy_queues++;
+ bfqd->busy_queues[bfqq->ioprio_class - 1]++;
if (!bfqq->dispatched)
if (bfqq->wr_coeff == 1)
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 1b633a3526d4..4db620849515 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -1,23 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* bio-integrity.c - bio data integrity extensions
*
* Copyright (C) 2007, 2008, 2009 Oracle Corporation
* Written by: Martin K. Petersen <martin.petersen@oracle.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
*/
#include <linux/blkdev.h>
@@ -57,8 +43,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
unsigned inline_vecs;
if (!bs || !mempool_initialized(&bs->bio_integrity_pool)) {
- bip = kmalloc(sizeof(struct bio_integrity_payload) +
- sizeof(struct bio_vec) * nr_vecs, gfp_mask);
+ bip = kmalloc(struct_size(bip, bip_inline_vecs, nr_vecs), gfp_mask);
inline_vecs = nr_vecs;
} else {
bip = mempool_alloc(&bs->bio_integrity_pool, gfp_mask);
diff --git a/block/bio.c b/block/bio.c
index 4db1008309ed..67bba12d273b 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public Licens
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
- *
*/
#include <linux/mm.h>
#include <linux/swap.h>
@@ -647,26 +634,68 @@ struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
}
EXPORT_SYMBOL(bio_clone_fast);
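+/*
+ * Check whether the data at @page + @off is physically contiguous with
+ * the end of the last vector @bv. *same_page is set if @page is the very
+ * page that holds the end of @bv; otherwise the merge is only accepted
+ * if @page is the page immediately following it.
+ */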
+static inline bool page_is_mergeable(const struct bio_vec *bv,
+ struct page *page, unsigned int len, unsigned int off,
+ bool *same_page)
+{
+ phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
+ bv->bv_offset + bv->bv_len - 1;
+ phys_addr_t page_addr = page_to_phys(page);
+
+ if (vec_end_addr + 1 != page_addr + off)
+ return false;
+ if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
+ return false;
+
+ *same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
+ if (!*same_page && pfn_to_page(PFN_DOWN(vec_end_addr)) + 1 != page)
+ return false;
+ return true;
+}
+
+/*
+ * Check if @page can be added to the current segment (@bv), and make
+ * sure to call it only if page_is_mergeable(@bv, @page) is true
+ */
+static bool can_add_page_to_seg(struct request_queue *q,
+ struct bio_vec *bv, struct page *page, unsigned len,
+ unsigned offset)
+{
+ unsigned long mask = queue_segment_boundary(q);
+ phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
+ phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
+
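+	/* the resulting segment must not cross the queue's segment boundary */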
+ if ((addr1 | mask) != (addr2 | mask))
+ return false;
+
+ if (bv->bv_len + len > queue_max_segment_size(q))
+ return false;
+
+ return true;
+}
+
/**
- * bio_add_pc_page - attempt to add page to bio
+ * __bio_add_pc_page - attempt to add page to passthrough bio
* @q: the target queue
* @bio: destination bio
* @page: page to add
* @len: vec entry length
* @offset: vec entry offset
+ * @put_same_page: put the page if it is the same as the last added page
*
* Attempt to add a page to the bio_vec maplist. This can fail for a
* number of reasons, such as the bio being full or target block device
* limitations. The target block device must allow bio's up to PAGE_SIZE,
* so it is always possible to add a single page to an empty bio.
*
- * This should only be used by REQ_PC bios.
+ * This should only be used by passthrough bios.
*/
-int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
- *page, unsigned int len, unsigned int offset)
+static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
+ struct page *page, unsigned int len, unsigned int offset,
+ bool put_same_page)
{
- int retried_segments = 0;
struct bio_vec *bvec;
+ bool same_page = false;
/*
* cloned bio must not modify vec list
@@ -677,18 +706,14 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
return 0;
- /*
- * For filesystems with a blocksize smaller than the pagesize
- * we will often be called with the same page as last time and
- * a consecutive offset. Optimize this special case.
- */
if (bio->bi_vcnt > 0) {
- struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
+ bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
- if (page == prev->bv_page &&
- offset == prev->bv_offset + prev->bv_len) {
- prev->bv_len += len;
- bio->bi_iter.bi_size += len;
+ if (page == bvec->bv_page &&
+ offset == bvec->bv_offset + bvec->bv_len) {
+ if (put_same_page)
+ put_page(page);
+ bvec->bv_len += len;
goto done;
}
@@ -696,72 +721,59 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
* If the queue doesn't support SG gaps and adding this
* offset would create a gap, disallow it.
*/
- if (bvec_gap_to_prev(q, prev, offset))
+ if (bvec_gap_to_prev(q, bvec, offset))
return 0;
+
+ if (page_is_mergeable(bvec, page, len, offset, &same_page) &&
+ can_add_page_to_seg(q, bvec, page, len, offset)) {
+ bvec->bv_len += len;
+ goto done;
+ }
}
- if (bio_full(bio))
+ if (bio_full(bio, len))
+ return 0;
+
+ if (bio->bi_phys_segments >= queue_max_segments(q))
return 0;
- /*
- * setup the new entry, we might clear it again later if we
- * cannot add the page
- */
bvec = &bio->bi_io_vec[bio->bi_vcnt];
bvec->bv_page = page;
bvec->bv_len = len;
bvec->bv_offset = offset;
bio->bi_vcnt++;
- bio->bi_phys_segments++;
- bio->bi_iter.bi_size += len;
-
- /*
- * Perform a recount if the number of segments is greater
- * than queue_max_segments(q).
- */
-
- while (bio->bi_phys_segments > queue_max_segments(q)) {
-
- if (retried_segments)
- goto failed;
-
- retried_segments = 1;
- blk_recount_segments(q, bio);
- }
-
- /* If we may be able to merge these biovecs, force a recount */
- if (bio->bi_vcnt > 1 && biovec_phys_mergeable(q, bvec - 1, bvec))
- bio_clear_flag(bio, BIO_SEG_VALID);
-
done:
+ bio->bi_iter.bi_size += len;
+ bio->bi_phys_segments = bio->bi_vcnt;
+ bio_set_flag(bio, BIO_SEG_VALID);
return len;
+}
- failed:
- bvec->bv_page = NULL;
- bvec->bv_len = 0;
- bvec->bv_offset = 0;
- bio->bi_vcnt--;
- bio->bi_iter.bi_size -= len;
- blk_recount_segments(q, bio);
- return 0;
+int bio_add_pc_page(struct request_queue *q, struct bio *bio,
+ struct page *page, unsigned int len, unsigned int offset)
+{
+ return __bio_add_pc_page(q, bio, page, len, offset, false);
}
EXPORT_SYMBOL(bio_add_pc_page);
/**
* __bio_try_merge_page - try appending data to an existing bvec.
* @bio: destination bio
- * @page: page to add
+ * @page: start page to add
* @len: length of the data to add
- * @off: offset of the data in @page
+ * @off: offset of the data relative to @page
+ * @same_page: return whether the segment has been merged inside the same page
*
* Try to add the data at @page + @off to the last bvec of @bio. This is
* a useful optimisation for file systems with a block size smaller than the
* page size.
*
+ * Warn if (@len, @off) crosses pages in case that @same_page is true.
+ *
* Return %true on success or %false on failure.
*/
bool __bio_try_merge_page(struct bio *bio, struct page *page,
- unsigned int len, unsigned int off)
+ unsigned int len, unsigned int off, bool *same_page)
{
if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
return false;
@@ -769,7 +781,7 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
if (bio->bi_vcnt > 0) {
struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
- if (page == bv->bv_page && off == bv->bv_offset + bv->bv_len) {
+ if (page_is_mergeable(bv, page, len, off, same_page)) {
bv->bv_len += len;
bio->bi_iter.bi_size += len;
return true;
@@ -780,11 +792,11 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
EXPORT_SYMBOL_GPL(__bio_try_merge_page);
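With the new @same_page out-parameter, a caller that holds one reference per page (for example pages returned by iov_iter_get_pages()) can drop the duplicate reference when the data merged into a bvec that already covers that page. A condensed sketch of the pattern, mirroring __bio_iov_iter_get_pages() below:

        bool same_page = false;

        if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
                if (same_page)
                        put_page(page); /* the bvec already holds a reference */
        } else {
                if (WARN_ON_ONCE(bio_full(bio, len)))
                        return -EINVAL;
                __bio_add_page(bio, page, len, offset);
        }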
/**
- * __bio_add_page - add page to a bio in a new segment
+ * __bio_add_page - add page(s) to a bio in a new segment
* @bio: destination bio
- * @page: page to add
- * @len: length of the data to add
- * @off: offset of the data in @page
+ * @page: start page to add
+ * @len: length of the data to add, may cross pages
+ * @off: offset of the data relative to @page, may cross pages
*
* Add the data at @page + @off to @bio as a new bvec. The caller must ensure
* that @bio has space for another bvec.
@@ -795,7 +807,7 @@ void __bio_add_page(struct bio *bio, struct page *page,
struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
- WARN_ON_ONCE(bio_full(bio));
+ WARN_ON_ONCE(bio_full(bio, len));
bv->bv_page = page;
bv->bv_offset = off;
@@ -807,20 +819,22 @@ void __bio_add_page(struct bio *bio, struct page *page,
EXPORT_SYMBOL_GPL(__bio_add_page);
/**
- * bio_add_page - attempt to add page to bio
+ * bio_add_page - attempt to add page(s) to bio
* @bio: destination bio
- * @page: page to add
- * @len: vec entry length
- * @offset: vec entry offset
+ * @page: start page to add
+ * @len: vec entry length, may cross pages
+ * @offset: vec entry offset relative to @page, may cross pages
*
- * Attempt to add a page to the bio_vec maplist. This will only fail
+ * Attempt to add page(s) to the bio_vec maplist. This will only fail
* if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
*/
int bio_add_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int offset)
{
- if (!__bio_try_merge_page(bio, page, len, offset)) {
- if (bio_full(bio))
+ bool same_page = false;
+
+ if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
+ if (bio_full(bio, len))
return 0;
__bio_add_page(bio, page, len, offset);
}
@@ -828,6 +842,42 @@ int bio_add_page(struct bio *bio, struct page *page,
}
EXPORT_SYMBOL(bio_add_page);
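bio_add_page() stays the primary interface for filesystems; with multi-page bvecs the (page, len, offset) triple may now describe a range that crosses page boundaries. A hedged caller-side sketch (hypothetical variables, simplified from typical direct-I/O style usage):

        /* add PAGE_SIZE chunks until the bio fills up */
        for (i = 0; i < nr_pages; i++) {
                if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
                        break;  /* bio full (or cloned): submit and start a new one */
        }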
+static void bio_get_pages(struct bio *bio)
+{
+ struct bvec_iter_all iter_all;
+ struct bio_vec *bvec;
+
+ bio_for_each_segment_all(bvec, bio, iter_all)
+ get_page(bvec->bv_page);
+}
+
+static void bio_release_pages(struct bio *bio)
+{
+ struct bvec_iter_all iter_all;
+ struct bio_vec *bvec;
+
+ bio_for_each_segment_all(bvec, bio, iter_all)
+ put_page(bvec->bv_page);
+}
+
+static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
+{
+ const struct bio_vec *bv = iter->bvec;
+ unsigned int len;
+ size_t size;
+
+ if (WARN_ON_ONCE(iter->iov_offset > bv->bv_len))
+ return -EINVAL;
+
+ len = min_t(size_t, bv->bv_len - iter->iov_offset, iter->count);
+ size = bio_add_page(bio, bv->bv_page, len,
+ bv->bv_offset + iter->iov_offset);
+ if (unlikely(size != len))
+ return -EINVAL;
+ iov_iter_advance(iter, size);
+ return 0;
+}
+
#define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *))
/**
@@ -846,6 +896,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
struct page **pages = (struct page **)bv;
+ bool same_page = false;
ssize_t size, left;
unsigned len, i;
size_t offset;
@@ -866,8 +917,15 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
struct page *page = pages[i];
len = min_t(size_t, PAGE_SIZE - offset, left);
- if (WARN_ON_ONCE(bio_add_page(bio, page, len, offset) != len))
- return -EINVAL;
+
+ if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
+ if (same_page)
+ put_page(page);
+ } else {
+ if (WARN_ON_ONCE(bio_full(bio, len)))
+ return -EINVAL;
+ __bio_add_page(bio, page, len, offset);
+ }
offset = 0;
}
@@ -876,30 +934,46 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
}
/**
- * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+ * bio_iov_iter_get_pages - add user or kernel pages to a bio
* @bio: bio to add pages to
- * @iter: iov iterator describing the region to be mapped
+ * @iter: iov iterator describing the region to be added
+ *
+ * This takes either an iterator pointing to user memory, or one pointing to
+ * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
+ * map them into the kernel. On IO completion, the caller should put those
+ * pages. If we're adding kernel pages, and the caller told us it's safe to
+ * do so, we just have to add the pages to the bio directly. We don't grab an
+ * extra reference to those pages (the user should already have that), and we
+ * don't put the page on IO completion. The caller needs to check if the bio is
+ * flagged BIO_NO_PAGE_REF on IO completion. If it isn't, then pages should be
+ * released.
*
- * Pins pages from *iter and appends them to @bio's bvec array. The
- * pages will have to be released using put_page() when done.
* The function tries, but does not guarantee, to pin as many pages as
- * fit into the bio, or are requested in *iter, whatever is smaller.
- * If MM encounters an error pinning the requested pages, it stops.
- * Error is returned only if 0 pages could be pinned.
+ * fit into the bio, or are requested in *iter, whatever is smaller. If
+ * MM encounters an error pinning the requested pages, it stops. Error
+ * is returned only if 0 pages could be pinned.
*/
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
- unsigned short orig_vcnt = bio->bi_vcnt;
+ const bool is_bvec = iov_iter_is_bvec(iter);
+ int ret;
- do {
- int ret = __bio_iov_iter_get_pages(bio, iter);
+ if (WARN_ON_ONCE(bio->bi_vcnt))
+ return -EINVAL;
- if (unlikely(ret))
- return bio->bi_vcnt > orig_vcnt ? 0 : ret;
+ do {
+ if (is_bvec)
+ ret = __bio_iov_bvec_add_pages(bio, iter);
+ else
+ ret = __bio_iov_iter_get_pages(bio, iter);
+ } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
- } while (iov_iter_count(iter) && !bio_full(bio));
+ if (iov_iter_bvec_no_ref(iter))
+ bio_set_flag(bio, BIO_NO_PAGE_REF);
+ else if (is_bvec)
+ bio_get_pages(bio);
- return 0;
+ return bio->bi_vcnt ? 0 : ret;
}
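On the completion side, callers now have to honour BIO_NO_PAGE_REF before dropping page references, matching the bio_dirty_fn()/bio_check_pages_dirty() changes further down. A minimal end_io sketch under that assumption (hypothetical handler name):

        static void my_dio_end_io(struct bio *bio)
        {
                struct bvec_iter_all iter_all;
                struct bio_vec *bvec;

                /* pages were pinned by bio_iov_iter_get_pages() unless the
                 * source was a no-ref bvec iterator */
                if (!bio_flagged(bio, BIO_NO_PAGE_REF))
                        bio_for_each_segment_all(bvec, bio, iter_all)
                                put_page(bvec->bv_page);
                bio_put(bio);
        }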
static void submit_bio_wait_endio(struct bio *bio)
@@ -1070,10 +1144,10 @@ static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
*/
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
- int i;
struct bio_vec *bvec;
+ struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, iter_all) {
ssize_t ret;
ret = copy_page_from_iter(bvec->bv_page,
@@ -1101,10 +1175,10 @@ static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
*/
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
- int i;
struct bio_vec *bvec;
+ struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, iter_all) {
ssize_t ret;
ret = copy_page_to_iter(bvec->bv_page,
@@ -1125,9 +1199,9 @@ static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
void bio_free_pages(struct bio *bio)
{
struct bio_vec *bvec;
- int i;
+ struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bvec, bio, i)
+ bio_for_each_segment_all(bvec, bio, iter_all)
__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);
@@ -1238,8 +1312,11 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
}
}
- if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
+ if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
+ if (!map_data)
+ __free_page(page);
break;
+ }
len -= bytes;
offset = 0;
@@ -1295,6 +1372,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
struct bio *bio;
int ret;
struct bio_vec *bvec;
+ struct bvec_iter_all iter_all;
if (!iov_iter_count(iter))
return ERR_PTR(-EINVAL);
@@ -1324,21 +1402,14 @@ struct bio *bio_map_user_iov(struct request_queue *q,
for (j = 0; j < npages; j++) {
struct page *page = pages[j];
unsigned int n = PAGE_SIZE - offs;
- unsigned short prev_bi_vcnt = bio->bi_vcnt;
if (n > bytes)
n = bytes;
- if (!bio_add_pc_page(q, bio, page, n, offs))
+ if (!__bio_add_pc_page(q, bio, page, n, offs,
+ true))
break;
- /*
- * check if vector was merged with previous
- * drop page reference if needed
- */
- if (bio->bi_vcnt == prev_bi_vcnt)
- put_page(page);
-
added += n;
bytes -= n;
offs = 0;
@@ -1368,7 +1439,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
return bio;
out_unmap:
- bio_for_each_segment_all(bvec, bio, j) {
+ bio_for_each_segment_all(bvec, bio, iter_all) {
put_page(bvec->bv_page);
}
bio_put(bio);
@@ -1378,12 +1449,12 @@ struct bio *bio_map_user_iov(struct request_queue *q,
static void __bio_unmap_user(struct bio *bio)
{
struct bio_vec *bvec;
- int i;
+ struct bvec_iter_all iter_all;
/*
* make sure we dirty pages we wrote to
*/
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, iter_all) {
if (bio_data_dir(bio) == READ)
set_page_dirty_lock(bvec->bv_page);
@@ -1474,9 +1545,9 @@ static void bio_copy_kern_endio_read(struct bio *bio)
{
char *p = bio->bi_private;
struct bio_vec *bvec;
- int i;
+ struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, iter_all) {
memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
p += bvec->bv_len;
}
@@ -1584,23 +1655,14 @@ cleanup:
void bio_set_pages_dirty(struct bio *bio)
{
struct bio_vec *bvec;
- int i;
+ struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, iter_all) {
if (!PageCompound(bvec->bv_page))
set_page_dirty_lock(bvec->bv_page);
}
}
-static void bio_release_pages(struct bio *bio)
-{
- struct bio_vec *bvec;
- int i;
-
- bio_for_each_segment_all(bvec, bio, i)
- put_page(bvec->bv_page);
-}
-
/*
* bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
* If they are, then fine. If, however, some pages are clean then they must
@@ -1634,7 +1696,8 @@ static void bio_dirty_fn(struct work_struct *work)
next = bio->bi_private;
bio_set_pages_dirty(bio);
- bio_release_pages(bio);
+ if (!bio_flagged(bio, BIO_NO_PAGE_REF))
+ bio_release_pages(bio);
bio_put(bio);
}
}
@@ -1643,14 +1706,15 @@ void bio_check_pages_dirty(struct bio *bio)
{
struct bio_vec *bvec;
unsigned long flags;
- int i;
+ struct bvec_iter_all iter_all;
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, iter_all) {
if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
goto defer;
}
- bio_release_pages(bio);
+ if (!bio_flagged(bio, BIO_NO_PAGE_REF))
+ bio_release_pages(bio);
bio_put(bio);
return;
defer:
@@ -2132,6 +2196,9 @@ static int __init init_bio(void)
bio_slab_nr = 0;
bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
GFP_KERNEL);
+
+ BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);
+
if (!bio_slabs)
panic("bio: can't allocate bios\n");
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 2bed5725aa03..1f7127b03490 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Common Block IO controller cgroup interface
*
@@ -880,7 +881,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
blkg_free(new_blkg);
} else {
blkg = blkg_create(pos, q, new_blkg);
- if (unlikely(IS_ERR(blkg))) {
+ if (IS_ERR(blkg)) {
ret = PTR_ERR(blkg);
goto fail_unlock;
}
@@ -1269,7 +1270,7 @@ void blkcg_drain_queue(struct request_queue *q)
* blkcg_exit_queue - exit and release blkcg part of request_queue
* @q: request_queue being released
*
- * Called from blk_release_queue(). Responsible for exiting blkcg part.
+ * Called from blk_exit_queue(). Responsible for exiting blkcg part.
*/
void blkcg_exit_queue(struct request_queue *q)
{
@@ -1736,8 +1737,8 @@ out:
/**
* blkcg_schedule_throttle - this task needs to check for throttling
- * @q - the request queue IO was submitted on
- * @use_memdelay - do we charge this to memory delay for PSI
+ * @q: the request queue IO was submitted on
+ * @use_memdelay: do we charge this to memory delay for PSI
*
* This is called by the IO controller when we know there's delay accumulated
* for the blkg for this task. We do not pass the blkg because there are places
@@ -1769,8 +1770,9 @@ void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
/**
* blkcg_add_delay - add delay to this blkg
- * @now - the current time in nanoseconds
- * @delta - how many nanoseconds of delay to add
+ * @blkg: blkg of interest
+ * @now: the current time in nanoseconds
+ * @delta: how many nanoseconds of delay to add
*
* Charge @delta to the blkg's current delay accumulation. This is used to
* throttle tasks if an IO controller thinks we need more throttling.
diff --git a/block/blk-core.c b/block/blk-core.c
index 6b78ec56a4f2..8340f69670d8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994, Karl Keyte: Added support for disk statistics
@@ -232,15 +233,6 @@ void blk_sync_queue(struct request_queue *q)
{
del_timer_sync(&q->timeout);
cancel_work_sync(&q->timeout_work);
-
- if (queue_is_mq(q)) {
- struct blk_mq_hw_ctx *hctx;
- int i;
-
- cancel_delayed_work_sync(&q->requeue_work);
- queue_for_each_hw_ctx(q, hctx, i)
- cancel_delayed_work_sync(&hctx->run_work);
- }
}
EXPORT_SYMBOL(blk_sync_queue);
@@ -290,35 +282,6 @@ void blk_set_queue_dying(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);
-/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
-void blk_exit_queue(struct request_queue *q)
-{
- /*
- * Since the I/O scheduler exit code may access cgroup information,
- * perform I/O scheduler exit before disassociating from the block
- * cgroup controller.
- */
- if (q->elevator) {
- ioc_clear_queue(q);
- elevator_exit(q, q->elevator);
- q->elevator = NULL;
- }
-
- /*
- * Remove all references to @q from the block cgroup controller before
- * restoring @q->queue_lock to avoid that restoring this pointer causes
- * e.g. blkcg_print_blkgs() to crash.
- */
- blkcg_exit_queue(q);
-
- /*
- * Since the cgroup code may dereference the @q->backing_dev_info
- * pointer, only decrease its reference count after having removed the
- * association with the block cgroup controller.
- */
- bdi_put(q->backing_dev_info);
-}
-
/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
@@ -347,18 +310,6 @@ void blk_cleanup_queue(struct request_queue *q)
blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
- /*
- * make sure all in-progress dispatch are completed because
- * blk_freeze_queue() can only complete all requests, and
- * dispatch may still be in-progress since we dispatch requests
- * from more than one contexts.
- *
- * We rely on driver to deal with the race in case that queue
- * initialization isn't done.
- */
- if (queue_is_mq(q) && blk_queue_init_done(q))
- blk_mq_quiesce_queue(q);
-
/* for synchronous bio-based driver finish in-flight integrity i/o */
blk_flush_integrity();
@@ -366,16 +317,21 @@ void blk_cleanup_queue(struct request_queue *q)
del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
blk_sync_queue(q);
+ if (queue_is_mq(q))
+ blk_mq_exit_queue(q);
+
/*
- * I/O scheduler exit is only safe after the sysfs scheduler attribute
- * has been removed.
+ * In theory, the request pool of sched_tags belongs to the request queue.
+ * However, the current implementation requires tag_set for freeing
+ * requests, so free the pool now.
+ *
+ * Queue has become frozen, there can't be any in-queue requests, so
+ * it is safe to free requests now.
*/
- WARN_ON_ONCE(q->kobj.state_in_sysfs);
-
- blk_exit_queue(q);
-
- if (queue_is_mq(q))
- blk_mq_free_queue(q);
+ mutex_lock(&q->sysfs_lock);
+ if (q->elevator)
+ blk_mq_sched_free_requests(q);
+ mutex_unlock(&q->sysfs_lock);
percpu_ref_exit(&q->q_usage_counter);
@@ -433,7 +389,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
smp_rmb();
wait_event(q->mq_freeze_wq,
- (atomic_read(&q->mq_freeze_depth) == 0 &&
+ (!q->mq_freeze_depth &&
(pm || (blk_pm_request_resume(q),
!blk_queue_pm_only(q)))) ||
blk_queue_dying(q));
@@ -500,8 +456,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
if (!q->stats)
goto fail_stats;
- q->backing_dev_info->ra_pages =
- (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
+ q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES;
q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
q->backing_dev_info->name = "block";
q->node = node_id;
@@ -524,6 +479,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
spin_lock_init(&q->queue_lock);
init_waitqueue_head(&q->mq_freeze_wq);
+ mutex_init(&q->mq_freeze_lock);
/*
* Init percpu_ref in atomic mode so that it's faster to shutdown.
@@ -1014,22 +970,8 @@ blk_qc_t generic_make_request(struct bio *bio)
* yet.
*/
struct bio_list bio_list_on_stack[2];
- blk_mq_req_flags_t flags = 0;
- struct request_queue *q = bio->bi_disk->queue;
blk_qc_t ret = BLK_QC_T_NONE;
- if (bio->bi_opf & REQ_NOWAIT)
- flags = BLK_MQ_REQ_NOWAIT;
- if (bio_flagged(bio, BIO_QUEUE_ENTERED))
- blk_queue_enter_live(q);
- else if (blk_queue_enter(q, flags) < 0) {
- if (!blk_queue_dying(q) && (bio->bi_opf & REQ_NOWAIT))
- bio_wouldblock_error(bio);
- else
- bio_io_error(bio);
- return ret;
- }
-
if (!generic_make_request_checks(bio))
goto out;
@@ -1066,22 +1008,11 @@ blk_qc_t generic_make_request(struct bio *bio)
bio_list_init(&bio_list_on_stack[0]);
current->bio_list = bio_list_on_stack;
do {
- bool enter_succeeded = true;
-
- if (unlikely(q != bio->bi_disk->queue)) {
- if (q)
- blk_queue_exit(q);
- q = bio->bi_disk->queue;
- flags = 0;
- if (bio->bi_opf & REQ_NOWAIT)
- flags = BLK_MQ_REQ_NOWAIT;
- if (blk_queue_enter(q, flags) < 0) {
- enter_succeeded = false;
- q = NULL;
- }
- }
+ struct request_queue *q = bio->bi_disk->queue;
+ blk_mq_req_flags_t flags = bio->bi_opf & REQ_NOWAIT ?
+ BLK_MQ_REQ_NOWAIT : 0;
- if (enter_succeeded) {
+ if (likely(blk_queue_enter(q, flags) == 0)) {
struct bio_list lower, same;
/* Create a fresh bio_list for all subordinate requests */
@@ -1089,6 +1020,8 @@ blk_qc_t generic_make_request(struct bio *bio)
bio_list_init(&bio_list_on_stack[0]);
ret = q->make_request_fn(q, bio);
+ blk_queue_exit(q);
+
/* sort new bios into those for a lower level
* and those for the same level
*/
@@ -1115,8 +1048,6 @@ blk_qc_t generic_make_request(struct bio *bio)
current->bio_list = NULL; /* deactivate */
out:
- if (q)
- blk_queue_exit(q);
return ret;
}
EXPORT_SYMBOL(generic_make_request);
@@ -1220,7 +1151,9 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
struct request *rq)
{
if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
- printk(KERN_ERR "%s: over max size limit.\n", __func__);
+ printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
+ __func__, blk_rq_sectors(rq),
+ blk_queue_get_max_sectors(q, req_op(rq)));
return -EIO;
}
@@ -1232,7 +1165,8 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
*/
blk_recalc_rq_segments(rq);
if (rq->nr_phys_segments > queue_max_segments(q)) {
- printk(KERN_ERR "%s: over max segments limit.\n", __func__);
+ printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
+ __func__, rq->nr_phys_segments, queue_max_segments(q));
return -EIO;
}
@@ -1246,8 +1180,6 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
*/
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
- blk_qc_t unused;
-
if (blk_cloned_rq_check_limits(q, rq))
return BLK_STS_IOERR;
@@ -1263,7 +1195,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
* bypass a potential scheduler on the bottom device for
* insert.
*/
- return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, true);
+ return blk_mq_request_issue_directly(rq, true);
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
diff --git a/block/blk-exec.c b/block/blk-exec.c
index a34b7d918742..1db44ca0f4a6 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Functions related to setting various queue properties from drivers
*/
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 6e0f2d97fc6d..aedd9320e605 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Functions to sequence PREFLUSH and FUA writes.
*
* Copyright (C) 2011 Max Planck Institute for Gravitational Physics
* Copyright (C) 2011 Tejun Heo <tj@kernel.org>
*
- * This file is released under the GPLv2.
- *
* REQ_{PREFLUSH|FUA} requests are decomposed to sequences consisted of three
* optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
* properties and hardware capability.
@@ -220,7 +219,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
flush_rq->tag = -1;
} else {
- blk_mq_put_driver_tag_hctx(hctx, flush_rq);
+ blk_mq_put_driver_tag(flush_rq);
flush_rq->internal_tag = -1;
}
@@ -324,7 +323,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
if (q->elevator) {
WARN_ON(rq->tag < 0);
- blk_mq_put_driver_tag_hctx(hctx, rq);
+ blk_mq_put_driver_tag(rq);
}
/*
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index d1ab089e0919..825c9c070458 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -1,23 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* blk-integrity.c - Block layer data integrity extensions
*
* Copyright (C) 2007, 2008 Oracle Corporation
* Written by: Martin K. Petersen <martin.petersen@oracle.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
*/
#include <linux/blkdev.h>
@@ -365,6 +351,7 @@ static struct attribute *integrity_attrs[] = {
&integrity_device_entry.attr,
NULL,
};
+ATTRIBUTE_GROUPS(integrity);
static const struct sysfs_ops integrity_ops = {
.show = &integrity_attr_show,
@@ -372,7 +359,7 @@ static const struct sysfs_ops integrity_ops = {
};
static struct kobj_type integrity_ktype = {
- .default_attrs = integrity_attrs,
+ .default_groups = integrity_groups,
.sysfs_ops = &integrity_ops,
};
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 2620baa1f699..d22e61bced86 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Block rq-qos base io controller
*
@@ -75,6 +76,7 @@
#include <linux/blk-mq.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"
+#include "blk.h"
#define DEFAULT_SCALE_COOKIE 1000000U
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 71e9ac03f621..17713d7d98d5 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -12,23 +12,6 @@
#include "blk.h"
-/*
- * Check if the two bvecs from two bios can be merged to one segment. If yes,
- * no need to check gap between the two bios since the 1st bio and the 1st bvec
- * in the 2nd bio can be handled in one segment.
- */
-static inline bool bios_segs_mergeable(struct request_queue *q,
- struct bio *prev, struct bio_vec *prev_last_bv,
- struct bio_vec *next_first_bv)
-{
- if (!biovec_phys_mergeable(q, prev_last_bv, next_first_bv))
- return false;
- if (prev->bi_seg_back_size + next_first_bv->bv_len >
- queue_max_segment_size(q))
- return false;
- return true;
-}
-
static inline bool bio_will_gap(struct request_queue *q,
struct request *prev_rq, struct bio *prev, struct bio *next)
{
@@ -60,7 +43,7 @@ static inline bool bio_will_gap(struct request_queue *q,
*/
bio_get_last_bvec(prev, &pb);
bio_get_first_bvec(next, &nb);
- if (bios_segs_mergeable(q, prev, &pb, &nb))
+ if (biovec_phys_mergeable(q, &pb, &nb))
return false;
return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}
@@ -161,6 +144,56 @@ static inline unsigned get_max_io_size(struct request_queue *q,
return sectors;
}
+static unsigned get_max_segment_size(struct request_queue *q,
+ unsigned offset)
+{
+ unsigned long mask = queue_segment_boundary(q);
+
+ /* default segment boundary mask means no boundary limit */
+ if (mask == BLK_SEG_BOUNDARY_MASK)
+ return queue_max_segment_size(q);
+
+ return min_t(unsigned long, mask - (mask & offset) + 1,
+ queue_max_segment_size(q));
+}
+
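A worked example of the boundary clamp, assuming a 4 KiB segment boundary (mask == 0xfff) and a 64 KiB queue_max_segment_size(): for offset == 0x600, mask - (mask & offset) + 1 == 0xfff - 0x600 + 1 == 0xa00, i.e. at most 2560 bytes fit before the boundary at 0x1000, and that is the segment size returned.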
+/*
+ * Split the bvec @bv into segments, and update all kinds of
+ * variables.
+ */
+static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
+ unsigned *nsegs, unsigned *sectors, unsigned max_segs)
+{
+ unsigned len = bv->bv_len;
+ unsigned total_len = 0;
+ unsigned new_nsegs = 0, seg_size = 0;
+
+ /*
+ * Multi-page bvec may be too big to hold in one segment, so the
+ * current bvec has to be split into multiple segments.
+ */
+ while (len && new_nsegs + *nsegs < max_segs) {
+ seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
+ seg_size = min(seg_size, len);
+
+ new_nsegs++;
+ total_len += seg_size;
+ len -= seg_size;
+
+ if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
+ break;
+ }
+
+ if (new_nsegs) {
+ *nsegs += new_nsegs;
+ if (sectors)
+ *sectors += total_len >> 9;
+ }
+
+ /* split in the middle of the bvec if len != 0 */
+ return !!len;
+}
+
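For illustration, assuming the default segment boundary, no virt boundary, and a 4 KiB queue_max_segment_size(): a single 16 KiB bvec at offset 0 is walked in four 4 KiB chunks, so bvec_split_segs() adds 4 to *nsegs and 32 (16384 >> 9) to *sectors, and returns false because the whole bvec was consumed.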
static struct bio *blk_bio_segment_split(struct request_queue *q,
struct bio *bio,
struct bio_set *bs,
@@ -168,13 +201,13 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
{
struct bio_vec bv, bvprv, *bvprvp = NULL;
struct bvec_iter iter;
- unsigned seg_size = 0, nsegs = 0, sectors = 0;
- unsigned front_seg_size = bio->bi_seg_front_size;
+ unsigned nsegs = 0, sectors = 0;
bool do_split = true;
struct bio *new = NULL;
const unsigned max_sectors = get_max_io_size(q, bio);
+ const unsigned max_segs = queue_max_segments(q);
- bio_for_each_segment(bv, bio, iter) {
+ bio_for_each_bvec(bv, bio, iter) {
/*
* If the queue doesn't support SG gaps and adding this
* offset would create a gap, disallow it.
@@ -187,40 +220,29 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
* Consider this a new segment if we're splitting in
* the middle of this vector.
*/
- if (nsegs < queue_max_segments(q) &&
+ if (nsegs < max_segs &&
sectors < max_sectors) {
- nsegs++;
- sectors = max_sectors;
+ /* split in the middle of bvec */
+ bv.bv_len = (max_sectors - sectors) << 9;
+ bvec_split_segs(q, &bv, &nsegs,
+ &sectors, max_segs);
}
goto split;
}
- if (bvprvp) {
- if (seg_size + bv.bv_len > queue_max_segment_size(q))
- goto new_segment;
- if (!biovec_phys_mergeable(q, bvprvp, &bv))
- goto new_segment;
-
- seg_size += bv.bv_len;
- bvprv = bv;
- bvprvp = &bvprv;
- sectors += bv.bv_len >> 9;
-
- continue;
- }
-new_segment:
- if (nsegs == queue_max_segments(q))
+ if (nsegs == max_segs)
goto split;
- if (nsegs == 1 && seg_size > front_seg_size)
- front_seg_size = seg_size;
-
- nsegs++;
bvprv = bv;
bvprvp = &bvprv;
- seg_size = bv.bv_len;
- sectors += bv.bv_len >> 9;
+ if (bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
+ nsegs++;
+ sectors += bv.bv_len >> 9;
+ } else if (bvec_split_segs(q, &bv, &nsegs, &sectors,
+ max_segs)) {
+ goto split;
+ }
}
do_split = false;
@@ -233,12 +255,6 @@ split:
bio = new;
}
- if (nsegs == 1 && seg_size > front_seg_size)
- front_seg_size = seg_size;
- bio->bi_seg_front_size = front_seg_size;
- if (seg_size > bio->bi_seg_back_size)
- bio->bi_seg_back_size = seg_size;
-
return do_split ? new : NULL;
}
@@ -291,14 +307,11 @@ void blk_queue_split(struct request_queue *q, struct bio **bio)
EXPORT_SYMBOL(blk_queue_split);
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
- struct bio *bio,
- bool no_sg_merge)
+ struct bio *bio)
{
- struct bio_vec bv, bvprv = { NULL };
- int prev = 0;
- unsigned int seg_size, nr_phys_segs;
- struct bio *fbio, *bbio;
+ unsigned int nr_phys_segs = 0;
struct bvec_iter iter;
+ struct bio_vec bv;
if (!bio)
return 0;
@@ -312,161 +325,143 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
return 1;
}
- fbio = bio;
- seg_size = 0;
- nr_phys_segs = 0;
for_each_bio(bio) {
- bio_for_each_segment(bv, bio, iter) {
- /*
- * If SG merging is disabled, each bio vector is
- * a segment
- */
- if (no_sg_merge)
- goto new_segment;
-
- if (prev) {
- if (seg_size + bv.bv_len
- > queue_max_segment_size(q))
- goto new_segment;
- if (!biovec_phys_mergeable(q, &bvprv, &bv))
- goto new_segment;
-
- seg_size += bv.bv_len;
- bvprv = bv;
- continue;
- }
-new_segment:
- if (nr_phys_segs == 1 && seg_size >
- fbio->bi_seg_front_size)
- fbio->bi_seg_front_size = seg_size;
-
- nr_phys_segs++;
- bvprv = bv;
- prev = 1;
- seg_size = bv.bv_len;
- }
- bbio = bio;
+ bio_for_each_bvec(bv, bio, iter)
+ bvec_split_segs(q, &bv, &nr_phys_segs, NULL, UINT_MAX);
}
- if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
- fbio->bi_seg_front_size = seg_size;
- if (seg_size > bbio->bi_seg_back_size)
- bbio->bi_seg_back_size = seg_size;
-
return nr_phys_segs;
}
void blk_recalc_rq_segments(struct request *rq)
{
- bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
- &rq->q->queue_flags);
-
- rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
- no_sg_merge);
+ rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
- unsigned short seg_cnt;
+ struct bio *nxt = bio->bi_next;
- /* estimate segment number by bi_vcnt for non-cloned bio */
- if (bio_flagged(bio, BIO_CLONED))
- seg_cnt = bio_segments(bio);
- else
- seg_cnt = bio->bi_vcnt;
-
- if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
- (seg_cnt < queue_max_segments(q)))
- bio->bi_phys_segments = seg_cnt;
- else {
- struct bio *nxt = bio->bi_next;
-
- bio->bi_next = NULL;
- bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
- bio->bi_next = nxt;
- }
+ bio->bi_next = NULL;
+ bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
+ bio->bi_next = nxt;
bio_set_flag(bio, BIO_SEG_VALID);
}
-static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
- struct bio *nxt)
+static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
+ struct scatterlist *sglist)
{
- struct bio_vec end_bv = { NULL }, nxt_bv;
-
- if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
- queue_max_segment_size(q))
- return 0;
+ if (!*sg)
+ return sglist;
- if (!bio_has_data(bio))
- return 1;
-
- bio_get_last_bvec(bio, &end_bv);
- bio_get_first_bvec(nxt, &nxt_bv);
-
- return biovec_phys_mergeable(q, &end_bv, &nxt_bv);
+ /*
+ * If the driver previously mapped a shorter list, we could see a
+ * termination bit prematurely unless it fully inits the sg table
+ * on each mapping. We KNOW that there must be more entries here
+ * or the driver would be buggy, so force clear the termination bit
+ * to avoid doing a full sg_init_table() in drivers for each command.
+ */
+ sg_unmark_end(*sg);
+ return sg_next(*sg);
}
-static inline void
-__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
- struct scatterlist *sglist, struct bio_vec *bvprv,
- struct scatterlist **sg, int *nsegs)
+static unsigned blk_bvec_map_sg(struct request_queue *q,
+ struct bio_vec *bvec, struct scatterlist *sglist,
+ struct scatterlist **sg)
{
+ unsigned nbytes = bvec->bv_len;
+ unsigned nsegs = 0, total = 0;
- int nbytes = bvec->bv_len;
+ while (nbytes > 0) {
+ unsigned offset = bvec->bv_offset + total;
+ unsigned len = min(get_max_segment_size(q, offset), nbytes);
+ struct page *page = bvec->bv_page;
- if (*sg) {
- if ((*sg)->length + nbytes > queue_max_segment_size(q))
- goto new_segment;
- if (!biovec_phys_mergeable(q, bvprv, bvec))
- goto new_segment;
-
- (*sg)->length += nbytes;
- } else {
-new_segment:
- if (!*sg)
- *sg = sglist;
- else {
- /*
- * If the driver previously mapped a shorter
- * list, we could see a termination bit
- * prematurely unless it fully inits the sg
- * table on each mapping. We KNOW that there
- * must be more entries here or the driver
- * would be buggy, so force clear the
- * termination bit to avoid doing a full
- * sg_init_table() in drivers for each command.
- */
- sg_unmark_end(*sg);
- *sg = sg_next(*sg);
- }
+ /*
+ * Unfortunately a fair number of drivers barf on scatterlists
+ * that have an offset larger than PAGE_SIZE, despite other
+ * subsystems dealing with that invariant just fine. For now
+ * stick to the legacy format where we never present those from
+ * the block layer, but the code below should be removed once
+ * these offenders (mostly MMC/SD drivers) are fixed.
+ */
+ page += (offset >> PAGE_SHIFT);
+ offset &= ~PAGE_MASK;
- sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
- (*nsegs)++;
+ *sg = blk_next_sg(sg, sglist);
+ sg_set_page(*sg, page, len, offset);
+
+ total += len;
+ nbytes -= len;
+ nsegs++;
}
- *bvprv = *bvec;
+
+ return nsegs;
}
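A quick example of the offset normalisation above, assuming 4 KiB pages: a chunk starting at bvec->bv_offset + total == 0x2300 is emitted as sg_set_page(*sg, bvec->bv_page + 2, len, 0x300), so no scatterlist entry ever carries an offset of PAGE_SIZE or more.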
-static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
+static inline int __blk_bvec_map_sg(struct bio_vec bv,
struct scatterlist *sglist, struct scatterlist **sg)
{
- *sg = sglist;
+ *sg = blk_next_sg(sg, sglist);
sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
return 1;
}
+/* only try to merge bvecs into one sg if they are from two bios */
+static inline bool
+__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
+ struct bio_vec *bvprv, struct scatterlist **sg)
+{
+
+ int nbytes = bvec->bv_len;
+
+ if (!*sg)
+ return false;
+
+ if ((*sg)->length + nbytes > queue_max_segment_size(q))
+ return false;
+
+ if (!biovec_phys_mergeable(q, bvprv, bvec))
+ return false;
+
+ (*sg)->length += nbytes;
+
+ return true;
+}
+
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
struct scatterlist *sglist,
struct scatterlist **sg)
{
- struct bio_vec bvec, bvprv = { NULL };
+ struct bio_vec uninitialized_var(bvec), bvprv = { NULL };
struct bvec_iter iter;
int nsegs = 0;
+ bool new_bio = false;
- for_each_bio(bio)
- bio_for_each_segment(bvec, bio, iter)
- __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
- &nsegs);
+ for_each_bio(bio) {
+ bio_for_each_bvec(bvec, bio, iter) {
+ /*
+ * Only try to merge bvecs from two different bios, since
+ * bvecs within a single bio were already merged when its
+ * pages were added
+ */
+ if (new_bio &&
+ __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
+ goto next_bvec;
+
+ if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
+ nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
+ else
+ nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
+ next_bvec:
+ new_bio = false;
+ }
+ if (likely(bio->bi_iter.bi_size)) {
+ bvprv = bvec;
+ new_bio = true;
+ }
+ }
return nsegs;
}
@@ -482,9 +477,9 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
int nsegs = 0;
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
- nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
+ nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, &sg);
else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
- nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
+ nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, &sg);
else if (rq->bio)
nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
@@ -613,8 +608,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
struct request *next)
{
int total_phys_segments;
- unsigned int seg_size =
- req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
if (req_gap_back_merge(req, next->bio))
return 0;
@@ -627,14 +620,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
return 0;
total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
- if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
- if (req->nr_phys_segments == 1)
- req->bio->bi_seg_front_size = seg_size;
- if (next->nr_phys_segments == 1)
- next->biotail->bi_seg_back_size = seg_size;
- total_phys_segments--;
- }
-
if (total_phys_segments > queue_max_segments(q))
return 0;
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 03a534820271..f945621a0e8f 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* CPU <-> hardware queue mapping helpers
*
@@ -41,8 +42,8 @@ int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
/*
* First do sequential mapping between CPUs and queues.
* In case we still have CPUs to map, and we have some number of
- * threads per cores then map sibling threads to the same queue for
- * performace optimizations.
+ * threads per core then map sibling threads to the same queue
+ * for performance optimizations.
*/
if (cpu < nr_queues) {
map[cpu] = cpu_to_queue_index(qmap, nr_queues, cpu);
@@ -59,7 +60,11 @@ int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);
-/*
+/**
+ * blk_mq_hw_queue_to_node - Look up the memory node for a hardware queue index
+ * @qmap: CPU to hardware queue map.
+ * @index: hardware queue index.
+ *
* We have no quick way of doing reverse lookups. This is only used at
* queue init time, so runtime isn't important.
*/
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 7921573aebbc..3afe327f816f 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Facebook
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License v2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
@@ -115,7 +104,6 @@ static int queue_pm_only_show(void *data, struct seq_file *m)
static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(STOPPED),
QUEUE_FLAG_NAME(DYING),
- QUEUE_FLAG_NAME(BIDI),
QUEUE_FLAG_NAME(NOMERGES),
QUEUE_FLAG_NAME(SAME_COMP),
QUEUE_FLAG_NAME(FAIL_IO),
@@ -128,11 +116,9 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(SAME_FORCE),
QUEUE_FLAG_NAME(DEAD),
QUEUE_FLAG_NAME(INIT_DONE),
- QUEUE_FLAG_NAME(NO_SG_MERGE),
QUEUE_FLAG_NAME(POLL),
QUEUE_FLAG_NAME(WC),
QUEUE_FLAG_NAME(FUA),
- QUEUE_FLAG_NAME(FLUSH_NQ),
QUEUE_FLAG_NAME(DAX),
QUEUE_FLAG_NAME(STATS),
QUEUE_FLAG_NAME(POLL_STATS),
@@ -251,7 +237,6 @@ static const char *const alloc_policy_name[] = {
static const char *const hctx_flag_name[] = {
HCTX_FLAG_NAME(SHOULD_MERGE),
HCTX_FLAG_NAME(TAG_SHARED),
- HCTX_FLAG_NAME(SG_MERGE),
HCTX_FLAG_NAME(BLOCKING),
HCTX_FLAG_NAME(NO_SCHED),
};
@@ -836,38 +821,28 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
{},
};
-static bool debugfs_create_files(struct dentry *parent, void *data,
+static void debugfs_create_files(struct dentry *parent, void *data,
const struct blk_mq_debugfs_attr *attr)
{
if (IS_ERR_OR_NULL(parent))
- return false;
+ return;
d_inode(parent)->i_private = data;
- for (; attr->name; attr++) {
- if (!debugfs_create_file(attr->name, attr->mode, parent,
- (void *)attr, &blk_mq_debugfs_fops))
- return false;
- }
- return true;
+ for (; attr->name; attr++)
+ debugfs_create_file(attr->name, attr->mode, parent,
+ (void *)attr, &blk_mq_debugfs_fops);
}
-int blk_mq_debugfs_register(struct request_queue *q)
+void blk_mq_debugfs_register(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i;
- if (!blk_debugfs_root)
- return -ENOENT;
-
q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
blk_debugfs_root);
- if (!q->debugfs_dir)
- return -ENOMEM;
- if (!debugfs_create_files(q->debugfs_dir, q,
- blk_mq_debugfs_queue_attrs))
- goto err;
+ debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);
/*
* blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
@@ -879,11 +854,10 @@ int blk_mq_debugfs_register(struct request_queue *q)
/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
queue_for_each_hw_ctx(q, hctx, i) {
- if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
- goto err;
- if (q->elevator && !hctx->sched_debugfs_dir &&
- blk_mq_debugfs_register_sched_hctx(q, hctx))
- goto err;
+ if (!hctx->debugfs_dir)
+ blk_mq_debugfs_register_hctx(q, hctx);
+ if (q->elevator && !hctx->sched_debugfs_dir)
+ blk_mq_debugfs_register_sched_hctx(q, hctx);
}
if (q->rq_qos) {
@@ -894,12 +868,6 @@ int blk_mq_debugfs_register(struct request_queue *q)
rqos = rqos->next;
}
}
-
- return 0;
-
-err:
- blk_mq_debugfs_unregister(q);
- return -ENOMEM;
}
void blk_mq_debugfs_unregister(struct request_queue *q)
@@ -909,52 +877,32 @@ void blk_mq_debugfs_unregister(struct request_queue *q)
q->debugfs_dir = NULL;
}
-static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
- struct blk_mq_ctx *ctx)
+static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
+ struct blk_mq_ctx *ctx)
{
struct dentry *ctx_dir;
char name[20];
snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
- if (!ctx_dir)
- return -ENOMEM;
- if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
- return -ENOMEM;
-
- return 0;
+ debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}
-int blk_mq_debugfs_register_hctx(struct request_queue *q,
- struct blk_mq_hw_ctx *hctx)
+void blk_mq_debugfs_register_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx)
{
struct blk_mq_ctx *ctx;
char name[20];
int i;
- if (!q->debugfs_dir)
- return -ENOENT;
-
snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
- if (!hctx->debugfs_dir)
- return -ENOMEM;
- if (!debugfs_create_files(hctx->debugfs_dir, hctx,
- blk_mq_debugfs_hctx_attrs))
- goto err;
-
- hctx_for_each_ctx(hctx, ctx, i) {
- if (blk_mq_debugfs_register_ctx(hctx, ctx))
- goto err;
- }
+ debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);
- return 0;
-
-err:
- blk_mq_debugfs_unregister_hctx(hctx);
- return -ENOMEM;
+ hctx_for_each_ctx(hctx, ctx, i)
+ blk_mq_debugfs_register_ctx(hctx, ctx);
}
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
@@ -964,17 +912,13 @@ void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
hctx->debugfs_dir = NULL;
}
-int blk_mq_debugfs_register_hctxs(struct request_queue *q)
+void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i;
- queue_for_each_hw_ctx(q, hctx, i) {
- if (blk_mq_debugfs_register_hctx(q, hctx))
- return -ENOMEM;
- }
-
- return 0;
+ queue_for_each_hw_ctx(q, hctx, i)
+ blk_mq_debugfs_register_hctx(q, hctx);
}
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
@@ -986,29 +930,23 @@ void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
blk_mq_debugfs_unregister_hctx(hctx);
}
-int blk_mq_debugfs_register_sched(struct request_queue *q)
+void blk_mq_debugfs_register_sched(struct request_queue *q)
{
struct elevator_type *e = q->elevator->type;
+ /*
+ * If the parent directory has not been created yet, return; we will be
+ * called again later and the directory/files will be created then.
+ */
if (!q->debugfs_dir)
- return -ENOENT;
+ return;
if (!e->queue_debugfs_attrs)
- return 0;
+ return;
q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
- if (!q->sched_debugfs_dir)
- return -ENOMEM;
-
- if (!debugfs_create_files(q->sched_debugfs_dir, q,
- e->queue_debugfs_attrs))
- goto err;
- return 0;
-
-err:
- blk_mq_debugfs_unregister_sched(q);
- return -ENOMEM;
+ debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}
void blk_mq_debugfs_unregister_sched(struct request_queue *q)
@@ -1023,36 +961,22 @@ void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
rqos->debugfs_dir = NULL;
}
-int blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
+void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
struct request_queue *q = rqos->q;
const char *dir_name = rq_qos_id_to_name(rqos->id);
- if (!q->debugfs_dir)
- return -ENOENT;
-
if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
- return 0;
+ return;
- if (!q->rqos_debugfs_dir) {
+ if (!q->rqos_debugfs_dir)
q->rqos_debugfs_dir = debugfs_create_dir("rqos",
q->debugfs_dir);
- if (!q->rqos_debugfs_dir)
- return -ENOMEM;
- }
rqos->debugfs_dir = debugfs_create_dir(dir_name,
rqos->q->rqos_debugfs_dir);
- if (!rqos->debugfs_dir)
- return -ENOMEM;
- if (!debugfs_create_files(rqos->debugfs_dir, rqos,
- rqos->ops->debugfs_attrs))
- goto err;
- return 0;
- err:
- blk_mq_debugfs_unregister_rqos(rqos);
- return -ENOMEM;
+ debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}
void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
@@ -1061,27 +985,18 @@ void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
q->rqos_debugfs_dir = NULL;
}
-int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
- struct blk_mq_hw_ctx *hctx)
+void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx)
{
struct elevator_type *e = q->elevator->type;
- if (!hctx->debugfs_dir)
- return -ENOENT;
-
if (!e->hctx_debugfs_attrs)
- return 0;
+ return;
hctx->sched_debugfs_dir = debugfs_create_dir("sched",
hctx->debugfs_dir);
- if (!hctx->sched_debugfs_dir)
- return -ENOMEM;
-
- if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
- e->hctx_debugfs_attrs))
- return -ENOMEM;
-
- return 0;
+ debugfs_create_files(hctx->sched_debugfs_dir, hctx,
+ e->hctx_debugfs_attrs);
}
void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
diff --git a/block/blk-mq-debugfs.h b/block/blk-mq-debugfs.h
index 8c9012a578c1..a68aa6041a10 100644
--- a/block/blk-mq-debugfs.h
+++ b/block/blk-mq-debugfs.h
@@ -18,74 +18,68 @@ struct blk_mq_debugfs_attr {
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq);
int blk_mq_debugfs_rq_show(struct seq_file *m, void *v);
-int blk_mq_debugfs_register(struct request_queue *q);
+void blk_mq_debugfs_register(struct request_queue *q);
void blk_mq_debugfs_unregister(struct request_queue *q);
-int blk_mq_debugfs_register_hctx(struct request_queue *q,
- struct blk_mq_hw_ctx *hctx);
+void blk_mq_debugfs_register_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx);
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
-int blk_mq_debugfs_register_hctxs(struct request_queue *q);
+void blk_mq_debugfs_register_hctxs(struct request_queue *q);
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
-int blk_mq_debugfs_register_sched(struct request_queue *q);
+void blk_mq_debugfs_register_sched(struct request_queue *q);
void blk_mq_debugfs_unregister_sched(struct request_queue *q);
-int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
+void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
struct blk_mq_hw_ctx *hctx);
void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
-int blk_mq_debugfs_register_rqos(struct rq_qos *rqos);
+void blk_mq_debugfs_register_rqos(struct rq_qos *rqos);
void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos);
void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q);
#else
-static inline int blk_mq_debugfs_register(struct request_queue *q)
+static inline void blk_mq_debugfs_register(struct request_queue *q)
{
- return 0;
}
static inline void blk_mq_debugfs_unregister(struct request_queue *q)
{
}
-static inline int blk_mq_debugfs_register_hctx(struct request_queue *q,
- struct blk_mq_hw_ctx *hctx)
+static inline void blk_mq_debugfs_register_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx)
{
- return 0;
}
static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
}
-static inline int blk_mq_debugfs_register_hctxs(struct request_queue *q)
+static inline void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
- return 0;
}
static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
}
-static inline int blk_mq_debugfs_register_sched(struct request_queue *q)
+static inline void blk_mq_debugfs_register_sched(struct request_queue *q)
{
- return 0;
}
static inline void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
}
-static inline int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
- struct blk_mq_hw_ctx *hctx)
+static inline void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx)
{
- return 0;
}
static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
}
-static inline int blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
+static inline void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
- return 0;
}
static inline void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
index 1dce18553984..b595a94c4d16 100644
--- a/block/blk-mq-pci.c
+++ b/block/blk-mq-pci.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 Christoph Hellwig.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/kobject.h>
#include <linux/blkdev.h>
@@ -21,7 +13,7 @@
/**
* blk_mq_pci_map_queues - provide a default queue mapping for PCI device
- * @set: tagset to provide the mapping for
+ * @qmap: CPU to hardware queue map.
* @pdev: PCI device associated with @set.
* @offset: Offset to use for the pci irq vector
*
diff --git a/block/blk-mq-rdma.c b/block/blk-mq-rdma.c
index 45030a81a1ed..14f968e58b8f 100644
--- a/block/blk-mq-rdma.c
+++ b/block/blk-mq-rdma.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017 Sagi Grimberg.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/blk-mq.h>
#include <linux/blk-mq-rdma.h>
@@ -16,8 +8,8 @@
/**
* blk_mq_rdma_map_queues - provide a default queue mapping for rdma device
- * @set: tagset to provide the mapping for
- * @dev: rdma device associated with @set.
+ * @map: CPU to hardware queue map.
+ * @dev: rdma device to provide a mapping for.
* @first_vec: first interrupt vectors to use for queues (usually 0)
*
* This function assumes the rdma device @dev has at least as many available
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 140933e4a7d1..2766066a15db 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* blk-mq scheduling framework
*
@@ -321,7 +322,7 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
struct elevator_queue *e = q->elevator;
struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
- struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx->cpu);
+ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
bool ret = false;
enum hctx_type type;
@@ -413,6 +414,14 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
struct list_head *list, bool run_queue_async)
{
struct elevator_queue *e;
+ struct request_queue *q = hctx->queue;
+
+ /*
+ * blk_mq_sched_insert_requests() is called from flush plug
+ * context only, and holds one usage counter to prevent the queue
+ * from being released.
+ */
+ percpu_ref_get(&q->q_usage_counter);
e = hctx->queue->elevator;
if (e && e->type->ops.insert_requests)
@@ -423,13 +432,17 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
* busy in case of 'none' scheduler, and this way may save
* us one extra enqueue & dequeue to sw queue.
*/
- if (!hctx->dispatch_busy && !e && !run_queue_async)
+ if (!hctx->dispatch_busy && !e && !run_queue_async) {
blk_mq_try_issue_list_directly(hctx, list);
- else
- blk_mq_insert_requests(hctx, ctx, list);
+ if (list_empty(list))
+ goto out;
+ }
+ blk_mq_insert_requests(hctx, ctx, list);
}
blk_mq_run_hw_queue(hctx, run_queue_async);
+ out:
+ percpu_ref_put(&q->q_usage_counter);
}
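The usage-counter bracket introduced here is the standard guard against queue teardown racing with a plug flush; a condensed view of the pattern added to this function:

        percpu_ref_get(&q->q_usage_counter);
        /* ... issue directly or insert the plugged requests ... */
        blk_mq_run_hw_queue(hctx, run_queue_async);
        percpu_ref_put(&q->q_usage_counter);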
static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
@@ -462,14 +475,18 @@ static int blk_mq_sched_alloc_tags(struct request_queue *q,
return ret;
}
+/* called in queue's release handler, tagset has gone away */
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
- struct blk_mq_tag_set *set = q->tag_set;
struct blk_mq_hw_ctx *hctx;
int i;
- queue_for_each_hw_ctx(q, hctx, i)
- blk_mq_sched_free_tags(set, hctx, i);
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (hctx->sched_tags) {
+ blk_mq_free_rq_map(hctx->sched_tags);
+ hctx->sched_tags = NULL;
+ }
+ }
}
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
@@ -510,6 +527,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
ret = e->ops.init_hctx(hctx, i);
if (ret) {
eq = q->elevator;
+ blk_mq_sched_free_requests(q);
blk_mq_exit_sched(q, eq);
kobject_put(&eq->kobj);
return ret;
@@ -521,11 +539,29 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
return 0;
err:
+ blk_mq_sched_free_requests(q);
blk_mq_sched_tags_teardown(q);
q->elevator = NULL;
return ret;
}
+/*
+ * called from either blk_cleanup_queue() or elevator_switch(); tagset
+ * is required for freeing requests
+ */
+void blk_mq_sched_free_requests(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ lockdep_assert_held(&q->sysfs_lock);
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (hctx->sched_tags)
+ blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
+ }
+}
+
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
struct blk_mq_hw_ctx *hctx;
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index c7bdb52367ac..3cf92cbbd8ac 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -28,6 +28,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
+void blk_mq_sched_free_requests(struct request_queue *q);
static inline bool
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 3f9c3f4ac44c..d6e1a9bd7131 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
@@ -10,6 +11,7 @@
#include <linux/smp.h>
#include <linux/blk-mq.h>
+#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"
@@ -33,6 +35,13 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
kobj);
+
+ cancel_delayed_work_sync(&hctx->run_work);
+
+ if (hctx->flags & BLK_MQ_F_BLOCKING)
+ cleanup_srcu_struct(hctx->srcu);
+ blk_free_flush_queue(hctx->fq);
+ sbitmap_free(&hctx->ctx_map);
free_cpumask_var(hctx->cpumask);
kfree(hctx->ctxs);
kfree(hctx);
@@ -173,10 +182,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
return ret;
}
-static struct attribute *default_ctx_attrs[] = {
- NULL,
-};
-
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
.attr = {.name = "nr_tags", .mode = 0444 },
.show = blk_mq_hw_sysfs_nr_tags_show,
@@ -196,6 +201,7 @@ static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_cpus.attr,
NULL,
};
+ATTRIBUTE_GROUPS(default_hw_ctx);
static const struct sysfs_ops blk_mq_sysfs_ops = {
.show = blk_mq_sysfs_show,
@@ -214,13 +220,12 @@ static struct kobj_type blk_mq_ktype = {
static struct kobj_type blk_mq_ctx_ktype = {
.sysfs_ops = &blk_mq_sysfs_ops,
- .default_attrs = default_ctx_attrs,
.release = blk_mq_ctx_sysfs_release,
};
static struct kobj_type blk_mq_hw_ktype = {
.sysfs_ops = &blk_mq_hw_sysfs_ops,
- .default_attrs = default_hw_ctx_attrs,
+ .default_groups = default_hw_ctx_groups,
.release = blk_mq_hw_sysfs_release,
};
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 2089c6c62f44..7513c8eaabee 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Tag allocation using scalable bitmaps. Uses active queue tracking to support
* fairer distribution of tags between multiple submitters when a shared tag map
@@ -170,7 +171,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
data->ctx = blk_mq_get_ctx(data->q);
data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
- data->ctx->cpu);
+ data->ctx);
tags = blk_mq_tags_from_data(data);
if (data->flags & BLK_MQ_REQ_RESERVED)
bt = &tags->breserved_tags;
diff --git a/block/blk-mq-virtio.c b/block/blk-mq-virtio.c
index 370827163835..488341628256 100644
--- a/block/blk-mq-virtio.c
+++ b/block/blk-mq-virtio.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016 Christoph Hellwig.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/device.h>
#include <linux/blk-mq.h>
@@ -19,8 +11,8 @@
/**
* blk_mq_virtio_map_queues - provide a default queue mapping for virtio device
- * @set: tagset to provide the mapping for
- * @vdev: virtio device associated with @set.
+ * @qmap: CPU to hardware queue map.
+ * @vdev: virtio device to provide a mapping for.
* @first_vec: first interrupt vectors to use for queues (usually 0)
*
* This function assumes the virtio device @vdev has at least as many available
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9437a5eb07cf..ce0f5f4ede70 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Block multiqueue core code
*
@@ -59,7 +60,8 @@ static int blk_mq_poll_stats_bkt(const struct request *rq)
}
/*
- * Check if any of the ctx's have pending work in this hardware queue
+ * Check if any of the ctxs, the dispatch list or the elevator
+ * has pending work in this hardware queue.
*/
static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
{
@@ -142,13 +144,14 @@ void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
void blk_freeze_queue_start(struct request_queue *q)
{
- int freeze_depth;
-
- freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
- if (freeze_depth == 1) {
+ mutex_lock(&q->mq_freeze_lock);
+ if (++q->mq_freeze_depth == 1) {
percpu_ref_kill(&q->q_usage_counter);
+ mutex_unlock(&q->mq_freeze_lock);
if (queue_is_mq(q))
blk_mq_run_hw_queues(q, false);
+ } else {
+ mutex_unlock(&q->mq_freeze_lock);
}
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_start);
@@ -197,14 +200,14 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
void blk_mq_unfreeze_queue(struct request_queue *q)
{
- int freeze_depth;
-
- freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
- WARN_ON_ONCE(freeze_depth < 0);
- if (!freeze_depth) {
+ mutex_lock(&q->mq_freeze_lock);
+ q->mq_freeze_depth--;
+ WARN_ON_ONCE(q->mq_freeze_depth < 0);
+ if (!q->mq_freeze_depth) {
percpu_ref_resurrect(&q->q_usage_counter);
wake_up_all(&q->mq_freeze_wq);
}
+ mutex_unlock(&q->mq_freeze_lock);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
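The hunks above replace the atomic mq_freeze_depth with a plain counter protected by q->mq_freeze_lock, so the 0->1 and 1->0 transitions and the percpu_ref kill/resurrect they trigger cannot interleave between concurrent callers. A rough user-space analogue of the pattern, with a pthread mutex in place of the kernel mutex and a boolean in place of the percpu ref (none of these names are kernel APIs):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t freeze_lock = PTHREAD_MUTEX_INITIALIZER;
static int freeze_depth;        /* protected by freeze_lock */
static bool ref_killed;         /* stand-in for the percpu ref state */

/* Analogue of blk_freeze_queue_start(): only the 0 -> 1 transition
 * "kills" the reference, and that decision is made under the lock. */
static void freeze_start(void)
{
    pthread_mutex_lock(&freeze_lock);
    if (++freeze_depth == 1)
        ref_killed = true;
    pthread_mutex_unlock(&freeze_lock);
}

/* Analogue of blk_mq_unfreeze_queue(): only the 1 -> 0 transition
 * "resurrects" the reference, again decided under the same lock. */
static void unfreeze(void)
{
    pthread_mutex_lock(&freeze_lock);
    if (--freeze_depth == 0)
        ref_killed = false;
    pthread_mutex_unlock(&freeze_lock);
}

int main(void)
{
    freeze_start();     /* depth 0 -> 1, ref killed */
    freeze_start();     /* depth 1 -> 2, no state change */
    unfreeze();         /* depth 2 -> 1, still killed */
    unfreeze();         /* depth 1 -> 0, ref resurrected */
    printf("depth=%d killed=%d\n", freeze_depth, ref_killed);
    return 0;
}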
@@ -331,7 +334,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
#if defined(CONFIG_BLK_DEV_INTEGRITY)
rq->nr_integrity_segments = 0;
#endif
- rq->special = NULL;
/* tag was already set */
rq->extra_len = 0;
WRITE_ONCE(rq->deadline, 0);
@@ -340,7 +342,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->end_io = NULL;
rq->end_io_data = NULL;
- rq->next_rq = NULL;
data->ctx->rq_dispatched[op_is_sync(op)]++;
refcount_set(&rq->ref, 1);
@@ -364,7 +365,7 @@ static struct request *blk_mq_get_request(struct request_queue *q,
}
if (likely(!data->hctx))
data->hctx = blk_mq_map_queue(q, data->cmd_flags,
- data->ctx->cpu);
+ data->ctx);
if (data->cmd_flags & REQ_NOWAIT)
data->flags |= BLK_MQ_REQ_NOWAIT;
@@ -550,8 +551,6 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
rq_qos_done(rq->q, rq);
rq->end_io(rq, error);
} else {
- if (unlikely(blk_bidi_rq(rq)))
- blk_mq_free_request(rq->next_rq);
blk_mq_free_request(rq);
}
}
@@ -657,6 +656,13 @@ bool blk_mq_complete_request(struct request *rq)
}
EXPORT_SYMBOL(blk_mq_complete_request);
+void blk_mq_complete_request_sync(struct request *rq)
+{
+ WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+ rq->q->mq_ops->complete(rq);
+}
+EXPORT_SYMBOL_GPL(blk_mq_complete_request_sync);
+
int blk_mq_request_started(struct request *rq)
{
return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
@@ -786,7 +792,6 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
if (kick_requeue_list)
blk_mq_kick_requeue_list(q);
}
-EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q)
{
@@ -1076,7 +1081,13 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
spin_lock(&hctx->dispatch_wait_lock);
- list_del_init(&wait->entry);
+ if (!list_empty(&wait->entry)) {
+ struct sbitmap_queue *sbq;
+
+ list_del_init(&wait->entry);
+ sbq = &hctx->tags->bitmap_tags;
+ atomic_dec(&sbq->ws_active);
+ }
spin_unlock(&hctx->dispatch_wait_lock);
blk_mq_run_hw_queue(hctx, true);
@@ -1092,13 +1103,13 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
struct request *rq)
{
+ struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
struct wait_queue_head *wq;
wait_queue_entry_t *wait;
bool ret;
if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) {
- if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
- set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+ blk_mq_sched_mark_restart_hctx(hctx);
/*
* It's possible that a tag was freed in the window between the
@@ -1115,7 +1126,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
if (!list_empty_careful(&wait->entry))
return false;
- wq = &bt_wait_ptr(&hctx->tags->bitmap_tags, hctx)->wait;
+ wq = &bt_wait_ptr(sbq, hctx)->wait;
spin_lock_irq(&wq->lock);
spin_lock(&hctx->dispatch_wait_lock);
@@ -1125,6 +1136,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
return false;
}
+ atomic_inc(&sbq->ws_active);
wait->flags &= ~WQ_FLAG_EXCLUSIVE;
__add_wait_queue(wq, wait);
@@ -1145,6 +1157,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
* someone else gets the wakeup.
*/
list_del_init(&wait->entry);
+ atomic_dec(&sbq->ws_active);
spin_unlock(&hctx->dispatch_wait_lock);
spin_unlock_irq(&wq->lock);
@@ -1707,11 +1720,12 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
unsigned int depth;
list_splice_init(&plug->mq_list, &list);
- plug->rq_count = 0;
if (plug->rq_count > 2 && plug->multiple_queues)
list_sort(NULL, &list, plug_rq_cmp);
+ plug->rq_count = 0;
+
this_q = NULL;
this_hctx = NULL;
this_ctx = NULL;
@@ -1796,74 +1810,76 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
return ret;
}
-blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq,
blk_qc_t *cookie,
- bool bypass, bool last)
+ bool bypass_insert, bool last)
{
struct request_queue *q = rq->q;
bool run_queue = true;
- blk_status_t ret = BLK_STS_RESOURCE;
- int srcu_idx;
- bool force = false;
- hctx_lock(hctx, &srcu_idx);
/*
- * hctx_lock is needed before checking quiesced flag.
+ * RCU or SRCU read lock is needed before checking quiesced flag.
*
- * When queue is stopped or quiesced, ignore 'bypass', insert
- * and return BLK_STS_OK to caller, and avoid driver to try to
- * dispatch again.
+ * When queue is stopped or quiesced, ignore 'bypass_insert' from
+ * blk_mq_request_issue_directly(), return BLK_STS_OK to the caller,
+ * and avoid letting the driver try to dispatch again.
*/
- if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) {
+ if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
run_queue = false;
- bypass = false;
- goto out_unlock;
+ bypass_insert = false;
+ goto insert;
}
- if (unlikely(q->elevator && !bypass))
- goto out_unlock;
+ if (q->elevator && !bypass_insert)
+ goto insert;
if (!blk_mq_get_dispatch_budget(hctx))
- goto out_unlock;
+ goto insert;
if (!blk_mq_get_driver_tag(rq)) {
blk_mq_put_dispatch_budget(hctx);
- goto out_unlock;
+ goto insert;
}
- /*
- * Always add a request that has been through
- *.queue_rq() to the hardware dispatch list.
- */
- force = true;
- ret = __blk_mq_issue_directly(hctx, rq, cookie, last);
-out_unlock:
+ return __blk_mq_issue_directly(hctx, rq, cookie, last);
+insert:
+ if (bypass_insert)
+ return BLK_STS_RESOURCE;
+
+ blk_mq_request_bypass_insert(rq, run_queue);
+ return BLK_STS_OK;
+}
+
+static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+ struct request *rq, blk_qc_t *cookie)
+{
+ blk_status_t ret;
+ int srcu_idx;
+
+ might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
+
+ hctx_lock(hctx, &srcu_idx);
+
+ ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
+ if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
+ blk_mq_request_bypass_insert(rq, true);
+ else if (ret != BLK_STS_OK)
+ blk_mq_end_request(rq, ret);
+
+ hctx_unlock(hctx, srcu_idx);
+}
+
+blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
+{
+ blk_status_t ret;
+ int srcu_idx;
+ blk_qc_t unused_cookie;
+ struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+
+ hctx_lock(hctx, &srcu_idx);
+ ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
hctx_unlock(hctx, srcu_idx);
- switch (ret) {
- case BLK_STS_OK:
- break;
- case BLK_STS_DEV_RESOURCE:
- case BLK_STS_RESOURCE:
- if (force) {
- blk_mq_request_bypass_insert(rq, run_queue);
- /*
- * We have to return BLK_STS_OK for the DM
- * to avoid livelock. Otherwise, we return
- * the real result to indicate whether the
- * request is direct-issued successfully.
- */
- ret = bypass ? BLK_STS_OK : ret;
- } else if (!bypass) {
- blk_mq_sched_insert_request(rq, false,
- run_queue, false);
- }
- break;
- default:
- if (!bypass)
- blk_mq_end_request(rq, ret);
- break;
- }
return ret;
}
@@ -1871,20 +1887,22 @@ out_unlock:
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
struct list_head *list)
{
- blk_qc_t unused;
- blk_status_t ret = BLK_STS_OK;
-
while (!list_empty(list)) {
+ blk_status_t ret;
struct request *rq = list_first_entry(list, struct request,
queuelist);
list_del_init(&rq->queuelist);
- if (ret == BLK_STS_OK)
- ret = blk_mq_try_issue_directly(hctx, rq, &unused,
- false,
+ ret = blk_mq_request_issue_directly(rq, list_empty(list));
+ if (ret != BLK_STS_OK) {
+ if (ret == BLK_STS_RESOURCE ||
+ ret == BLK_STS_DEV_RESOURCE) {
+ blk_mq_request_bypass_insert(rq,
list_empty(list));
- else
- blk_mq_sched_insert_request(rq, false, true, false);
+ break;
+ }
+ blk_mq_end_request(rq, ret);
+ }
}
/*
@@ -1892,7 +1910,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
* the driver there was more coming, but that turned out to
* be a lie.
*/
- if (ret != BLK_STS_OK && hctx->queue->mq_ops->commit_rqs)
+ if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs)
hctx->queue->mq_ops->commit_rqs(hctx);
}
@@ -1999,19 +2017,21 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
plug->rq_count--;
}
blk_add_rq_to_plug(plug, rq);
+ trace_block_plug(q);
blk_mq_put_ctx(data.ctx);
if (same_queue_rq) {
data.hctx = same_queue_rq->mq_hctx;
+ trace_block_unplug(q, 1, true);
blk_mq_try_issue_directly(data.hctx, same_queue_rq,
- &cookie, false, true);
+ &cookie);
}
} else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
!data.hctx->dispatch_busy)) {
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
- blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true);
+ blk_mq_try_issue_directly(data.hctx, rq, &cookie);
} else {
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
@@ -2044,7 +2064,7 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
list_del_init(&page->lru);
/*
* Remove kmemleak object previously allocated in
- * blk_mq_init_rq_map().
+ * blk_mq_alloc_rqs().
*/
kmemleak_free(page_address(page));
__free_pages(page, page->private);
@@ -2069,7 +2089,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
struct blk_mq_tags *tags;
int node;
- node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx);
+ node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
if (node == NUMA_NO_NODE)
node = set->numa_node;
@@ -2125,7 +2145,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
size_t rq_size, left;
int node;
- node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx);
+ node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
if (node == NUMA_NO_NODE)
node = set->numa_node;
@@ -2249,12 +2269,11 @@ static void blk_mq_exit_hctx(struct request_queue *q,
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
- if (hctx->flags & BLK_MQ_F_BLOCKING)
- cleanup_srcu_struct(hctx->srcu);
-
blk_mq_remove_cpuhp(hctx);
- blk_free_flush_queue(hctx->fq);
- sbitmap_free(&hctx->ctx_map);
+
+ spin_lock(&q->unused_hctx_lock);
+ list_add(&hctx->hctx_list, &q->unused_hctx_list);
+ spin_unlock(&q->unused_hctx_lock);
}
static void blk_mq_exit_hw_queues(struct request_queue *q,
@@ -2271,15 +2290,65 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
}
}
+static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
+{
+ int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
+
+ BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
+ __alignof__(struct blk_mq_hw_ctx)) !=
+ sizeof(struct blk_mq_hw_ctx));
+
+ if (tag_set->flags & BLK_MQ_F_BLOCKING)
+ hw_ctx_size += sizeof(struct srcu_struct);
+
+ return hw_ctx_size;
+}
+
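blk_mq_hw_ctx_size(), moved above, only grows the allocation by sizeof(struct srcu_struct) when BLK_MQ_F_BLOCKING is set, which is safe only because srcu is the last member and the padded struct size lines up with its offset; that is what the BUILD_BUG_ON enforces. A stripped-down user-space illustration of the same trick, using hypothetical names and C11 static_assert in place of BUILD_BUG_ON:

#include <assert.h>
#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) / (a) * (a))

struct tail {                   /* stand-in for srcu_struct */
    long state[4];
};

struct ctx {
    int flags;
    struct tail extra[];        /* optional trailing member */
};

#define CTX_F_BLOCKING 1

/* The trailing member only exists when the flag asks for it, so the
 * allocation size depends on the flags - same idea as blk_mq_hw_ctx_size(). */
static size_t ctx_size(int flags)
{
    size_t size = sizeof(struct ctx);

    /* &ctx->extra[0] is only usable if the flexible member starts at the
     * padded end of the struct - the BUILD_BUG_ON analogue. */
    static_assert(ALIGN_UP(offsetof(struct ctx, extra),
                           alignof(struct ctx)) == sizeof(struct ctx),
                  "trailing member must sit at the padded end");

    if (flags & CTX_F_BLOCKING)
        size += sizeof(struct tail);
    return size;
}

int main(void)
{
    struct ctx *c = calloc(1, ctx_size(CTX_F_BLOCKING));

    c->flags = CTX_F_BLOCKING;
    c->extra[0].state[0] = 42;  /* safe: space was allocated above */
    printf("base=%zu alloc=%zu\n", sizeof(struct ctx),
           ctx_size(CTX_F_BLOCKING));
    free(c);
    return 0;
}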
static int blk_mq_init_hctx(struct request_queue *q,
struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
{
- int node;
+ hctx->queue_num = hctx_idx;
+
+ cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
+
+ hctx->tags = set->tags[hctx_idx];
+
+ if (set->ops->init_hctx &&
+ set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
+ goto unregister_cpu_notifier;
+
+ if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
+ hctx->numa_node))
+ goto exit_hctx;
+ return 0;
- node = hctx->numa_node;
+ exit_hctx:
+ if (set->ops->exit_hctx)
+ set->ops->exit_hctx(hctx, hctx_idx);
+ unregister_cpu_notifier:
+ blk_mq_remove_cpuhp(hctx);
+ return -1;
+}
+
+static struct blk_mq_hw_ctx *
+blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
+ int node)
+{
+ struct blk_mq_hw_ctx *hctx;
+ gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
+
+ hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node);
+ if (!hctx)
+ goto fail_alloc_hctx;
+
+ if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
+ goto free_hctx;
+
+ atomic_set(&hctx->nr_active, 0);
if (node == NUMA_NO_NODE)
- node = hctx->numa_node = set->numa_node;
+ node = set->numa_node;
+ hctx->numa_node = node;
INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
spin_lock_init(&hctx->lock);
@@ -2287,58 +2356,47 @@ static int blk_mq_init_hctx(struct request_queue *q,
hctx->queue = q;
hctx->flags = set->flags & ~BLK_MQ_F_TAG_SHARED;
- cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
-
- hctx->tags = set->tags[hctx_idx];
+ INIT_LIST_HEAD(&hctx->hctx_list);
/*
* Allocate space for all possible cpus to avoid allocation at
* runtime
*/
hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node);
+ gfp, node);
if (!hctx->ctxs)
- goto unregister_cpu_notifier;
+ goto free_cpumask;
if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node))
+ gfp, node))
goto free_ctxs;
-
hctx->nr_ctx = 0;
spin_lock_init(&hctx->dispatch_wait_lock);
init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake);
INIT_LIST_HEAD(&hctx->dispatch_wait.entry);
- if (set->ops->init_hctx &&
- set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
- goto free_bitmap;
-
hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
+ gfp);
if (!hctx->fq)
- goto exit_hctx;
-
- if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
- goto free_fq;
+ goto free_bitmap;
if (hctx->flags & BLK_MQ_F_BLOCKING)
init_srcu_struct(hctx->srcu);
+ blk_mq_hctx_kobj_init(hctx);
- return 0;
+ return hctx;
- free_fq:
- kfree(hctx->fq);
- exit_hctx:
- if (set->ops->exit_hctx)
- set->ops->exit_hctx(hctx, hctx_idx);
free_bitmap:
sbitmap_free(&hctx->ctx_map);
free_ctxs:
kfree(hctx->ctxs);
- unregister_cpu_notifier:
- blk_mq_remove_cpuhp(hctx);
- return -1;
+ free_cpumask:
+ free_cpumask_var(hctx->cpumask);
+ free_hctx:
+ kfree(hctx);
+ fail_alloc_hctx:
+ return NULL;
}
static void blk_mq_init_cpu_queues(struct request_queue *q,
@@ -2424,7 +2482,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
* If the cpu isn't present, the cpu is mapped to first hctx.
*/
for_each_possible_cpu(i) {
- hctx_idx = set->map[0].mq_map[i];
+ hctx_idx = set->map[HCTX_TYPE_DEFAULT].mq_map[i];
/* unmapped hw queue can be remapped after CPU topo changed */
if (!set->tags[hctx_idx] &&
!__blk_mq_alloc_rq_map(set, hctx_idx)) {
@@ -2434,16 +2492,19 @@ static void blk_mq_map_swqueue(struct request_queue *q)
* case, remap the current ctx to hctx[0] which
* is guaranteed to always have tags allocated
*/
- set->map[0].mq_map[i] = 0;
+ set->map[HCTX_TYPE_DEFAULT].mq_map[i] = 0;
}
ctx = per_cpu_ptr(q->queue_ctx, i);
for (j = 0; j < set->nr_maps; j++) {
- if (!set->map[j].nr_queues)
+ if (!set->map[j].nr_queues) {
+ ctx->hctxs[j] = blk_mq_map_queue_type(q,
+ HCTX_TYPE_DEFAULT, i);
continue;
+ }
hctx = blk_mq_map_queue_type(q, j, i);
-
+ ctx->hctxs[j] = hctx;
/*
* If the CPU is already set in the mask, then we've
* mapped this one already. This can happen if
@@ -2463,6 +2524,10 @@ static void blk_mq_map_swqueue(struct request_queue *q)
*/
BUG_ON(!hctx->nr_ctx);
}
+
+ for (; j < HCTX_MAX_TYPES; j++)
+ ctx->hctxs[j] = blk_mq_map_queue_type(q,
+ HCTX_TYPE_DEFAULT, i);
}
mutex_unlock(&q->sysfs_lock);
@@ -2606,13 +2671,17 @@ static int blk_mq_alloc_ctxs(struct request_queue *q)
*/
void blk_mq_release(struct request_queue *q)
{
- struct blk_mq_hw_ctx *hctx;
- unsigned int i;
+ struct blk_mq_hw_ctx *hctx, *next;
+ int i;
- /* hctx kobj stays in hctx */
- queue_for_each_hw_ctx(q, hctx, i) {
- if (!hctx)
- continue;
+ cancel_delayed_work_sync(&q->requeue_work);
+
+ queue_for_each_hw_ctx(q, hctx, i)
+ WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
+
+ /* all hctx are in .unused_hctx_list now */
+ list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) {
+ list_del_init(&hctx->hctx_list);
kobject_put(&hctx->kobj);
}
@@ -2675,51 +2744,38 @@ struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
}
EXPORT_SYMBOL(blk_mq_init_sq_queue);
-static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
-{
- int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
-
- BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
- __alignof__(struct blk_mq_hw_ctx)) !=
- sizeof(struct blk_mq_hw_ctx));
-
- if (tag_set->flags & BLK_MQ_F_BLOCKING)
- hw_ctx_size += sizeof(struct srcu_struct);
-
- return hw_ctx_size;
-}
-
static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
struct blk_mq_tag_set *set, struct request_queue *q,
int hctx_idx, int node)
{
- struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_hw_ctx *hctx = NULL, *tmp;
- hctx = kzalloc_node(blk_mq_hw_ctx_size(set),
- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
- node);
- if (!hctx)
- return NULL;
-
- if (!zalloc_cpumask_var_node(&hctx->cpumask,
- GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
- node)) {
- kfree(hctx);
- return NULL;
+ /* reuse dead hctx first */
+ spin_lock(&q->unused_hctx_lock);
+ list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) {
+ if (tmp->numa_node == node) {
+ hctx = tmp;
+ break;
+ }
}
+ if (hctx)
+ list_del_init(&hctx->hctx_list);
+ spin_unlock(&q->unused_hctx_lock);
- atomic_set(&hctx->nr_active, 0);
- hctx->numa_node = node;
- hctx->queue_num = hctx_idx;
+ if (!hctx)
+ hctx = blk_mq_alloc_hctx(q, set, node);
+ if (!hctx)
+ goto fail;
- if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) {
- free_cpumask_var(hctx->cpumask);
- kfree(hctx);
- return NULL;
- }
- blk_mq_hctx_kobj_init(hctx);
+ if (blk_mq_init_hctx(q, set, hctx, hctx_idx))
+ goto free_hctx;
return hctx;
+
+ free_hctx:
+ kobject_put(&hctx->kobj);
+ fail:
+ return NULL;
}
static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
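In the rewritten blk_mq_alloc_and_init_hctx() above, a previously retired hctx is only reused when its numa_node matches the node the queue now wants, falling back to a fresh allocation otherwise. The reuse-or-allocate shape is easy to show on its own; here is a small user-space sketch with a singly linked free list and made-up names (the kernel version keeps its list under q->unused_hctx_lock):

#include <stdio.h>
#include <stdlib.h>

struct hw_ctx {
    int node;               /* NUMA node this context was sized for */
    struct hw_ctx *next;    /* link on the unused list */
};

static struct hw_ctx *unused_list;  /* retired contexts kept for reuse */

/* Prefer a retired context built for the same node; fall back to a fresh
 * allocation - the same shape as blk_mq_alloc_and_init_hctx(). */
static struct hw_ctx *get_hw_ctx(int node)
{
    struct hw_ctx **pp, *ctx;

    for (pp = &unused_list; (ctx = *pp) != NULL; pp = &ctx->next) {
        if (ctx->node == node) {
            *pp = ctx->next;        /* unlink and reuse */
            ctx->next = NULL;
            return ctx;
        }
    }

    ctx = calloc(1, sizeof(*ctx));  /* no match: allocate new */
    if (ctx)
        ctx->node = node;
    return ctx;
}

static void put_hw_ctx(struct hw_ctx *ctx)
{
    ctx->next = unused_list;        /* retire for later reuse */
    unused_list = ctx;
}

int main(void)
{
    struct hw_ctx *a = get_hw_ctx(0);   /* allocated */
    put_hw_ctx(a);                      /* retired */
    struct hw_ctx *b = get_hw_ctx(0);   /* reused: b == a */
    printf("reused: %s\n", a == b ? "yes" : "no");
    free(b);
    return 0;
}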
@@ -2734,7 +2790,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
int node;
struct blk_mq_hw_ctx *hctx;
- node = blk_mq_hw_queue_to_node(&set->map[0], i);
+ node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i);
/*
* If the hw queue has been mapped to another numa node,
* we need to realloc the hctx. If allocation fails, fallback
@@ -2745,10 +2801,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
hctx = blk_mq_alloc_and_init_hctx(set, q, i, node);
if (hctx) {
- if (hctxs[i]) {
+ if (hctxs[i])
blk_mq_exit_hctx(q, set, hctxs[i], i);
- kobject_put(&hctxs[i]->kobj);
- }
hctxs[i] = hctx;
} else {
if (hctxs[i])
@@ -2779,9 +2833,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
if (hctx->tags)
blk_mq_free_map_and_requests(set, j);
blk_mq_exit_hctx(q, set, hctx, j);
- kobject_put(&hctx->kobj);
hctxs[j] = NULL;
-
}
}
mutex_unlock(&q->sysfs_lock);
@@ -2813,7 +2865,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
goto err_exit;
if (blk_mq_alloc_ctxs(q))
- goto err_exit;
+ goto err_poll;
/* init q->mq_kobj and sw queues' kobjects */
blk_mq_sysfs_init(q);
@@ -2824,6 +2876,9 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
if (!q->queue_hw_ctx)
goto err_sys_init;
+ INIT_LIST_HEAD(&q->unused_hctx_list);
+ spin_lock_init(&q->unused_hctx_lock);
+
blk_mq_realloc_hw_ctxs(set, q);
if (!q->nr_hw_queues)
goto err_hctxs;
@@ -2838,9 +2893,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
set->map[HCTX_TYPE_POLL].nr_queues)
blk_queue_flag_set(QUEUE_FLAG_POLL, q);
- if (!(set->flags & BLK_MQ_F_SG_MERGE))
- blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q);
-
q->sg_reserved_size = INT_MAX;
INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work);
@@ -2857,7 +2909,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
/*
* Default to classic polling
*/
- q->poll_nsec = -1;
+ q->poll_nsec = BLK_MQ_POLL_CLASSIC;
blk_mq_init_cpu_queues(q, set->nr_hw_queues);
blk_mq_add_queue_tag_set(set, q);
@@ -2877,13 +2929,17 @@ err_hctxs:
kfree(q->queue_hw_ctx);
err_sys_init:
blk_mq_sysfs_deinit(q);
+err_poll:
+ blk_stat_free_callback(q->poll_cb);
+ q->poll_cb = NULL;
err_exit:
q->mq_ops = NULL;
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(blk_mq_init_allocated_queue);
-void blk_mq_free_queue(struct request_queue *q)
+/* tags can _not_ be used after returning from blk_mq_exit_queue */
+void blk_mq_exit_queue(struct request_queue *q)
{
struct blk_mq_tag_set *set = q->tag_set;
@@ -2968,7 +3024,7 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
return set->ops->map_queues(set);
} else {
BUG_ON(set->nr_maps > 1);
- return blk_mq_map_queues(&set->map[0]);
+ return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
}
}
@@ -3090,6 +3146,9 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
if (!set)
return -EINVAL;
+ if (q->nr_requests == nr)
+ return 0;
+
blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
@@ -3110,6 +3169,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
}
if (ret)
break;
+ if (q->elevator && q->elevator->type->ops.depth_updated)
+ q->elevator->type->ops.depth_updated(hctx);
}
if (!ret)
@@ -3235,7 +3296,7 @@ fallback:
pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
nr_hw_queues, prev_nr_hw_queues);
set->nr_hw_queues = prev_nr_hw_queues;
- blk_mq_map_queues(&set->map[0]);
+ blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
goto fallback;
}
blk_mq_map_swqueue(q);
@@ -3389,7 +3450,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
{
struct request *rq;
- if (q->poll_nsec == -1)
+ if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
return false;
if (!blk_qc_t_is_internal(cookie))
diff --git a/block/blk-mq.h b/block/blk-mq.h
index d0b3dd54ef8d..633a5a77ee8b 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -23,6 +23,7 @@ struct blk_mq_ctx {
unsigned int cpu;
unsigned short index_hw[HCTX_MAX_TYPES];
+ struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES];
/* incremented at dispatch time */
unsigned long rq_dispatched[2];
@@ -36,10 +37,12 @@ struct blk_mq_ctx {
struct kobject kobj;
} ____cacheline_aligned_in_smp;
-void blk_mq_free_queue(struct request_queue *q);
+void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
+ bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_get_driver_tag(struct request *rq);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
@@ -67,10 +70,8 @@ void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
struct list_head *list);
-blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
- struct request *rq,
- blk_qc_t *cookie,
- bool bypass, bool last);
+/* Used by blk_insert_cloned_request() to issue request directly */
+blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
struct list_head *list);
@@ -96,26 +97,23 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *
* blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
* @q: request queue
* @flags: request command flags
- * @cpu: CPU
+ * @ctx: software queue ctx
*/
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
unsigned int flags,
- unsigned int cpu)
+ struct blk_mq_ctx *ctx)
{
enum hctx_type type = HCTX_TYPE_DEFAULT;
- if ((flags & REQ_HIPRI) &&
- q->tag_set->nr_maps > HCTX_TYPE_POLL &&
- q->tag_set->map[HCTX_TYPE_POLL].nr_queues &&
- test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+ /*
+ * The caller ensures that poll is enabled if REQ_HIPRI is set.
+ */
+ if (flags & REQ_HIPRI)
type = HCTX_TYPE_POLL;
-
- else if (((flags & REQ_OP_MASK) == REQ_OP_READ) &&
- q->tag_set->nr_maps > HCTX_TYPE_READ &&
- q->tag_set->map[HCTX_TYPE_READ].nr_queues)
+ else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
type = HCTX_TYPE_READ;
- return blk_mq_map_queue_type(q, type, cpu);
+ return ctx->hctxs[type];
}
/*
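After this change blk_mq_map_queue() is a two-step lookup: derive a queue type from the command flags, then index the small per-software-context table that blk_mq_map_swqueue() populated earlier (including its fallbacks to HCTX_TYPE_DEFAULT), so no per-call feature checks are needed. A self-contained user-space model of that lookup, with invented names and flag bits:

#include <stdio.h>

enum hctx_type { TYPE_DEFAULT, TYPE_READ, TYPE_POLL, TYPE_MAX };

#define F_READ  0x1
#define F_HIPRI 0x2

struct hw_queue { const char *name; };

/* Per software-context table, filled in once at setup time - the analogue
 * of ctx->hctxs[] being populated by blk_mq_map_swqueue(). */
struct sw_ctx { struct hw_queue *hctxs[TYPE_MAX]; };

/* Analogue of the reworked blk_mq_map_queue(): pick a type from the flags,
 * then a plain table lookup - setup guaranteed every slot is valid. */
static struct hw_queue *map_queue(struct sw_ctx *ctx, unsigned int flags)
{
    enum hctx_type type = TYPE_DEFAULT;

    if (flags & F_HIPRI)
        type = TYPE_POLL;
    else if (flags & F_READ)
        type = TYPE_READ;

    return ctx->hctxs[type];
}

int main(void)
{
    struct hw_queue def = { "default" }, rd = { "read" }, poll = { "poll" };
    struct sw_ctx ctx = { .hctxs = { &def, &rd, &poll } };

    printf("%s\n", map_queue(&ctx, F_READ)->name);   /* read */
    printf("%s\n", map_queue(&ctx, F_HIPRI)->name);  /* poll */
    printf("%s\n", map_queue(&ctx, 0)->name);        /* default */
    return 0;
}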
@@ -224,15 +222,6 @@ static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
}
}
-static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
- struct request *rq)
-{
- if (rq->tag == -1 || rq->internal_tag == -1)
- return;
-
- __blk_mq_put_driver_tag(hctx, rq);
-}
-
static inline void blk_mq_put_driver_tag(struct request *rq)
{
if (rq->tag == -1 || rq->internal_tag == -1)
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index d169d7188fa6..659ccb8b693f 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
#include "blk-rq-qos.h"
/*
@@ -207,9 +209,10 @@ static int rq_qos_wake_function(struct wait_queue_entry *curr,
/**
* rq_qos_wait - throttle on a rqw if we need to
- * @private_data - caller provided specific data
- * @acquire_inflight_cb - inc the rqw->inflight counter if we can
- * @cleanup_cb - the callback to cleanup in case we race with a waker
+ * @rqw: rqw to throttle on
+ * @private_data: caller provided specific data
+ * @acquire_inflight_cb: inc the rqw->inflight counter if we can
+ * @cleanup_cb: the callback to cleanup in case we race with a waker
*
* This provides a uniform place for the rq_qos users to do their throttling.
* Since you can end up with a lot of things sleeping at once, this manages the
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index 564851889550..2300e038b9fa 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef RQ_QOS_H
#define RQ_QOS_H
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 3e7038e475ee..2ae348c101a0 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Functions related to setting various queue properties from drivers
*/
@@ -309,6 +310,9 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
__func__, max_size);
}
+ /* see blk_queue_virt_boundary() for the explanation */
+ WARN_ON_ONCE(q->limits.virt_boundary_mask);
+
q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);
@@ -663,22 +667,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
EXPORT_SYMBOL(disk_stack_limits);
/**
- * blk_queue_dma_pad - set pad mask
- * @q: the request queue for the device
- * @mask: pad mask
- *
- * Set dma pad mask.
- *
- * Appending pad buffer to a request modifies the last entry of a
- * scatter list such that it includes the pad buffer.
- **/
-void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
-{
- q->dma_pad_mask = mask;
-}
-EXPORT_SYMBOL(blk_queue_dma_pad);
-
-/**
* blk_queue_update_dma_pad - update pad mask
* @q: the request queue for the device
* @mask: pad mask
@@ -757,6 +745,14 @@ EXPORT_SYMBOL(blk_queue_segment_boundary);
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
q->limits.virt_boundary_mask = mask;
+
+ /*
+ * Devices that require a virtual boundary do not support scatter/gather
+ * I/O natively, but instead require a descriptor list entry for each
+ * page (which might not be identical to the Linux PAGE_SIZE). Because
+ * of that they are not limited by our notion of "segment size".
+ */
+ q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);
@@ -799,15 +795,6 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
-void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
-{
- if (queueable)
- blk_queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q);
- else
- blk_queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q);
-}
-EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
-
/**
* blk_set_queue_depth - tell the block layer about the device queue depth
* @q: the request queue for the device
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 696a04176e4d..940f15d600f8 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Block stat tracking code
*
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 590d1ef2f961..977c659dcd18 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -360,8 +360,8 @@ static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
int val;
- if (q->poll_nsec == -1)
- val = -1;
+ if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
+ val = BLK_MQ_POLL_CLASSIC;
else
val = q->poll_nsec / 1000;
@@ -380,10 +380,12 @@ static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
if (err < 0)
return err;
- if (val == -1)
- q->poll_nsec = -1;
- else
+ if (val == BLK_MQ_POLL_CLASSIC)
+ q->poll_nsec = BLK_MQ_POLL_CLASSIC;
+ else if (val >= 0)
q->poll_nsec = val * 1000;
+ else
+ return -EINVAL;
return count;
}
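queue_poll_delay_store() now distinguishes three cases instead of two: the BLK_MQ_POLL_CLASSIC sentinel selects classic polling, any other negative value is rejected, and non-negative values are taken as microseconds and scaled to nanoseconds. A tiny stand-alone parser with the same shape (the constant and names here are illustrative, not the kernel's):

#include <stdio.h>

#define POLL_CLASSIC (-1)   /* sentinel: "classic", interrupt-driven polling */

/* Mirror of the queue_poll_delay_store() decision: sentinel passes through,
 * other negatives are invalid, non-negative microseconds become nanoseconds. */
static int set_poll_delay(long val, long *poll_nsec)
{
    if (val == POLL_CLASSIC)
        *poll_nsec = POLL_CLASSIC;
    else if (val >= 0)
        *poll_nsec = val * 1000;
    else
        return -1;  /* reject e.g. -5, which the old -1 check let through */
    return 0;
}

int main(void)
{
    long nsec;

    printf("classic: %d\n", set_poll_delay(POLL_CLASSIC, &nsec));
    printf("100us  : %d (nsec=%ld)\n", set_poll_delay(100, &nsec), nsec);
    printf("-5     : %d\n", set_poll_delay(-5, &nsec));
    return 0;
}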
@@ -468,6 +470,9 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
else if (val >= 0)
val *= 1000ULL;
+ if (wbt_get_min_lat(q) == val)
+ return count;
+
/*
* Ensure that the queue is idled, in case the latency update
* ends up either enabling or disabling wbt completely. We can't
@@ -723,7 +728,7 @@ static struct queue_sysfs_entry throtl_sample_time_entry = {
};
#endif
-static struct attribute *default_attrs[] = {
+static struct attribute *queue_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
&queue_max_hw_sectors_entry.attr,
@@ -765,6 +770,25 @@ static struct attribute *default_attrs[] = {
NULL,
};
+static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
+ int n)
+{
+ struct request_queue *q =
+ container_of(kobj, struct request_queue, kobj);
+
+ if (attr == &queue_io_timeout_entry.attr &&
+ (!q->mq_ops || !q->mq_ops->timeout))
+ return 0;
+
+ return attr->mode;
+}
+
+static struct attribute_group queue_attr_group = {
+ .attrs = queue_attrs,
+ .is_visible = queue_attr_visible,
+};
+
+
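The queue attributes move from .default_attrs to an attribute_group whose is_visible callback can hide individual entries; here io_timeout disappears when the driver has no ->timeout handler. The callback contract (return 0 to hide, otherwise the mode to create the file with) can be sketched in plain user-space C with hypothetical names:

#include <stdio.h>
#include <string.h>

struct attr {
    const char *name;
    unsigned int mode;
};

static const struct attr example_queue_attrs[] = {
    { "nr_requests", 0644 },
    { "io_timeout",  0644 },
};

/* Analogue of queue_attr_visible(): returning 0 hides the attribute,
 * any other value is the mode it is created with. */
static unsigned int attr_visible(const struct attr *a, int has_timeout_handler)
{
    if (!strcmp(a->name, "io_timeout") && !has_timeout_handler)
        return 0;
    return a->mode;
}

/* Analogue of sysfs walking a group's .is_visible for every attribute. */
static void register_group(int has_timeout_handler)
{
    size_t i;

    for (i = 0; i < sizeof(example_queue_attrs) / sizeof(example_queue_attrs[0]); i++) {
        unsigned int mode = attr_visible(&example_queue_attrs[i],
                                         has_timeout_handler);

        if (mode)
            printf("create %-12s (%04o)\n", example_queue_attrs[i].name, mode);
        else
            printf("skip   %-12s\n", example_queue_attrs[i].name);
    }
}

int main(void)
{
    register_group(0);  /* no ->timeout: io_timeout stays hidden */
    register_group(1);  /* with ->timeout: io_timeout appears */
    return 0;
}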
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
static ssize_t
@@ -816,22 +840,47 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
kmem_cache_free(blk_requestq_cachep, q);
}
+/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
+static void blk_exit_queue(struct request_queue *q)
+{
+ /*
+ * Since the I/O scheduler exit code may access cgroup information,
+ * perform I/O scheduler exit before disassociating from the block
+ * cgroup controller.
+ */
+ if (q->elevator) {
+ ioc_clear_queue(q);
+ __elevator_exit(q, q->elevator);
+ q->elevator = NULL;
+ }
+
+ /*
+ * Remove all references to @q from the block cgroup controller before
+ * restoring @q->queue_lock to avoid that restoring this pointer causes
+ * e.g. blkcg_print_blkgs() to crash.
+ */
+ blkcg_exit_queue(q);
+
+ /*
+ * Since the cgroup code may dereference the @q->backing_dev_info
+ * pointer, only decrease its reference count after having removed the
+ * association with the block cgroup controller.
+ */
+ bdi_put(q->backing_dev_info);
+}
+
+
/**
- * __blk_release_queue - release a request queue when it is no longer needed
+ * __blk_release_queue - release a request queue
* @work: pointer to the release_work member of the request queue to be released
*
* Description:
- * blk_release_queue is the counterpart of blk_init_queue(). It should be
- * called when a request queue is being released; typically when a block
- * device is being de-registered. Its primary task it to free the queue
- * itself.
- *
- * Notes:
- * The low level driver must have finished any outstanding requests first
- * via blk_cleanup_queue().
- *
- * Although blk_release_queue() may be called with preemption disabled,
- * __blk_release_queue() may sleep.
+ * This function is called when a block device is being unregistered. The
+ * process of releasing a request queue starts with blk_cleanup_queue, which
+ * sets the appropriate flags and then calls blk_put_queue, which decrements
+ * the reference counter of the request queue. Once the reference counter
+ * of the request queue reaches zero, blk_release_queue is called to release
+ * all allocated resources of the request queue.
*/
static void __blk_release_queue(struct work_struct *work)
{
@@ -841,23 +890,10 @@ static void __blk_release_queue(struct work_struct *work)
blk_stat_remove_callback(q, q->poll_cb);
blk_stat_free_callback(q->poll_cb);
- if (!blk_queue_dead(q)) {
- /*
- * Last reference was dropped without having called
- * blk_cleanup_queue().
- */
- WARN_ONCE(blk_queue_init_done(q),
- "request queue %p has been registered but blk_cleanup_queue() has not been called for that queue\n",
- q);
- blk_exit_queue(q);
- }
-
- WARN(blk_queue_root_blkg(q),
- "request queue %p is being released but it has not yet been removed from the blkcg controller\n",
- q);
-
blk_free_queue_stats(q->stats);
+ blk_exit_queue(q);
+
blk_queue_free_zone_bitmaps(q);
if (queue_is_mq(q))
@@ -890,7 +926,6 @@ static const struct sysfs_ops queue_sysfs_ops = {
struct kobj_type blk_queue_ktype = {
.sysfs_ops = &queue_sysfs_ops,
- .default_attrs = default_attrs,
.release = blk_release_queue,
};
@@ -939,6 +974,14 @@ int blk_register_queue(struct gendisk *disk)
goto unlock;
}
+ ret = sysfs_create_group(&q->kobj, &queue_attr_group);
+ if (ret) {
+ blk_trace_remove_sysfs(dev);
+ kobject_del(&q->kobj);
+ kobject_put(&dev->kobj);
+ goto unlock;
+ }
+
if (queue_is_mq(q)) {
__blk_mq_register_dev(dev, q);
blk_mq_debugfs_register(q);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 1b97a73d2fb1..9ea7c0ecad10 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1220,7 +1220,7 @@ static bool throtl_can_upgrade(struct throtl_data *td,
struct throtl_grp *this_tg);
/**
* throtl_pending_timer_fn - timer function for service_queue->pending_timer
- * @arg: the throtl_service_queue being serviced
+ * @t: the pending_timer member of the throtl_service_queue being serviced
*
* This timer is armed when a child throtl_grp with active bio's become
* pending and queued on the service_queue's pending_tree and expires when
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 124c26128bf6..8aa68fae96ad 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Functions related to generic timeout handling of requests.
*/
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index fd166fbb0f65..313f45a37e9d 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* buffered writeback throttling. loosely based on CoDel. We can't drop
* packets for IO scheduling, so the logic is something like this:
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 2d98803faec2..ae7e91bd0618 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Zoned block device handling
*
diff --git a/block/blk.h b/block/blk.h
index 17867ce71696..231fb77ef910 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -6,6 +6,7 @@
#include <linux/blk-mq.h>
#include <xen/xen.h>
#include "blk-mq.h"
+#include "blk-mq-sched.h"
/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT (5 * HZ)
@@ -38,7 +39,7 @@ extern struct ida blk_queue_ida;
static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
- return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx->cpu)->fq;
+ return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}
static inline void __blk_get_queue(struct request_queue *q)
@@ -50,7 +51,6 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
int node, int cmd_size, gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);
-void blk_exit_queue(struct request_queue *q);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
void blk_freeze_queue(struct request_queue *q);
@@ -75,7 +75,7 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
if (addr1 + vec1->bv_len != addr2)
return false;
- if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2))
+ if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
return false;
if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
return false;
@@ -177,10 +177,17 @@ void blk_insert_flush(struct request *rq);
int elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
struct elevator_type *new_e);
-void elevator_exit(struct request_queue *, struct elevator_queue *);
+void __elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q);
void elv_unregister_queue(struct request_queue *q);
+static inline void elevator_exit(struct request_queue *q,
+ struct elevator_queue *e)
+{
+ blk_mq_sched_free_requests(q);
+ __elevator_exit(q, e);
+}
+
struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);
#ifdef CONFIG_FAIL_IO_TIMEOUT
diff --git a/block/bounce.c b/block/bounce.c
index ffb9e9ecfa7e..f8ed677a1bf7 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -163,13 +163,13 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool)
{
struct bio *bio_orig = bio->bi_private;
struct bio_vec *bvec, orig_vec;
- int i;
struct bvec_iter orig_iter = bio_orig->bi_iter;
+ struct bvec_iter_all iter_all;
/*
* free up bounce indirect pages used
*/
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, iter_all) {
orig_vec = bio_iter_iovec(bio_orig, orig_iter);
if (bvec->bv_page != orig_vec.bv_page) {
dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
@@ -313,7 +313,12 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
bio = bounce_clone_bio(*bio_orig, GFP_NOIO, passthrough ? NULL :
&bounce_bio_set);
- bio_for_each_segment_all(to, bio, i) {
+ /*
+ * The bvec table can't be updated by bio_for_each_segment_all(),
+ * so retrieve the bvecs from the table directly. This is safe
+ * because 'bio' only contains single-page bvecs.
+ */
+ for (i = 0, to = bio->bi_io_vec; i < bio->bi_vcnt; to++, i++) {
struct page *page = to->bv_page;
if (page_to_pfn(page) <= q->limits.bounce_pfn)
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index 192129856342..785dd58947f1 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -1,24 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* BSG helper library
*
* Copyright (C) 2008 James Smart, Emulex Corporation
* Copyright (C) 2011 Red Hat, Inc. All rights reserved.
* Copyright (C) 2011 Mike Christie
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#include <linux/slab.h>
#include <linux/blk-mq.h>
@@ -51,11 +37,40 @@ static int bsg_transport_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
fmode_t mode)
{
struct bsg_job *job = blk_mq_rq_to_pdu(rq);
+ int ret;
job->request_len = hdr->request_len;
job->request = memdup_user(uptr64(hdr->request), hdr->request_len);
+ if (IS_ERR(job->request))
+ return PTR_ERR(job->request);
+
+ if (hdr->dout_xfer_len && hdr->din_xfer_len) {
+ job->bidi_rq = blk_get_request(rq->q, REQ_OP_SCSI_IN, 0);
+ if (IS_ERR(job->bidi_rq)) {
+ ret = PTR_ERR(job->bidi_rq);
+ goto out;
+ }
+
+ ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
+ uptr64(hdr->din_xferp), hdr->din_xfer_len,
+ GFP_KERNEL);
+ if (ret)
+ goto out_free_bidi_rq;
- return PTR_ERR_OR_ZERO(job->request);
+ job->bidi_bio = job->bidi_rq->bio;
+ } else {
+ job->bidi_rq = NULL;
+ job->bidi_bio = NULL;
+ }
+
+ return 0;
+
+out_free_bidi_rq:
+ if (job->bidi_rq)
+ blk_put_request(job->bidi_rq);
+out:
+ kfree(job->request);
+ return ret;
}
static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
@@ -93,7 +108,7 @@ static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
/* we assume all request payload was transferred, residual == 0 */
hdr->dout_resid = 0;
- if (rq->next_rq) {
+ if (job->bidi_rq) {
unsigned int rsp_len = job->reply_payload.payload_len;
if (WARN_ON(job->reply_payload_rcv_len > rsp_len))
@@ -111,6 +126,11 @@ static void bsg_transport_free_rq(struct request *rq)
{
struct bsg_job *job = blk_mq_rq_to_pdu(rq);
+ if (job->bidi_rq) {
+ blk_rq_unmap_user(job->bidi_bio);
+ blk_put_request(job->bidi_rq);
+ }
+
kfree(job->request);
}
@@ -200,7 +220,6 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
*/
static bool bsg_prepare_job(struct device *dev, struct request *req)
{
- struct request *rsp = req->next_rq;
struct bsg_job *job = blk_mq_rq_to_pdu(req);
int ret;
@@ -211,8 +230,8 @@ static bool bsg_prepare_job(struct device *dev, struct request *req)
if (ret)
goto failjob_rls_job;
}
- if (rsp && rsp->bio) {
- ret = bsg_map_buffer(&job->reply_payload, rsp);
+ if (job->bidi_rq) {
+ ret = bsg_map_buffer(&job->reply_payload, job->bidi_rq);
if (ret)
goto failjob_rls_rqst_payload;
}
@@ -335,6 +354,7 @@ static const struct blk_mq_ops bsg_mq_ops = {
* @dev: device to attach bsg device to
* @name: device to give bsg device
* @job_fn: bsg job handler
+ * @timeout: timeout handler function pointer
* @dd_job_size: size of LLD data needed for each job
*/
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
@@ -369,7 +389,6 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
}
q->queuedata = dev;
- blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
ret = bsg_register_queue(q, dev, name, &bsg_transport_ops);
diff --git a/block/bsg.c b/block/bsg.c
index 50e5f8f666f2..833c44b3d458 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -1,13 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* bsg.c - block layer implementation of the sg v4 interface
- *
- * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
- * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License version 2. See the file "COPYING" in the main directory of this
- * archive for more details.
- *
*/
#include <linux/module.h>
#include <linux/init.h>
@@ -74,6 +67,11 @@ static int bsg_scsi_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
{
struct scsi_request *sreq = scsi_req(rq);
+ if (hdr->dout_xfer_len && hdr->din_xfer_len) {
+ pr_warn_once("BIDI support in bsg has been removed.\n");
+ return -EOPNOTSUPP;
+ }
+
sreq->cmd_len = hdr->request_len;
if (sreq->cmd_len > BLK_MAX_CDB) {
sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL);
@@ -114,14 +112,10 @@ static int bsg_scsi_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
hdr->response_len = len;
}
- if (rq->next_rq) {
- hdr->dout_resid = sreq->resid_len;
- hdr->din_resid = scsi_req(rq->next_rq)->resid_len;
- } else if (rq_data_dir(rq) == READ) {
+ if (rq_data_dir(rq) == READ)
hdr->din_resid = sreq->resid_len;
- } else {
+ else
hdr->dout_resid = sreq->resid_len;
- }
return ret;
}
@@ -138,32 +132,35 @@ static const struct bsg_ops bsg_scsi_ops = {
.free_rq = bsg_scsi_free_rq,
};
-static struct request *
-bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode)
+static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
{
- struct request *rq, *next_rq = NULL;
+ struct request *rq;
+ struct bio *bio;
+ struct sg_io_v4 hdr;
int ret;
- if (!q->bsg_dev.class_dev)
- return ERR_PTR(-ENXIO);
+ if (copy_from_user(&hdr, uarg, sizeof(hdr)))
+ return -EFAULT;
- if (hdr->guard != 'Q')
- return ERR_PTR(-EINVAL);
+ if (!q->bsg_dev.class_dev)
+ return -ENXIO;
- ret = q->bsg_dev.ops->check_proto(hdr);
+ if (hdr.guard != 'Q')
+ return -EINVAL;
+ ret = q->bsg_dev.ops->check_proto(&hdr);
if (ret)
- return ERR_PTR(ret);
+ return ret;
- rq = blk_get_request(q, hdr->dout_xfer_len ?
+ rq = blk_get_request(q, hdr.dout_xfer_len ?
REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
if (IS_ERR(rq))
- return rq;
+ return PTR_ERR(rq);
- ret = q->bsg_dev.ops->fill_hdr(rq, hdr, mode);
+ ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode);
if (ret)
- goto out;
+ return ret;
- rq->timeout = msecs_to_jiffies(hdr->timeout);
+ rq->timeout = msecs_to_jiffies(hdr.timeout);
if (!rq->timeout)
rq->timeout = q->sg_timeout;
if (!rq->timeout)
@@ -171,68 +168,28 @@ bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode)
if (rq->timeout < BLK_MIN_SG_TIMEOUT)
rq->timeout = BLK_MIN_SG_TIMEOUT;
- if (hdr->dout_xfer_len && hdr->din_xfer_len) {
- if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
- ret = -EOPNOTSUPP;
- goto out;
- }
-
- pr_warn_once(
- "BIDI support in bsg has been deprecated and might be removed. "
- "Please report your use case to linux-scsi@vger.kernel.org\n");
-
- next_rq = blk_get_request(q, REQ_OP_SCSI_IN, 0);
- if (IS_ERR(next_rq)) {
- ret = PTR_ERR(next_rq);
- goto out;
- }
-
- rq->next_rq = next_rq;
- ret = blk_rq_map_user(q, next_rq, NULL, uptr64(hdr->din_xferp),
- hdr->din_xfer_len, GFP_KERNEL);
- if (ret)
- goto out_free_nextrq;
- }
-
- if (hdr->dout_xfer_len) {
- ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->dout_xferp),
- hdr->dout_xfer_len, GFP_KERNEL);
- } else if (hdr->din_xfer_len) {
- ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp),
- hdr->din_xfer_len, GFP_KERNEL);
+ if (hdr.dout_xfer_len) {
+ ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.dout_xferp),
+ hdr.dout_xfer_len, GFP_KERNEL);
+ } else if (hdr.din_xfer_len) {
+ ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.din_xferp),
+ hdr.din_xfer_len, GFP_KERNEL);
}
if (ret)
- goto out_unmap_nextrq;
- return rq;
-
-out_unmap_nextrq:
- if (rq->next_rq)
- blk_rq_unmap_user(rq->next_rq->bio);
-out_free_nextrq:
- if (rq->next_rq)
- blk_put_request(rq->next_rq);
-out:
- q->bsg_dev.ops->free_rq(rq);
- blk_put_request(rq);
- return ERR_PTR(ret);
-}
+ goto out_free_rq;
-static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
- struct bio *bio, struct bio *bidi_bio)
-{
- int ret;
-
- ret = rq->q->bsg_dev.ops->complete_rq(rq, hdr);
-
- if (rq->next_rq) {
- blk_rq_unmap_user(bidi_bio);
- blk_put_request(rq->next_rq);
- }
+ bio = rq->bio;
+ blk_execute_rq(q, NULL, rq, !(hdr.flags & BSG_FLAG_Q_AT_TAIL));
+ ret = rq->q->bsg_dev.ops->complete_rq(rq, &hdr);
blk_rq_unmap_user(bio);
+
+out_free_rq:
rq->q->bsg_dev.ops->free_rq(rq);
blk_put_request(rq);
+ if (!ret && copy_to_user(uarg, &hdr, sizeof(hdr)))
+ return -EFAULT;
return ret;
}
@@ -367,31 +324,39 @@ static int bsg_release(struct inode *inode, struct file *file)
return bsg_put_device(bd);
}
+static int bsg_get_command_q(struct bsg_device *bd, int __user *uarg)
+{
+ return put_user(bd->max_queue, uarg);
+}
+
+static int bsg_set_command_q(struct bsg_device *bd, int __user *uarg)
+{
+ int queue;
+
+ if (get_user(queue, uarg))
+ return -EFAULT;
+ if (queue < 1)
+ return -EINVAL;
+
+ spin_lock_irq(&bd->lock);
+ bd->max_queue = queue;
+ spin_unlock_irq(&bd->lock);
+ return 0;
+}
+
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct bsg_device *bd = file->private_data;
- int __user *uarg = (int __user *) arg;
- int ret;
+ void __user *uarg = (void __user *) arg;
switch (cmd) {
- /*
- * our own ioctls
- */
+ /*
+ * Our own ioctls
+ */
case SG_GET_COMMAND_Q:
- return put_user(bd->max_queue, uarg);
- case SG_SET_COMMAND_Q: {
- int queue;
-
- if (get_user(queue, uarg))
- return -EFAULT;
- if (queue < 1)
- return -EINVAL;
-
- spin_lock_irq(&bd->lock);
- bd->max_queue = queue;
- spin_unlock_irq(&bd->lock);
- return 0;
- }
+ return bsg_get_command_q(bd, uarg);
+ case SG_SET_COMMAND_Q:
+ return bsg_set_command_q(bd, uarg);
/*
* SCSI/sg ioctls
@@ -404,36 +369,10 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case SG_GET_RESERVED_SIZE:
case SG_SET_RESERVED_SIZE:
case SG_EMULATED_HOST:
- case SCSI_IOCTL_SEND_COMMAND: {
- void __user *uarg = (void __user *) arg;
+ case SCSI_IOCTL_SEND_COMMAND:
return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
- }
- case SG_IO: {
- struct request *rq;
- struct bio *bio, *bidi_bio = NULL;
- struct sg_io_v4 hdr;
- int at_head;
-
- if (copy_from_user(&hdr, uarg, sizeof(hdr)))
- return -EFAULT;
-
- rq = bsg_map_hdr(bd->queue, &hdr, file->f_mode);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- bio = rq->bio;
- if (rq->next_rq)
- bidi_bio = rq->next_rq->bio;
-
- at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
- blk_execute_rq(bd->queue, NULL, rq, at_head);
- ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);
-
- if (copy_to_user(uarg, &hdr, sizeof(hdr)))
- return -EFAULT;
-
- return ret;
- }
+ case SG_IO:
+ return bsg_sg_io(bd->queue, file->f_mode, uarg);
default:
return -ENOTTY;
}
diff --git a/block/elevator.c b/block/elevator.c
index f05e90d4e695..2f17d66d0e61 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Block device elevator/IO-scheduler.
*
@@ -177,7 +178,7 @@ static void elevator_release(struct kobject *kobj)
kfree(e);
}
-void elevator_exit(struct request_queue *q, struct elevator_queue *e)
+void __elevator_exit(struct request_queue *q, struct elevator_queue *e)
{
mutex_lock(&e->sysfs_lock);
if (e->type->ops.exit_sched)
@@ -509,8 +510,6 @@ void elv_unregister_queue(struct request_queue *q)
int elv_register(struct elevator_type *e)
{
- char *def = "";
-
/* create icq_cache if requested */
if (e->icq_size) {
if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
@@ -535,8 +534,8 @@ int elv_register(struct elevator_type *e)
list_add_tail(&e->list, &elv_list);
spin_unlock(&elv_list_lock);
- printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
- def);
+ printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);
+
return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
@@ -667,8 +666,11 @@ static int __elevator_change(struct request_queue *q, const char *name)
/*
* Special case for mq, turn off scheduling
*/
- if (!strncmp(name, "none", 4))
+ if (!strncmp(name, "none", 4)) {
+ if (!q->elevator)
+ return 0;
return elevator_switch(q, NULL);
+ }
strlcpy(elevator_name, name, sizeof(elevator_name));
e = elevator_get(q, strstrip(elevator_name), true);
diff --git a/block/genhd.c b/block/genhd.c
index 1dd8fd6613b8..24654e1d83e6 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* gendisk handling
*/
@@ -365,8 +366,8 @@ int register_blkdev(unsigned int major, const char *name)
}
if (index == 0) {
- printk("register_blkdev: failed to get major for %s\n",
- name);
+ printk("%s: failed to get major for %s\n",
+ __func__, name);
ret = -EBUSY;
goto out;
}
@@ -375,8 +376,8 @@ int register_blkdev(unsigned int major, const char *name)
}
if (major >= BLKDEV_MAJOR_MAX) {
- pr_err("register_blkdev: major requested (%u) is greater than the maximum (%u) for %s\n",
- major, BLKDEV_MAJOR_MAX-1, name);
+ pr_err("%s: major requested (%u) is greater than the maximum (%u) for %s\n",
+ __func__, major, BLKDEV_MAJOR_MAX-1, name);
ret = -EINVAL;
goto out;
@@ -531,6 +532,18 @@ void blk_free_devt(dev_t devt)
}
}
+/*
+ * We invalidate a devt by assigning a NULL pointer to its slot in the idr.
+ */
+void blk_invalidate_devt(dev_t devt)
+{
+ if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
+ spin_lock_bh(&ext_devt_lock);
+ idr_replace(&ext_devt_idr, NULL, blk_mangle_minor(MINOR(devt)));
+ spin_unlock_bh(&ext_devt_lock);
+ }
+}
+
static char *bdevt_str(dev_t devt, char *buf)
{
if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
@@ -655,10 +668,12 @@ exit:
kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD);
disk_part_iter_exit(&piter);
- err = sysfs_create_link(&ddev->kobj,
- &disk->queue->backing_dev_info->dev->kobj,
- "bdi");
- WARN_ON(err);
+ if (disk->queue->backing_dev_info->dev) {
+ err = sysfs_create_link(&ddev->kobj,
+ &disk->queue->backing_dev_info->dev->kobj,
+ "bdi");
+ WARN_ON(err);
+ }
}
/**
@@ -791,6 +806,13 @@ void del_gendisk(struct gendisk *disk)
if (!(disk->flags & GENHD_FL_HIDDEN))
blk_unregister_region(disk_devt(disk), disk->minors);
+ /*
+ * Remove the gendisk pointer from the idr so that it cannot be looked
+ * up during the RCU grace period that runs before the gendisk is freed,
+ * preventing use-after-free issues. Note that the device number stays
+ * "in-use" until we really free the gendisk.
+ */
+ blk_invalidate_devt(disk_devt(disk));
kobject_put(disk->part0.holder_dir);
kobject_put(disk->slave_dir);
@@ -1626,12 +1648,11 @@ static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
/*
* If device-specific poll interval is set, always use it. If
- * the default is being used, poll iff there are events which
- * can't be monitored asynchronously.
+ * the default is being used, poll if the POLL flag is set.
*/
if (ev->poll_msecs >= 0)
intv_msecs = ev->poll_msecs;
- else if (disk->events & ~disk->async_events)
+ else if (disk->event_flags & DISK_EVENT_FLAG_POLL)
intv_msecs = disk_events_dfl_poll_msecs;
return msecs_to_jiffies(intv_msecs);
@@ -1841,11 +1862,13 @@ static void disk_check_events(struct disk_events *ev,
/*
* Tell userland about new events. Only the events listed in
- * @disk->events are reported. Unlisted events are processed the
- * same internally but never get reported to userland.
+ * @disk->events are reported, and only if DISK_EVENT_FLAG_UEVENT
+ * is set. Otherwise, events are processed internally but never
+ * get reported to userland.
*/
for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
- if (events & disk->events & (1 << i))
+ if ((events & disk->events & (1 << i)) &&
+ (disk->event_flags & DISK_EVENT_FLAG_UEVENT))
envp[nr_events++] = disk_uevents[i];
if (nr_events)
@@ -1858,6 +1881,7 @@ static void disk_check_events(struct disk_events *ev,
*
* events : list of all supported events
* events_async : list of events which can be detected w/o polling
+ * (always empty, only for backwards compatibility)
* events_poll_msecs : polling interval, 0: disable, -1: system default
*/
static ssize_t __disk_events_show(unsigned int events, char *buf)
@@ -1882,15 +1906,16 @@ static ssize_t disk_events_show(struct device *dev,
{
struct gendisk *disk = dev_to_disk(dev);
+ if (!(disk->event_flags & DISK_EVENT_FLAG_UEVENT))
+ return 0;
+
return __disk_events_show(disk->events, buf);
}
static ssize_t disk_events_async_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct gendisk *disk = dev_to_disk(dev);
-
- return __disk_events_show(disk->async_events, buf);
+ return 0;
}
static ssize_t disk_events_poll_msecs_show(struct device *dev,
@@ -1899,6 +1924,9 @@ static ssize_t disk_events_poll_msecs_show(struct device *dev,
{
struct gendisk *disk = dev_to_disk(dev);
+ if (!disk->ev)
+ return sprintf(buf, "-1\n");
+
return sprintf(buf, "%ld\n", disk->ev->poll_msecs);
}
@@ -1915,6 +1943,9 @@ static ssize_t disk_events_poll_msecs_store(struct device *dev,
if (intv < 0 && intv != -1)
return -EINVAL;
+ if (!disk->ev)
+ return -ENODEV;
+
disk_block_events(disk);
disk->ev->poll_msecs = intv;
__disk_unblock_events(disk, true);
@@ -1979,7 +2010,7 @@ static void disk_alloc_events(struct gendisk *disk)
{
struct disk_events *ev;
- if (!disk->fops->check_events)
+ if (!disk->fops->check_events || !disk->events)
return;
ev = kzalloc(sizeof(*ev), GFP_KERNEL);
@@ -2001,14 +2032,14 @@ static void disk_alloc_events(struct gendisk *disk)
static void disk_add_events(struct gendisk *disk)
{
- if (!disk->ev)
- return;
-
/* FIXME: error handling */
if (sysfs_create_files(&disk_to_dev(disk)->kobj, disk_events_attrs) < 0)
pr_warn("%s: failed to create sysfs files for events\n",
disk->disk_name);
+ if (!disk->ev)
+ return;
+
mutex_lock(&disk_events_mutex);
list_add_tail(&disk->ev->node, &disk_events);
mutex_unlock(&disk_events_mutex);
@@ -2022,14 +2053,13 @@ static void disk_add_events(struct gendisk *disk)
static void disk_del_events(struct gendisk *disk)
{
- if (!disk->ev)
- return;
-
- disk_block_events(disk);
+ if (disk->ev) {
+ disk_block_events(disk);
- mutex_lock(&disk_events_mutex);
- list_del_init(&disk->ev->node);
- mutex_unlock(&disk_events_mutex);
+ mutex_lock(&disk_events_mutex);
+ list_del_init(&disk->ev->node);
+ mutex_unlock(&disk_events_mutex);
+ }
sysfs_remove_files(&disk_to_dev(disk)->kobj, disk_events_attrs);
}
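Taken together, the genhd.c hunks split "which events a driver can signal" (disk->events) from "how they are handled" (disk->event_flags): events reach userspace only when DISK_EVENT_FLAG_UEVENT is set, the default poll interval is used only when DISK_EVENT_FLAG_POLL is set, and events_async survives purely as an always-empty compatibility attribute. A compact sketch of that filtering, with invented constants standing in for the kernel's flag values:

#include <stdio.h>

/* Stand-ins for the kernel's DISK_EVENT_* and DISK_EVENT_FLAG_* values. */
#define EV_MEDIA_CHANGE   (1u << 0)
#define EV_EJECT_REQUEST  (1u << 1)
#define FLAG_POLL         (1u << 0)
#define FLAG_UEVENT       (1u << 1)

struct disk {
    unsigned events;        /* events the driver supports */
    unsigned event_flags;   /* how they should be handled */
};

/* Events reported to userspace: supported, pending, and UEVENT enabled. */
static unsigned reported_events(const struct disk *d, unsigned pending)
{
    if (!(d->event_flags & FLAG_UEVENT))
        return 0;
    return pending & d->events;
}

/* Default polling applies only when the POLL flag is set. */
static int needs_default_polling(const struct disk *d)
{
    return !!(d->event_flags & FLAG_POLL);
}

int main(void)
{
    struct disk cdrom = { EV_MEDIA_CHANGE | EV_EJECT_REQUEST,
                          FLAG_POLL | FLAG_UEVENT };
    struct disk quiet = { EV_MEDIA_CHANGE, 0 };

    printf("cdrom reports 0x%x, polls=%d\n",
           reported_events(&cdrom, EV_MEDIA_CHANGE),
           needs_default_polling(&cdrom));
    printf("quiet reports 0x%x, polls=%d\n",
           reported_events(&quiet, EV_MEDIA_CHANGE),
           needs_default_polling(&quiet));
    return 0;
}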
diff --git a/block/ioctl.c b/block/ioctl.c
index 4825c78a6baa..15a0eb80ada9 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/export.h>
diff --git a/block/ioprio.c b/block/ioprio.c
index f9821080c92c..2e0559f157c8 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* fs/ioprio.c
*
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index ec6a04e01bc1..c3b05119cebd 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -1,20 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* The Kyber I/O scheduler. Controls latency by throttling queue depths using
* scalable techniques.
*
* Copyright (C) 2017 Facebook
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License v2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 14288f864e94..1876f5712bfd 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
* for the blk-mq scheduling framework
diff --git a/block/opal_proto.h b/block/opal_proto.h
index e20be8258854..d9a05ad02eb5 100644
--- a/block/opal_proto.h
+++ b/block/opal_proto.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright © 2016 Intel Corporation
*
* Authors:
* Rafael Antognolli <rafael.antognolli@intel.com>
* Scott Bauer <scott.bauer@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/types.h>
@@ -170,6 +162,8 @@ enum opal_token {
OPAL_READLOCKED = 0x07,
OPAL_WRITELOCKED = 0x08,
OPAL_ACTIVEKEY = 0x0A,
+ /* lockingsp table */
+ OPAL_LIFECYCLE = 0x06,
/* locking info table */
OPAL_MAXRANGES = 0x04,
/* mbr control */
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 8e596a8dff32..aee643ce13d1 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -285,6 +285,13 @@ void delete_partition(struct gendisk *disk, int partno)
kobject_put(part->holder_dir);
device_del(part_to_dev(part));
+ /*
+ * Remove gendisk pointer from idr so that it cannot be looked up
+ * while RCU period before freeing gendisk is running to prevent
+ * use-after-free issues. Note that the device number stays
+ * "in-use" until we really free the gendisk.
+ */
+ blk_invalidate_devt(part_devt(part));
hd_struct_kill(part);
}
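In both del_gendisk() and delete_partition() above, blk_invalidate_devt() relies on idr_replace(..., NULL, ...) to make lookups fail while the device number itself stays allocated until the gendisk is really freed. A rough userspace analogue of that "invalidate but keep reserved" idea, using a plain array instead of an IDR (all names here are invented for illustration):

#include <stdio.h>
#include <stddef.h>

#define MAX_IDS 8

static void *slot[MAX_IDS];     /* stand-in for the ext_devt IDR */
static int   used[MAX_IDS];     /* 1 = id allocated (even if invalidated) */

static int id_alloc(void *ptr)
{
    for (int i = 0; i < MAX_IDS; i++)
        if (!used[i]) { used[i] = 1; slot[i] = ptr; return i; }
    return -1;
}

/* Invalidate: lookups now miss, but the id stays reserved. */
static void id_invalidate(int id) { slot[id] = NULL; }

/* Free: only now may the id be handed out again. */
static void id_free(int id) { used[id] = 0; slot[id] = NULL; }

static void *id_lookup(int id) { return used[id] ? slot[id] : NULL; }

int main(void)
{
    int disk = 42;                      /* pretend gendisk */
    int id = id_alloc(&disk);

    id_invalidate(id);                  /* like blk_invalidate_devt() */
    printf("lookup after invalidate: %p\n", id_lookup(id));  /* (nil) */
    printf("id still reserved: %d\n", used[id]);             /* 1 */

    id_free(id);                        /* like blk_free_devt() */
    return 0;
}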
diff --git a/block/partitions/acorn.c b/block/partitions/acorn.c
index fbeb697374d5..7587700fad4a 100644
--- a/block/partitions/acorn.c
+++ b/block/partitions/acorn.c
@@ -1,12 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * linux/fs/partitions/acorn.c
- *
* Copyright (c) 1996-2000 Russell King.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Scan ADFS partitions on hard disk drives. Unfortunately, there
* isn't a standard for partitioning drives on Acorn machines, so
* every single manufacturer of SCSI and IDE cards created their own
diff --git a/block/partitions/aix.h b/block/partitions/aix.h
index e0c66a987523..b4449f0b9f2b 100644
--- a/block/partitions/aix.h
+++ b/block/partitions/aix.h
@@ -1 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
extern int aix_partition(struct parsed_partitions *state);
diff --git a/block/partitions/amiga.h b/block/partitions/amiga.h
index d094585cadaa..7e63f4d9d969 100644
--- a/block/partitions/amiga.h
+++ b/block/partitions/amiga.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/partitions/amiga.h
*/
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index 39f70d968754..db2fef7dfc47 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/************************************************************
* EFI GUID Partition Table handling
*
@@ -7,21 +8,6 @@
* efi.[ch] by Matt Domsch <Matt_Domsch@dell.com>
* Copyright 2000,2001,2002,2004 Dell Inc.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *
* TODO:
*
* Changelog:
diff --git a/block/partitions/efi.h b/block/partitions/efi.h
index abd0b19288a6..3e8576157575 100644
--- a/block/partitions/efi.h
+++ b/block/partitions/efi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/************************************************************
* EFI GUID Partition Table
* Per Intel EFI Specification v1.02
@@ -5,21 +6,6 @@
*
* By Matt Domsch <Matt_Domsch@dell.com> Fri Sep 22 22:15:56 CDT 2000
* Copyright 2000,2001 Dell Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
************************************************************/
#ifndef FS_PART_EFI_H_INCLUDED
diff --git a/block/partitions/ibm.h b/block/partitions/ibm.h
index 08fb0804a812..8bf13febb2b6 100644
--- a/block/partitions/ibm.h
+++ b/block/partitions/ibm.h
@@ -1 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
int ibm_partition(struct parsed_partitions *);
diff --git a/block/partitions/karma.h b/block/partitions/karma.h
index c764b2e9df21..48e074d417fb 100644
--- a/block/partitions/karma.h
+++ b/block/partitions/karma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/partitions/karma.h
*/
diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c
index 16766f267559..fe5d970e2e60 100644
--- a/block/partitions/ldm.c
+++ b/block/partitions/ldm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/**
* ldm - Support for Windows Logical Disk Manager (Dynamic Disks)
*
@@ -6,21 +7,6 @@
* Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
*
* Documentation is available at http://www.linux-ntfs.org/doku.php?id=downloads
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License as published by the Free Software
- * Foundation; either version 2 of the License, or (at your option) any later
- * version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program (in the main directory of the source in the file COPYING); if
- * not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
- * Boston, MA 02111-1307 USA
*/
#include <linux/slab.h>
@@ -33,7 +19,7 @@
#include "check.h"
#include "msdos.h"
-/**
+/*
* ldm_debug/info/error/crit - Output an error message
* @f: A printf format string containing the message
* @...: Variables to substitute into @f
diff --git a/block/partitions/ldm.h b/block/partitions/ldm.h
index f4c6055df956..1ca63e97bccc 100644
--- a/block/partitions/ldm.h
+++ b/block/partitions/ldm.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/**
* ldm - Part of the Linux-NTFS project.
*
@@ -6,21 +7,6 @@
* Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
*
* Documentation is available at http://www.linux-ntfs.org/doku.php?id=downloads
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program (in the main directory of the Linux-NTFS source
- * in the file COPYING); if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _FS_PT_LDM_H_
diff --git a/block/partitions/msdos.h b/block/partitions/msdos.h
index 38c781c490b3..fcacfc486092 100644
--- a/block/partitions/msdos.h
+++ b/block/partitions/msdos.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/partitions/msdos.h
*/
diff --git a/block/partitions/osf.h b/block/partitions/osf.h
index 20ed2315ec16..4d8088e7ea8c 100644
--- a/block/partitions/osf.h
+++ b/block/partitions/osf.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/partitions/osf.h
*/
diff --git a/block/partitions/sgi.h b/block/partitions/sgi.h
index b9553ebdd5a9..a5b77c3987cf 100644
--- a/block/partitions/sgi.h
+++ b/block/partitions/sgi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/partitions/sgi.h
*/
diff --git a/block/partitions/sun.h b/block/partitions/sun.h
index 2424baa8319f..ae1b9eed3fd7 100644
--- a/block/partitions/sun.h
+++ b/block/partitions/sun.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/partitions/sun.h
*/
diff --git a/block/partitions/sysv68.h b/block/partitions/sysv68.h
index bf2f5ffa97ac..4fb6b8ec78ae 100644
--- a/block/partitions/sysv68.h
+++ b/block/partitions/sysv68.h
@@ -1 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
extern int sysv68_partition(struct parsed_partitions *state);
diff --git a/block/partitions/ultrix.h b/block/partitions/ultrix.h
index a3cc00b2bded..9f676cead222 100644
--- a/block/partitions/ultrix.h
+++ b/block/partitions/ultrix.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/partitions/ultrix.h
*/
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 533f4aee8567..f5e0ad65e86a 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -1,20 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 Jens Axboe <axboe@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public Licens
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
- *
*/
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/block/sed-opal.c b/block/sed-opal.c
index e0de4dd448b3..a46e8d13e16d 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright © 2016 Intel Corporation
*
* Authors:
* Scott Bauer <scott.bauer@intel.com>
* Rafael Antognolli <rafael.antognolli@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":OPAL: " fmt
@@ -85,7 +77,6 @@ struct opal_dev {
void *data;
sec_send_recv *send_recv;
- const struct opal_step *steps;
struct mutex dev_lock;
u16 comid;
u32 hsn;
@@ -157,7 +148,7 @@ static const u8 opaluid[][OPAL_UID_LENGTH] = {
/* C_PIN_TABLE object ID's */
- [OPAL_C_PIN_MSID] =
+ [OPAL_C_PIN_MSID] =
{ 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x84, 0x02},
[OPAL_C_PIN_SID] =
{ 0x00, 0x00, 0x00, 0x0B, 0x00, 0x00, 0x00, 0x01},
@@ -181,7 +172,7 @@ static const u8 opaluid[][OPAL_UID_LENGTH] = {
* Derived from: TCG_Storage_Architecture_Core_Spec_v2.01_r1.00
* Section: 6.3 Assigned UIDs
*/
-static const u8 opalmethod[][OPAL_UID_LENGTH] = {
+static const u8 opalmethod[][OPAL_METHOD_LENGTH] = {
[OPAL_PROPERTIES] =
{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x01 },
[OPAL_STARTSESSION] =
@@ -217,6 +208,7 @@ static const u8 opalmethod[][OPAL_UID_LENGTH] = {
};
static int end_opal_session_error(struct opal_dev *dev);
+static int opal_discovery0_step(struct opal_dev *dev);
struct opal_suspend_data {
struct opal_lock_unlock unlk;
@@ -382,37 +374,50 @@ static void check_geometry(struct opal_dev *dev, const void *data)
dev->lowest_lba = geo->lowest_aligned_lba;
}
-static int next(struct opal_dev *dev)
+static int execute_step(struct opal_dev *dev,
+ const struct opal_step *step, size_t stepIndex)
{
- const struct opal_step *step;
- int state = 0, error = 0;
+ int error = step->fn(dev, step->data);
- do {
- step = &dev->steps[state];
- if (!step->fn)
- break;
+ if (error) {
+ pr_debug("Step %zu (%pS) failed with error %d: %s\n",
+ stepIndex, step->fn, error,
+ opal_error_to_human(error));
+ }
- error = step->fn(dev, step->data);
- if (error) {
- pr_debug("Error on step function: %d with error %d: %s\n",
- state, error,
- opal_error_to_human(error));
-
- /* For each OPAL command we do a discovery0 then we
- * start some sort of session.
- * If we haven't passed state 1 then there was an error
- * on discovery0 or during the attempt to start a
- * session. Therefore we shouldn't attempt to terminate
- * a session, as one has not yet been created.
- */
- if (state > 1) {
- end_opal_session_error(dev);
- return error;
- }
+ return error;
+}
- }
- state++;
- } while (!error);
+static int execute_steps(struct opal_dev *dev,
+ const struct opal_step *steps, size_t n_steps)
+{
+ size_t state = 0;
+ int error;
+
+ /* first do a discovery0 */
+ error = opal_discovery0_step(dev);
+ if (error)
+ return error;
+
+ for (state = 0; state < n_steps; state++) {
+ error = execute_step(dev, &steps[state], state);
+ if (error)
+ goto out_error;
+ }
+
+ return 0;
+
+out_error:
+ /*
+ * For each OPAL command the first step in steps starts some sort of
+ * session. If an error occurred in the initial discovery0 or if an
+ * error occurred in the first step (and thus stopping the loop with
+ * state == 0) then there was an error before or during the attempt to
+ * start a session. Therefore we shouldn't attempt to terminate a
+ * session, as one has not yet been created.
+ */
+ if (state > 0)
+ end_opal_session_error(dev);
return error;
}
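The conversion above replaces the NULL-terminated dev->steps walker with execute_steps(), which takes an explicit array plus its length, runs discovery0 up front, and tears the session down only when a step after the session-starting step 0 fails. The shape of that pattern, reduced to a standalone userspace sketch (struct step, run_steps and the rest are simplified stand-ins, with discovery0 omitted; they are not the kernel definitions):

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct dev { int session_open; };

struct step {
    int (*fn)(struct dev *d, void *data);
    void *data;
};

static int run_step(struct dev *d, const struct step *s, size_t idx)
{
    int err = s->fn(d, s->data);
    if (err)
        fprintf(stderr, "step %zu failed: %d\n", idx, err);
    return err;
}

static void end_session(struct dev *d) { d->session_open = 0; }

static int run_steps(struct dev *d, const struct step *steps, size_t n)
{
    for (size_t i = 0; i < n; i++) {
        int err = run_step(d, &steps[i], i);
        if (err) {
            /* Step 0 starts the session; only later failures need teardown. */
            if (i > 0)
                end_session(d);
            return err;
        }
    }
    return 0;
}

static int start_session(struct dev *d, void *data) { (void)data; d->session_open = 1; return 0; }
static int do_work(struct dev *d, void *data)       { (void)d; return *(int *)data; }
static int close_session(struct dev *d, void *data) { (void)data; end_session(d); return 0; }

int main(void)
{
    int fail = -1;
    struct dev d = { 0 };
    const struct step steps[] = {
        { start_session, NULL },
        { do_work,       &fail },   /* fails, so run_steps ends the session */
        { close_session, NULL },
    };
    return run_steps(&d, steps, ARRAY_SIZE(steps)) ? 1 : 0;
}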
@@ -510,15 +515,32 @@ static int opal_discovery0(struct opal_dev *dev, void *data)
return opal_discovery0_end(dev);
}
-static void add_token_u8(int *err, struct opal_dev *cmd, u8 tok)
+static int opal_discovery0_step(struct opal_dev *dev)
+{
+ const struct opal_step discovery0_step = {
+ opal_discovery0,
+ };
+ return execute_step(dev, &discovery0_step, 0);
+}
+
+static bool can_add(int *err, struct opal_dev *cmd, size_t len)
{
if (*err)
- return;
- if (cmd->pos >= IO_BUFFER_LENGTH - 1) {
- pr_debug("Error adding u8: end of buffer.\n");
+ return false;
+
+ if (len > IO_BUFFER_LENGTH || cmd->pos > IO_BUFFER_LENGTH - len) {
+ pr_debug("Error adding %zu bytes: end of buffer.\n", len);
*err = -ERANGE;
- return;
+ return false;
}
+
+ return true;
+}
+
+static void add_token_u8(int *err, struct opal_dev *cmd, u8 tok)
+{
+ if (!can_add(err, cmd, 1))
+ return;
cmd->cmd[cmd->pos++] = tok;
}
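The new can_add() helper phrases the bounds check as len > IO_BUFFER_LENGTH || pos > IO_BUFFER_LENGTH - len, which cannot wrap however large len is, whereas checks that subtract len from the buffer size first let a huge len wrap the right-hand side around. A standalone sketch of the difference (buffer size and names are arbitrary, not the kernel constants):

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

#define BUF_LEN 2048u

/* Safe form: no subtraction that can wrap when len is huge. */
static bool can_add(size_t pos, size_t len)
{
    return len <= BUF_LEN && pos <= BUF_LEN - len;
}

/* Unsafe form for comparison: BUF_LEN - len - 1 wraps when len > BUF_LEN. */
static bool can_add_wrapping(size_t pos, size_t len)
{
    return pos < BUF_LEN - len - 1;
}

int main(void)
{
    size_t huge = (size_t)-1;   /* e.g. a corrupted length */

    printf("safe   : %d\n", can_add(0, huge));          /* 0: rejected */
    printf("unsafe : %d\n", can_add_wrapping(0, huge)); /* 1: wrongly accepted */
    return 0;
}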
@@ -551,7 +573,6 @@ static void add_medium_atom_header(struct opal_dev *cmd, bool bytestring,
static void add_token_u64(int *err, struct opal_dev *cmd, u64 number)
{
-
size_t len;
int msb;
@@ -563,9 +584,8 @@ static void add_token_u64(int *err, struct opal_dev *cmd, u64 number)
msb = fls64(number);
len = DIV_ROUND_UP(msb, 8);
- if (cmd->pos >= IO_BUFFER_LENGTH - len - 1) {
+ if (!can_add(err, cmd, len + 1)) {
pr_debug("Error adding u64: end of buffer.\n");
- *err = -ERANGE;
return;
}
add_short_atom_header(cmd, false, false, len);
@@ -573,24 +593,19 @@ static void add_token_u64(int *err, struct opal_dev *cmd, u64 number)
add_token_u8(err, cmd, number >> (len * 8));
}
-static void add_token_bytestring(int *err, struct opal_dev *cmd,
- const u8 *bytestring, size_t len)
+static u8 *add_bytestring_header(int *err, struct opal_dev *cmd, size_t len)
{
size_t header_len = 1;
bool is_short_atom = true;
- if (*err)
- return;
-
if (len & ~SHORT_ATOM_LEN_MASK) {
header_len = 2;
is_short_atom = false;
}
- if (len >= IO_BUFFER_LENGTH - cmd->pos - header_len) {
+ if (!can_add(err, cmd, header_len + len)) {
pr_debug("Error adding bytestring: end of buffer.\n");
- *err = -ERANGE;
- return;
+ return NULL;
}
if (is_short_atom)
@@ -598,9 +613,19 @@ static void add_token_bytestring(int *err, struct opal_dev *cmd,
else
add_medium_atom_header(cmd, true, false, len);
- memcpy(&cmd->cmd[cmd->pos], bytestring, len);
- cmd->pos += len;
+ return &cmd->cmd[cmd->pos];
+}
+
+static void add_token_bytestring(int *err, struct opal_dev *cmd,
+ const u8 *bytestring, size_t len)
+{
+ u8 *start;
+ start = add_bytestring_header(err, cmd, len);
+ if (!start)
+ return;
+ memcpy(start, bytestring, len);
+ cmd->pos += len;
}
static int build_locking_range(u8 *buffer, size_t length, u8 lr)
@@ -623,7 +648,7 @@ static int build_locking_range(u8 *buffer, size_t length, u8 lr)
static int build_locking_user(u8 *buffer, size_t length, u8 lr)
{
if (length > OPAL_UID_LENGTH) {
- pr_debug("Can't build locking range user, Length OOB\n");
+ pr_debug("Can't build locking range user. Length OOB\n");
return -ERANGE;
}
@@ -649,6 +674,9 @@ static int cmd_finalize(struct opal_dev *cmd, u32 hsn, u32 tsn)
struct opal_header *hdr;
int err = 0;
+ /* close the parameter list opened from cmd_start */
+ add_token_u8(&err, cmd, OPAL_ENDLIST);
+
add_token_u8(&err, cmd, OPAL_ENDOFDATA);
add_token_u8(&err, cmd, OPAL_STARTLIST);
add_token_u8(&err, cmd, 0);
@@ -687,6 +715,11 @@ static const struct opal_resp_tok *response_get_token(
{
const struct opal_resp_tok *tok;
+ if (!resp) {
+ pr_debug("Response is NULL\n");
+ return ERR_PTR(-EINVAL);
+ }
+
if (n >= resp->num) {
pr_debug("Token number doesn't exist: %d, resp: %d\n",
n, resp->num);
@@ -869,27 +902,19 @@ static size_t response_get_string(const struct parsed_resp *resp, int n,
const char **store)
{
u8 skip;
- const struct opal_resp_tok *token;
+ const struct opal_resp_tok *tok;
*store = NULL;
- if (!resp) {
- pr_debug("Response is NULL\n");
- return 0;
- }
-
- if (n >= resp->num) {
- pr_debug("Response has %d tokens. Can't access %d\n",
- resp->num, n);
+ tok = response_get_token(resp, n);
+ if (IS_ERR(tok))
return 0;
- }
- token = &resp->toks[n];
- if (token->type != OPAL_DTA_TOKENID_BYTESTRING) {
+ if (tok->type != OPAL_DTA_TOKENID_BYTESTRING) {
pr_debug("Token is not a byte string!\n");
return 0;
}
- switch (token->width) {
+ switch (tok->width) {
case OPAL_WIDTH_TINY:
case OPAL_WIDTH_SHORT:
skip = 1;
@@ -905,37 +930,29 @@ static size_t response_get_string(const struct parsed_resp *resp, int n,
return 0;
}
- *store = token->pos + skip;
- return token->len - skip;
+ *store = tok->pos + skip;
+ return tok->len - skip;
}
static u64 response_get_u64(const struct parsed_resp *resp, int n)
{
- if (!resp) {
- pr_debug("Response is NULL\n");
- return 0;
- }
+ const struct opal_resp_tok *tok;
- if (n >= resp->num) {
- pr_debug("Response has %d tokens. Can't access %d\n",
- resp->num, n);
+ tok = response_get_token(resp, n);
+ if (IS_ERR(tok))
return 0;
- }
- if (resp->toks[n].type != OPAL_DTA_TOKENID_UINT) {
- pr_debug("Token is not unsigned it: %d\n",
- resp->toks[n].type);
+ if (tok->type != OPAL_DTA_TOKENID_UINT) {
+ pr_debug("Token is not unsigned int: %d\n", tok->type);
return 0;
}
- if (!(resp->toks[n].width == OPAL_WIDTH_TINY ||
- resp->toks[n].width == OPAL_WIDTH_SHORT)) {
- pr_debug("Atom is not short or tiny: %d\n",
- resp->toks[n].width);
+ if (tok->width != OPAL_WIDTH_TINY && tok->width != OPAL_WIDTH_SHORT) {
+ pr_debug("Atom is not short or tiny: %d\n", tok->width);
return 0;
}
- return resp->toks[n].stored.u;
+ return tok->stored.u;
}
static bool response_token_matches(const struct opal_resp_tok *token, u8 match)
@@ -991,6 +1008,27 @@ static void clear_opal_cmd(struct opal_dev *dev)
memset(dev->cmd, 0, IO_BUFFER_LENGTH);
}
+static int cmd_start(struct opal_dev *dev, const u8 *uid, const u8 *method)
+{
+ int err = 0;
+
+ clear_opal_cmd(dev);
+ set_comid(dev, dev->comid);
+
+ add_token_u8(&err, dev, OPAL_CALL);
+ add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
+ add_token_bytestring(&err, dev, method, OPAL_METHOD_LENGTH);
+
+ /*
+ * Every method call is followed by its parameters enclosed within
+ * OPAL_STARTLIST and OPAL_ENDLIST tokens. We automatically open the
+ * parameter list here and close it later in cmd_finalize.
+ */
+ add_token_u8(&err, dev, OPAL_STARTLIST);
+
+ return err;
+}
+
static int start_opal_session_cont(struct opal_dev *dev)
{
u32 hsn, tsn;
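cmd_start() now emits the method-call header and opens the parameter OPAL_STARTLIST, while cmd_finalize() (changed earlier in this diff) appends the matching OPAL_ENDLIST, so the individual command builders only add their own parameters and drop the trailing end-of-list token each of them used to emit. A small sketch of that bracketing pattern with a sticky error accumulator (token values and helper names are invented):

#include <stdio.h>
#include <stddef.h>

#define CAP 32

enum { TOK_CALL = 1, TOK_STARTLIST, TOK_ENDLIST, TOK_END_OF_DATA };

struct cmd { unsigned char buf[CAP]; size_t pos; };

/* Append one token; once *err is set, further calls are no-ops. */
static void add_u8(int *err, struct cmd *c, unsigned char tok)
{
    if (*err)
        return;
    if (c->pos >= CAP) { *err = -1; return; }
    c->buf[c->pos++] = tok;
}

/* cmd_start(): method-call header plus the opening parameter list. */
static int cmd_start(struct cmd *c)
{
    int err = 0;
    c->pos = 0;
    add_u8(&err, c, TOK_CALL);
    add_u8(&err, c, TOK_STARTLIST);   /* opened here ... */
    return err;
}

/* cmd_finalize(): closes the list opened by cmd_start(). */
static int cmd_finalize(struct cmd *c, int err)
{
    add_u8(&err, c, TOK_ENDLIST);     /* ... closed here */
    add_u8(&err, c, TOK_END_OF_DATA);
    return err;
}

int main(void)
{
    struct cmd c;
    int err = cmd_start(&c);
    add_u8(&err, &c, 0x42);           /* a builder adds only its parameters */
    err = cmd_finalize(&c, err);
    printf("err=%d len=%zu\n", err, c.pos);
    return err ? 1 : 0;
}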
@@ -1050,24 +1088,47 @@ static int finalize_and_send(struct opal_dev *dev, cont_fn cont)
return opal_send_recv(dev, cont);
}
+/*
+ * request @column from table @table on device @dev. On success, the column
+ * data will be available in dev->resp->tok[4]
+ */
+static int generic_get_column(struct opal_dev *dev, const u8 *table,
+ u64 column)
+{
+ int err;
+
+ err = cmd_start(dev, table, opalmethod[OPAL_GET]);
+
+ add_token_u8(&err, dev, OPAL_STARTLIST);
+
+ add_token_u8(&err, dev, OPAL_STARTNAME);
+ add_token_u8(&err, dev, OPAL_STARTCOLUMN);
+ add_token_u64(&err, dev, column);
+ add_token_u8(&err, dev, OPAL_ENDNAME);
+
+ add_token_u8(&err, dev, OPAL_STARTNAME);
+ add_token_u8(&err, dev, OPAL_ENDCOLUMN);
+ add_token_u64(&err, dev, column);
+ add_token_u8(&err, dev, OPAL_ENDNAME);
+
+ add_token_u8(&err, dev, OPAL_ENDLIST);
+
+ if (err)
+ return err;
+
+ return finalize_and_send(dev, parse_and_check_status);
+}
+
static int gen_key(struct opal_dev *dev, void *data)
{
u8 uid[OPAL_UID_LENGTH];
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ int err;
memcpy(uid, dev->prev_data, min(sizeof(uid), dev->prev_d_len));
kfree(dev->prev_data);
dev->prev_data = NULL;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_GENKEY],
- OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
+ err = cmd_start(dev, uid, opalmethod[OPAL_GENKEY]);
if (err) {
pr_debug("Error building gen key command\n");
@@ -1105,62 +1166,39 @@ static int get_active_key_cont(struct opal_dev *dev)
static int get_active_key(struct opal_dev *dev, void *data)
{
u8 uid[OPAL_UID_LENGTH];
- int err = 0;
+ int err;
u8 *lr = data;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
err = build_locking_range(uid, sizeof(uid), *lr);
if (err)
return err;
- err = 0;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_GET], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 3); /* startCloumn */
- add_token_u8(&err, dev, 10); /* ActiveKey */
- add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 4); /* endColumn */
- add_token_u8(&err, dev, 10); /* ActiveKey */
- add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
- if (err) {
- pr_debug("Error building get active key command\n");
+ err = generic_get_column(dev, uid, OPAL_ACTIVEKEY);
+ if (err)
return err;
- }
- return finalize_and_send(dev, get_active_key_cont);
+ return get_active_key_cont(dev);
}
static int generic_lr_enable_disable(struct opal_dev *dev,
u8 *uid, bool rle, bool wle,
bool rl, bool wl)
{
- int err = 0;
+ int err;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET], OPAL_UID_LENGTH);
+ err = cmd_start(dev, uid, opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 5); /* ReadLockEnabled */
+ add_token_u8(&err, dev, OPAL_READLOCKENABLED);
add_token_u8(&err, dev, rle);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 6); /* WriteLockEnabled */
+ add_token_u8(&err, dev, OPAL_WRITELOCKENABLED);
add_token_u8(&err, dev, wle);
add_token_u8(&err, dev, OPAL_ENDNAME);
@@ -1176,7 +1214,6 @@ static int generic_lr_enable_disable(struct opal_dev *dev,
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
return err;
}
@@ -1197,10 +1234,7 @@ static int setup_locking_range(struct opal_dev *dev, void *data)
u8 uid[OPAL_UID_LENGTH];
struct opal_user_lr_setup *setup = data;
u8 lr;
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ int err;
lr = setup->session.opal_key.lr;
err = build_locking_range(uid, sizeof(uid), lr);
@@ -1210,40 +1244,34 @@ static int setup_locking_range(struct opal_dev *dev, void *data)
if (lr == 0)
err = enable_global_lr(dev, uid, setup);
else {
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET],
- OPAL_UID_LENGTH);
+ err = cmd_start(dev, uid, opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 3); /* Ranges Start */
+ add_token_u8(&err, dev, OPAL_RANGESTART);
add_token_u64(&err, dev, setup->range_start);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 4); /* Ranges length */
+ add_token_u8(&err, dev, OPAL_RANGELENGTH);
add_token_u64(&err, dev, setup->range_length);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 5); /*ReadLockEnabled */
+ add_token_u8(&err, dev, OPAL_READLOCKENABLED);
add_token_u64(&err, dev, !!setup->RLE);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 6); /*WriteLockEnabled*/
+ add_token_u8(&err, dev, OPAL_WRITELOCKENABLED);
add_token_u64(&err, dev, !!setup->WLE);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
-
}
if (err) {
pr_debug("Error building Setup Locking range command.\n");
@@ -1261,29 +1289,21 @@ static int start_generic_opal_session(struct opal_dev *dev,
u8 key_len)
{
u32 hsn;
- int err = 0;
+ int err;
if (key == NULL && auth != OPAL_ANYBODY_UID)
return OPAL_INVAL_PARAM;
- clear_opal_cmd(dev);
-
- set_comid(dev, dev->comid);
hsn = GENERIC_HOST_SESSION_NUM;
+ err = cmd_start(dev, opaluid[OPAL_SMUID_UID],
+ opalmethod[OPAL_STARTSESSION]);
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_SMUID_UID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_STARTSESSION],
- OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u64(&err, dev, hsn);
add_token_bytestring(&err, dev, opaluid[sp_type], OPAL_UID_LENGTH);
add_token_u8(&err, dev, 1);
switch (auth) {
case OPAL_ANYBODY_UID:
- add_token_u8(&err, dev, OPAL_ENDLIST);
break;
case OPAL_ADMIN1_UID:
case OPAL_SID_UID:
@@ -1296,7 +1316,6 @@ static int start_generic_opal_session(struct opal_dev *dev,
add_token_bytestring(&err, dev, opaluid[auth],
OPAL_UID_LENGTH);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
break;
default:
pr_debug("Cannot start Admin SP session with auth %d\n", auth);
@@ -1324,6 +1343,7 @@ static int start_SIDASP_opal_session(struct opal_dev *dev, void *data)
if (!key) {
const struct opal_key *okey = data;
+
ret = start_generic_opal_session(dev, OPAL_SID_UID,
OPAL_ADMINSP_UID,
okey->key,
@@ -1341,6 +1361,7 @@ static int start_SIDASP_opal_session(struct opal_dev *dev, void *data)
static int start_admin1LSP_opal_session(struct opal_dev *dev, void *data)
{
struct opal_key *key = data;
+
return start_generic_opal_session(dev, OPAL_ADMIN1_UID,
OPAL_LOCKINGSP_UID,
key->key, key->key_len);
@@ -1356,30 +1377,21 @@ static int start_auth_opal_session(struct opal_dev *dev, void *data)
u8 *key = session->opal_key.key;
u32 hsn = GENERIC_HOST_SESSION_NUM;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
- if (session->sum) {
+ if (session->sum)
err = build_locking_user(lk_ul_user, sizeof(lk_ul_user),
session->opal_key.lr);
- if (err)
- return err;
-
- } else if (session->who != OPAL_ADMIN1 && !session->sum) {
+ else if (session->who != OPAL_ADMIN1 && !session->sum)
err = build_locking_user(lk_ul_user, sizeof(lk_ul_user),
session->who - 1);
- if (err)
- return err;
- } else
+ else
memcpy(lk_ul_user, opaluid[OPAL_ADMIN1_UID], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_SMUID_UID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_STARTSESSION],
- OPAL_UID_LENGTH);
+ if (err)
+ return err;
+
+ err = cmd_start(dev, opaluid[OPAL_SMUID_UID],
+ opalmethod[OPAL_STARTSESSION]);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u64(&err, dev, hsn);
add_token_bytestring(&err, dev, opaluid[OPAL_LOCKINGSP_UID],
OPAL_UID_LENGTH);
@@ -1392,7 +1404,6 @@ static int start_auth_opal_session(struct opal_dev *dev, void *data)
add_token_u8(&err, dev, 3);
add_token_bytestring(&err, dev, lk_ul_user, OPAL_UID_LENGTH);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error building STARTSESSION command.\n");
@@ -1404,18 +1415,10 @@ static int start_auth_opal_session(struct opal_dev *dev, void *data)
static int revert_tper(struct opal_dev *dev, void *data)
{
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ int err;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_ADMINSP_UID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_REVERT],
- OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
+ err = cmd_start(dev, opaluid[OPAL_ADMINSP_UID],
+ opalmethod[OPAL_REVERT]);
if (err) {
pr_debug("Error building REVERT TPER command.\n");
return err;
@@ -1428,18 +1431,12 @@ static int internal_activate_user(struct opal_dev *dev, void *data)
{
struct opal_session_info *session = data;
u8 uid[OPAL_UID_LENGTH];
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ int err;
memcpy(uid, opaluid[OPAL_USER1_UID], OPAL_UID_LENGTH);
uid[7] = session->who;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
+ err = cmd_start(dev, uid, opalmethod[OPAL_SET]);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
@@ -1449,7 +1446,6 @@ static int internal_activate_user(struct opal_dev *dev, void *data)
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error building Activate UserN command.\n");
@@ -1463,20 +1459,12 @@ static int erase_locking_range(struct opal_dev *dev, void *data)
{
struct opal_session_info *session = data;
u8 uid[OPAL_UID_LENGTH];
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ int err;
if (build_locking_range(uid, sizeof(uid), session->opal_key.lr) < 0)
return -ERANGE;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_ERASE],
- OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
+ err = cmd_start(dev, uid, opalmethod[OPAL_ERASE]);
if (err) {
pr_debug("Error building Erase Locking Range Command.\n");
@@ -1488,26 +1476,20 @@ static int erase_locking_range(struct opal_dev *dev, void *data)
static int set_mbr_done(struct opal_dev *dev, void *data)
{
u8 *mbr_done_tf = data;
- int err = 0;
+ int err;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ err = cmd_start(dev, opaluid[OPAL_MBRCONTROL],
+ opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_MBRCONTROL],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 2); /* Done */
+ add_token_u8(&err, dev, OPAL_MBRDONE);
add_token_u8(&err, dev, *mbr_done_tf); /* Done T or F */
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error Building set MBR Done command\n");
@@ -1520,26 +1502,20 @@ static int set_mbr_done(struct opal_dev *dev, void *data)
static int set_mbr_enable_disable(struct opal_dev *dev, void *data)
{
u8 *mbr_en_dis = data;
- int err = 0;
+ int err;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ err = cmd_start(dev, opaluid[OPAL_MBRCONTROL],
+ opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_MBRCONTROL],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 1);
+ add_token_u8(&err, dev, OPAL_MBRENABLE);
add_token_u8(&err, dev, *mbr_en_dis);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error Building set MBR done command\n");
@@ -1552,26 +1528,19 @@ static int set_mbr_enable_disable(struct opal_dev *dev, void *data)
static int generic_pw_cmd(u8 *key, size_t key_len, u8 *cpin_uid,
struct opal_dev *dev)
{
- int err = 0;
+ int err;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ err = cmd_start(dev, cpin_uid, opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, cpin_uid, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET],
- OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 3); /* PIN */
+ add_token_u8(&err, dev, OPAL_PIN);
add_token_bytestring(&err, dev, key, key_len);
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
return err;
}
@@ -1619,10 +1588,7 @@ static int add_user_to_lr(struct opal_dev *dev, void *data)
u8 lr_buffer[OPAL_UID_LENGTH];
u8 user_uid[OPAL_UID_LENGTH];
struct opal_lock_unlock *lkul = data;
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
+ int err;
memcpy(lr_buffer, opaluid[OPAL_LOCKINGRANGE_ACE_RDLOCKED],
OPAL_UID_LENGTH);
@@ -1637,12 +1603,8 @@ static int add_user_to_lr(struct opal_dev *dev, void *data)
user_uid[7] = lkul->session.who;
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, lr_buffer, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET],
- OPAL_UID_LENGTH);
+ err = cmd_start(dev, lr_buffer, opalmethod[OPAL_SET]);
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
@@ -1680,7 +1642,6 @@ static int add_user_to_lr(struct opal_dev *dev, void *data)
add_token_u8(&err, dev, OPAL_ENDNAME);
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error building add user to locking range command.\n");
@@ -1697,9 +1658,6 @@ static int lock_unlock_locking_range(struct opal_dev *dev, void *data)
u8 read_locked = 1, write_locked = 1;
int err = 0;
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
if (build_locking_range(lr_buffer, sizeof(lr_buffer),
lkul->session.opal_key.lr) < 0)
return -ERANGE;
@@ -1714,17 +1672,15 @@ static int lock_unlock_locking_range(struct opal_dev *dev, void *data)
write_locked = 0;
break;
case OPAL_LK:
- /* vars are initalized to locked */
+ /* vars are initialized to locked */
break;
default:
pr_debug("Tried to set an invalid locking state... returning to uland\n");
return OPAL_INVAL_PARAM;
}
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, lr_buffer, OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_SET], OPAL_UID_LENGTH);
- add_token_u8(&err, dev, OPAL_STARTLIST);
+ err = cmd_start(dev, lr_buffer, opalmethod[OPAL_SET]);
+
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, OPAL_VALUES);
add_token_u8(&err, dev, OPAL_STARTLIST);
@@ -1741,7 +1697,6 @@ static int lock_unlock_locking_range(struct opal_dev *dev, void *data)
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
if (err) {
pr_debug("Error building SET command.\n");
@@ -1775,7 +1730,7 @@ static int lock_unlock_locking_range_sum(struct opal_dev *dev, void *data)
write_locked = 0;
break;
case OPAL_LK:
- /* vars are initalized to locked */
+ /* vars are initialized to locked */
break;
default:
pr_debug("Tried to set an invalid locking state.\n");
@@ -1796,17 +1751,10 @@ static int activate_lsp(struct opal_dev *dev, void *data)
struct opal_lr_act *opal_act = data;
u8 user_lr[OPAL_UID_LENGTH];
u8 uint_3 = 0x83;
- int err = 0, i;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_LOCKINGSP_UID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_ACTIVATE],
- OPAL_UID_LENGTH);
+ int err, i;
+ err = cmd_start(dev, opaluid[OPAL_LOCKINGSP_UID],
+ opalmethod[OPAL_ACTIVATE]);
if (opal_act->sum) {
err = build_locking_range(user_lr, sizeof(user_lr),
@@ -1814,7 +1762,6 @@ static int activate_lsp(struct opal_dev *dev, void *data)
if (err)
return err;
- add_token_u8(&err, dev, OPAL_STARTLIST);
add_token_u8(&err, dev, OPAL_STARTNAME);
add_token_u8(&err, dev, uint_3);
add_token_u8(&err, dev, 6);
@@ -1829,11 +1776,6 @@ static int activate_lsp(struct opal_dev *dev, void *data)
}
add_token_u8(&err, dev, OPAL_ENDLIST);
add_token_u8(&err, dev, OPAL_ENDNAME);
- add_token_u8(&err, dev, OPAL_ENDLIST);
-
- } else {
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
}
if (err) {
@@ -1844,17 +1786,19 @@ static int activate_lsp(struct opal_dev *dev, void *data)
return finalize_and_send(dev, parse_and_check_status);
}
-static int get_lsp_lifecycle_cont(struct opal_dev *dev)
+/* Determine if we're in the Manufactured Inactive or Active state */
+static int get_lsp_lifecycle(struct opal_dev *dev, void *data)
{
u8 lc_status;
- int error = 0;
+ int err;
- error = parse_and_check_status(dev);
- if (error)
- return error;
+ err = generic_get_column(dev, opaluid[OPAL_LOCKINGSP_UID],
+ OPAL_LIFECYCLE);
+ if (err)
+ return err;
lc_status = response_get_u64(&dev->parsed, 4);
- /* 0x08 is Manufacured Inactive */
+ /* 0x08 is Manufactured Inactive */
/* 0x09 is Manufactured */
if (lc_status != OPAL_MANUFACTURED_INACTIVE) {
pr_debug("Couldn't determine the status of the Lifecycle state\n");
@@ -1864,56 +1808,19 @@ static int get_lsp_lifecycle_cont(struct opal_dev *dev)
return 0;
}
-/* Determine if we're in the Manufactured Inactive or Active state */
-static int get_lsp_lifecycle(struct opal_dev *dev, void *data)
-{
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_LOCKINGSP_UID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_GET], OPAL_UID_LENGTH);
-
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_STARTLIST);
-
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 3); /* Start Column */
- add_token_u8(&err, dev, 6); /* Lifecycle Column */
- add_token_u8(&err, dev, OPAL_ENDNAME);
-
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 4); /* End Column */
- add_token_u8(&err, dev, 6); /* Lifecycle Column */
- add_token_u8(&err, dev, OPAL_ENDNAME);
-
- add_token_u8(&err, dev, OPAL_ENDLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
-
- if (err) {
- pr_debug("Error Building GET Lifecycle Status command\n");
- return err;
- }
-
- return finalize_and_send(dev, get_lsp_lifecycle_cont);
-}
-
-static int get_msid_cpin_pin_cont(struct opal_dev *dev)
+static int get_msid_cpin_pin(struct opal_dev *dev, void *data)
{
const char *msid_pin;
size_t strlen;
- int error = 0;
+ int err;
- error = parse_and_check_status(dev);
- if (error)
- return error;
+ err = generic_get_column(dev, opaluid[OPAL_C_PIN_MSID], OPAL_PIN);
+ if (err)
+ return err;
strlen = response_get_string(&dev->parsed, 4, &msid_pin);
if (!msid_pin) {
- pr_debug("%s: Couldn't extract PIN from response\n", __func__);
+ pr_debug("Couldn't extract MSID_CPIN from response\n");
return OPAL_INVAL_PARAM;
}
@@ -1926,42 +1833,6 @@ static int get_msid_cpin_pin_cont(struct opal_dev *dev)
return 0;
}
-static int get_msid_cpin_pin(struct opal_dev *dev, void *data)
-{
- int err = 0;
-
- clear_opal_cmd(dev);
- set_comid(dev, dev->comid);
-
- add_token_u8(&err, dev, OPAL_CALL);
- add_token_bytestring(&err, dev, opaluid[OPAL_C_PIN_MSID],
- OPAL_UID_LENGTH);
- add_token_bytestring(&err, dev, opalmethod[OPAL_GET], OPAL_UID_LENGTH);
-
- add_token_u8(&err, dev, OPAL_STARTLIST);
- add_token_u8(&err, dev, OPAL_STARTLIST);
-
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 3); /* Start Column */
- add_token_u8(&err, dev, 3); /* PIN */
- add_token_u8(&err, dev, OPAL_ENDNAME);
-
- add_token_u8(&err, dev, OPAL_STARTNAME);
- add_token_u8(&err, dev, 4); /* End Column */
- add_token_u8(&err, dev, 3); /* Lifecycle Column */
- add_token_u8(&err, dev, OPAL_ENDNAME);
-
- add_token_u8(&err, dev, OPAL_ENDLIST);
- add_token_u8(&err, dev, OPAL_ENDLIST);
-
- if (err) {
- pr_debug("Error building Get MSID CPIN PIN command.\n");
- return err;
- }
-
- return finalize_and_send(dev, get_msid_cpin_pin_cont);
-}
-
static int end_opal_session(struct opal_dev *dev, void *data)
{
int err = 0;
@@ -1977,18 +1848,14 @@ static int end_opal_session(struct opal_dev *dev, void *data)
static int end_opal_session_error(struct opal_dev *dev)
{
- const struct opal_step error_end_session[] = {
- { end_opal_session, },
- { NULL, }
+ const struct opal_step error_end_session = {
+ end_opal_session,
};
- dev->steps = error_end_session;
- return next(dev);
+ return execute_step(dev, &error_end_session, 0);
}
-static inline void setup_opal_dev(struct opal_dev *dev,
- const struct opal_step *steps)
+static inline void setup_opal_dev(struct opal_dev *dev)
{
- dev->steps = steps;
dev->tsn = 0;
dev->hsn = 0;
dev->prev_data = NULL;
@@ -1996,15 +1863,11 @@ static inline void setup_opal_dev(struct opal_dev *dev,
static int check_opal_support(struct opal_dev *dev)
{
- const struct opal_step steps[] = {
- { opal_discovery0, },
- { NULL, }
- };
int ret;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = opal_discovery0_step(dev);
dev->supported = !ret;
mutex_unlock(&dev->dev_lock);
return ret;
@@ -2057,18 +1920,16 @@ static int opal_secure_erase_locking_range(struct opal_dev *dev,
struct opal_session_info *opal_session)
{
const struct opal_step erase_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, opal_session },
{ get_active_key, &opal_session->opal_key.lr },
{ gen_key, },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, erase_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, erase_steps, ARRAY_SIZE(erase_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2077,17 +1938,15 @@ static int opal_erase_locking_range(struct opal_dev *dev,
struct opal_session_info *opal_session)
{
const struct opal_step erase_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, opal_session },
{ erase_locking_range, opal_session },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, erase_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, erase_steps, ARRAY_SIZE(erase_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2095,15 +1954,16 @@ static int opal_erase_locking_range(struct opal_dev *dev,
static int opal_enable_disable_shadow_mbr(struct opal_dev *dev,
struct opal_mbr_data *opal_mbr)
{
+ u8 enable_disable = opal_mbr->enable_disable == OPAL_MBR_ENABLE ?
+ OPAL_TRUE : OPAL_FALSE;
+
const struct opal_step mbr_steps[] = {
- { opal_discovery0, },
{ start_admin1LSP_opal_session, &opal_mbr->key },
- { set_mbr_done, &opal_mbr->enable_disable },
+ { set_mbr_done, &enable_disable },
{ end_opal_session, },
{ start_admin1LSP_opal_session, &opal_mbr->key },
- { set_mbr_enable_disable, &opal_mbr->enable_disable },
- { end_opal_session, },
- { NULL, }
+ { set_mbr_enable_disable, &enable_disable },
+ { end_opal_session, }
};
int ret;
@@ -2112,8 +1972,8 @@ static int opal_enable_disable_shadow_mbr(struct opal_dev *dev,
return -EINVAL;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, mbr_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, mbr_steps, ARRAY_SIZE(mbr_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2130,7 +1990,7 @@ static int opal_save(struct opal_dev *dev, struct opal_lock_unlock *lk_unlk)
suspend->lr = lk_unlk->session.opal_key.lr;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, NULL);
+ setup_opal_dev(dev);
add_suspend_info(dev, suspend);
mutex_unlock(&dev->dev_lock);
return 0;
@@ -2140,11 +2000,9 @@ static int opal_add_user_to_lr(struct opal_dev *dev,
struct opal_lock_unlock *lk_unlk)
{
const struct opal_step steps[] = {
- { opal_discovery0, },
{ start_admin1LSP_opal_session, &lk_unlk->session.opal_key },
{ add_user_to_lr, lk_unlk },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
@@ -2166,8 +2024,8 @@ static int opal_add_user_to_lr(struct opal_dev *dev,
}
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, steps, ARRAY_SIZE(steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2175,16 +2033,14 @@ static int opal_add_user_to_lr(struct opal_dev *dev,
static int opal_reverttper(struct opal_dev *dev, struct opal_key *opal)
{
const struct opal_step revert_steps[] = {
- { opal_discovery0, },
{ start_SIDASP_opal_session, opal },
- { revert_tper, }, /* controller will terminate session */
- { NULL, }
+ { revert_tper, } /* controller will terminate session */
};
int ret;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, revert_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, revert_steps, ARRAY_SIZE(revert_steps));
mutex_unlock(&dev->dev_lock);
/*
@@ -2201,37 +2057,34 @@ static int __opal_lock_unlock(struct opal_dev *dev,
struct opal_lock_unlock *lk_unlk)
{
const struct opal_step unlock_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, &lk_unlk->session },
{ lock_unlock_locking_range, lk_unlk },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
const struct opal_step unlock_sum_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, &lk_unlk->session },
{ lock_unlock_locking_range_sum, lk_unlk },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
- dev->steps = lk_unlk->session.sum ? unlock_sum_steps : unlock_steps;
- return next(dev);
+ if (lk_unlk->session.sum)
+ return execute_steps(dev, unlock_sum_steps,
+ ARRAY_SIZE(unlock_sum_steps));
+ else
+ return execute_steps(dev, unlock_steps,
+ ARRAY_SIZE(unlock_steps));
}
static int __opal_set_mbr_done(struct opal_dev *dev, struct opal_key *key)
{
- u8 mbr_done_tf = 1;
- const struct opal_step mbrdone_step [] = {
- { opal_discovery0, },
+ u8 mbr_done_tf = OPAL_TRUE;
+ const struct opal_step mbrdone_step[] = {
{ start_admin1LSP_opal_session, key },
{ set_mbr_done, &mbr_done_tf },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
- dev->steps = mbrdone_step;
- return next(dev);
+ return execute_steps(dev, mbrdone_step, ARRAY_SIZE(mbrdone_step));
}
static int opal_lock_unlock(struct opal_dev *dev,
@@ -2252,14 +2105,12 @@ static int opal_lock_unlock(struct opal_dev *dev,
static int opal_take_ownership(struct opal_dev *dev, struct opal_key *opal)
{
const struct opal_step owner_steps[] = {
- { opal_discovery0, },
{ start_anybodyASP_opal_session, },
{ get_msid_cpin_pin, },
{ end_opal_session, },
{ start_SIDASP_opal_session, opal },
{ set_sid_cpin_pin, opal },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
@@ -2267,21 +2118,20 @@ static int opal_take_ownership(struct opal_dev *dev, struct opal_key *opal)
return -ENODEV;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, owner_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, owner_steps, ARRAY_SIZE(owner_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
-static int opal_activate_lsp(struct opal_dev *dev, struct opal_lr_act *opal_lr_act)
+static int opal_activate_lsp(struct opal_dev *dev,
+ struct opal_lr_act *opal_lr_act)
{
const struct opal_step active_steps[] = {
- { opal_discovery0, },
{ start_SIDASP_opal_session, &opal_lr_act->key },
{ get_lsp_lifecycle, },
{ activate_lsp, opal_lr_act },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
@@ -2289,8 +2139,8 @@ static int opal_activate_lsp(struct opal_dev *dev, struct opal_lr_act *opal_lr_a
return -EINVAL;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, active_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, active_steps, ARRAY_SIZE(active_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2299,17 +2149,15 @@ static int opal_setup_locking_range(struct opal_dev *dev,
struct opal_user_lr_setup *opal_lrs)
{
const struct opal_step lr_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, &opal_lrs->session },
{ setup_locking_range, opal_lrs },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, lr_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, lr_steps, ARRAY_SIZE(lr_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2317,11 +2165,9 @@ static int opal_setup_locking_range(struct opal_dev *dev,
static int opal_set_new_pw(struct opal_dev *dev, struct opal_new_pw *opal_pw)
{
const struct opal_step pw_steps[] = {
- { opal_discovery0, },
{ start_auth_opal_session, &opal_pw->session },
{ set_new_pw, &opal_pw->new_user_pw },
- { end_opal_session, },
- { NULL }
+ { end_opal_session, }
};
int ret;
@@ -2332,8 +2178,8 @@ static int opal_set_new_pw(struct opal_dev *dev, struct opal_new_pw *opal_pw)
return -EINVAL;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, pw_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, pw_steps, ARRAY_SIZE(pw_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2342,11 +2188,9 @@ static int opal_activate_user(struct opal_dev *dev,
struct opal_session_info *opal_session)
{
const struct opal_step act_steps[] = {
- { opal_discovery0, },
{ start_admin1LSP_opal_session, &opal_session->opal_key },
{ internal_activate_user, opal_session },
- { end_opal_session, },
- { NULL, }
+ { end_opal_session, }
};
int ret;
@@ -2358,8 +2202,8 @@ static int opal_activate_user(struct opal_dev *dev,
}
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, act_steps);
- ret = next(dev);
+ setup_opal_dev(dev);
+ ret = execute_steps(dev, act_steps, ARRAY_SIZE(act_steps));
mutex_unlock(&dev->dev_lock);
return ret;
}
@@ -2376,7 +2220,7 @@ bool opal_unlock_from_suspend(struct opal_dev *dev)
return false;
mutex_lock(&dev->dev_lock);
- setup_opal_dev(dev, NULL);
+ setup_opal_dev(dev);
list_for_each_entry(suspend, &dev->unlk_lst, node) {
dev->tsn = 0;
diff --git a/block/t10-pi.c b/block/t10-pi.c
index 62aed77d0bb9..0c0094609dd6 100644
--- a/block/t10-pi.c
+++ b/block/t10-pi.c
@@ -1,24 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* t10_pi.c - Functions for generating and verifying T10 Protection
* Information.
- *
- * Copyright (C) 2007, 2008, 2014 Oracle Corporation
- * Written by: Martin K. Petersen <martin.petersen@oracle.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, write to
- * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
- * USA.
- *
*/
#include <linux/t10-pi.h>