Diffstat (limited to 'block')
-rw-r--r--  block/Kconfig         5
-rw-r--r--  block/blk-iocost.c    5
-rw-r--r--  block/blk-pm.c       41
-rw-r--r--  block/blk-settings.c 37
-rw-r--r--  block/blk-sysfs.c    27
-rw-r--r--  block/blk-zoned.c     4
6 files changed, 61 insertions(+), 58 deletions(-)
diff --git a/block/Kconfig b/block/Kconfig
index 9357d7302398..bbad5e8bbffe 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -86,9 +86,10 @@ config BLK_DEV_ZONED
select MQ_IOSCHED_DEADLINE
help
Block layer zoned block device support. This option enables
- support for ZAC/ZBC host-managed and host-aware zoned block devices.
+ support for ZAC/ZBC/ZNS host-managed and host-aware zoned block
+ devices.
- Say yes here if you have a ZAC or ZBC storage device.
+ Say yes here if you have a ZAC, ZBC, or ZNS storage device.
config BLK_DEV_THROTTLING
bool "Block layer bio throttling support"
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 521c29b8ae29..413e0b5c8e6b 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -406,7 +406,7 @@ struct ioc {
enum ioc_running running;
atomic64_t vtime_rate;
- seqcount_t period_seqcount;
+ seqcount_spinlock_t period_seqcount;
u32 period_at; /* wallclock starttime */
u64 period_at_vtime; /* vtime starttime */
@@ -873,7 +873,6 @@ static void ioc_now(struct ioc *ioc, struct ioc_now *now)
static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
{
- lockdep_assert_held(&ioc->lock);
WARN_ON_ONCE(ioc->running != IOC_RUNNING);
write_seqcount_begin(&ioc->period_seqcount);
@@ -2001,7 +2000,7 @@ static int blk_iocost_init(struct request_queue *q)
ioc->running = IOC_IDLE;
atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
- seqcount_init(&ioc->period_seqcount);
+ seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
ioc->period_at = ktime_to_us(ktime_get());
atomic64_set(&ioc->cur_period, 0);
atomic_set(&ioc->hweight_gen, 0);
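The seqcount_spinlock_t variant associates the seqcount with ioc->lock at init time, so lockdep itself verifies that writers hold the lock; that is why the explicit lockdep_assert_held() in ioc_start_period() becomes redundant. A minimal sketch of the pattern, assuming a hypothetical stats structure in place of struct ioc:

#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical structure mirroring the ioc pattern. */
struct stats {
        spinlock_t lock;
        seqcount_spinlock_t seq;        /* writers must hold ->lock */
        u64 a, b;
};

static void stats_init(struct stats *s)
{
        spin_lock_init(&s->lock);
        /* Associate the seqcount with its serializing lock for lockdep. */
        seqcount_spinlock_init(&s->seq, &s->lock);
}

static void stats_update(struct stats *s, u64 a, u64 b)
{
        spin_lock_irq(&s->lock);
        write_seqcount_begin(&s->seq);  /* lockdep checks ->lock is held */
        s->a = a;
        s->b = b;
        write_seqcount_end(&s->seq);
        spin_unlock_irq(&s->lock);
}

static u64 stats_read_sum(struct stats *s)
{
        unsigned int start;
        u64 sum;

        /* Lockless reader: retry if a writer raced with us. */
        do {
                start = read_seqcount_begin(&s->seq);
                sum = s->a + s->b;
        } while (read_seqcount_retry(&s->seq, start));

        return sum;
}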
diff --git a/block/blk-pm.c b/block/blk-pm.c
index 1adc1cd748b4..b85234d758f7 100644
--- a/block/blk-pm.c
+++ b/block/blk-pm.c
@@ -164,9 +164,8 @@ EXPORT_SYMBOL(blk_pre_runtime_resume);
*
* Description:
* Update the queue's runtime status according to the return value of the
- * device's runtime_resume function. If it is successfully resumed, process
- * the requests that are queued into the device's queue when it is resuming
- * and then mark last busy and initiate autosuspend for it.
+ * device's runtime_resume function. If the resume was successful, call
+ * blk_set_runtime_active() to do the real work of restarting the queue.
*
* This function should be called near the end of the device's
* runtime_resume callback.
@@ -175,19 +174,13 @@ void blk_post_runtime_resume(struct request_queue *q, int err)
{
if (!q->dev)
return;
-
- spin_lock_irq(&q->queue_lock);
if (!err) {
- q->rpm_status = RPM_ACTIVE;
- pm_runtime_mark_last_busy(q->dev);
- pm_request_autosuspend(q->dev);
+ blk_set_runtime_active(q);
} else {
+ spin_lock_irq(&q->queue_lock);
q->rpm_status = RPM_SUSPENDED;
+ spin_unlock_irq(&q->queue_lock);
}
- spin_unlock_irq(&q->queue_lock);
-
- if (!err)
- blk_clear_pm_only(q);
}
EXPORT_SYMBOL(blk_post_runtime_resume);
@@ -204,15 +197,25 @@ EXPORT_SYMBOL(blk_post_runtime_resume);
 * This function can be used in a driver's resume hook to correct the queue's
 * runtime PM status and re-enable peeking requests from the queue. It
 * should be called before the first request is added to the queue.
+ *
+ * This function is also called by blk_post_runtime_resume() for successful
+ * runtime resumes. It does everything necessary to restart the queue.
*/
void blk_set_runtime_active(struct request_queue *q)
{
- if (q->dev) {
- spin_lock_irq(&q->queue_lock);
- q->rpm_status = RPM_ACTIVE;
- pm_runtime_mark_last_busy(q->dev);
- pm_request_autosuspend(q->dev);
- spin_unlock_irq(&q->queue_lock);
- }
+ int old_status;
+
+ if (!q->dev)
+ return;
+
+ spin_lock_irq(&q->queue_lock);
+ old_status = q->rpm_status;
+ q->rpm_status = RPM_ACTIVE;
+ pm_runtime_mark_last_busy(q->dev);
+ pm_request_autosuspend(q->dev);
+ spin_unlock_irq(&q->queue_lock);
+
+ if (old_status != RPM_ACTIVE)
+ blk_clear_pm_only(q);
}
EXPORT_SYMBOL(blk_set_runtime_active);
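A sketch of how a driver's runtime-PM callback would use the simplified helper; the mydev structure and mydev_wake_hw() are hypothetical, while blk_post_runtime_resume() is the API changed above:

#include <linux/blk-pm.h>
#include <linux/device.h>

/* Hypothetical driver state; only ->queue matters for this sketch. */
struct mydev {
        struct request_queue *queue;
};

static int mydev_wake_hw(struct mydev *md)
{
        /* Hypothetical: bring the hardware out of its low-power state. */
        return 0;
}

static int mydev_runtime_resume(struct device *dev)
{
        struct mydev *md = dev_get_drvdata(dev);
        int err = mydev_wake_hw(md);

        /*
         * On success this now ends up in blk_set_runtime_active(), which
         * sets RPM_ACTIVE, clears pm-only mode, and kicks autosuspend.
         */
        blk_post_runtime_resume(md->queue, err);
        return err;
}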
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 9a2c23cd9700..76a7e03bcd6c 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -456,17 +456,6 @@ void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
EXPORT_SYMBOL(blk_queue_io_opt);
/**
- * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
- * @t: the stacking driver (top)
- * @b: the underlying device (bottom)
- **/
-void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
-{
- blk_stack_limits(&t->limits, &b->limits, 0);
-}
-EXPORT_SYMBOL(blk_queue_stack_limits);
-
-/**
* blk_stack_limits - adjust queue_limits for stacked devices
* @t: the stacking driver limits (top device)
* @b: the underlying queue limits (bottom, component device)
@@ -609,33 +598,12 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->chunk_sectors = min_not_zero(t->chunk_sectors,
b->chunk_sectors);
+ t->zoned = max(t->zoned, b->zoned);
return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
/**
- * bdev_stack_limits - adjust queue limits for stacked drivers
- * @t: the stacking driver limits (top device)
- * @bdev: the component block_device (bottom)
- * @start: first data sector within component device
- *
- * Description:
- * Merges queue limits for a top device and a block_device. Returns
- * 0 if alignment didn't change. Returns -1 if adding the bottom
- * device caused misalignment.
- */
-int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
- sector_t start)
-{
- struct request_queue *bq = bdev_get_queue(bdev);
-
- start += get_start_sect(bdev);
-
- return blk_stack_limits(t, &bq->limits, start);
-}
-EXPORT_SYMBOL(bdev_stack_limits);
-
-/**
* disk_stack_limits - adjust queue limits for stacked drivers
* @disk: MD/DM gendisk (top)
* @bdev: the underlying block device (bottom)
@@ -650,7 +618,8 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
{
struct request_queue *t = disk->queue;
- if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
+ if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
+ get_start_sect(bdev) + (offset >> 9)) < 0) {
char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
disk_name(disk, 0, top);
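With blk_queue_stack_limits() and bdev_stack_limits() removed, stacking drivers go through disk_stack_limits(), or call blk_stack_limits() directly with the partition start folded into the offset. A sketch of the surviving call; my_stack_component() is a hypothetical wrapper:

#include <linux/blkdev.h>

/*
 * Sketch (assumption): an MD/DM-style stacking driver folding a component
 * device's limits into the top-level disk.
 */
static void my_stack_component(struct gendisk *top, struct block_device *bdev,
                               sector_t start_sector)
{
        /*
         * The offset argument is in bytes; disk_stack_limits() now adds
         * the partition start via get_start_sect() itself, which is what
         * made bdev_stack_limits() redundant.
         */
        disk_stack_limits(top, bdev, start_sector << 9);
}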
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index be67952e7be2..7dda709f3ccb 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -306,6 +306,16 @@ static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
return queue_var_show(blk_queue_nr_zones(q), page);
}
+static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(queue_max_open_zones(q), page);
+}
+
+static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(queue_max_active_zones(q), page);
+}
+
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
return queue_var_show((blk_queue_nomerges(q) << 1) |
@@ -668,6 +678,16 @@ static struct queue_sysfs_entry queue_nr_zones_entry = {
.show = queue_nr_zones_show,
};
+static struct queue_sysfs_entry queue_max_open_zones_entry = {
+ .attr = {.name = "max_open_zones", .mode = 0444 },
+ .show = queue_max_open_zones_show,
+};
+
+static struct queue_sysfs_entry queue_max_active_zones_entry = {
+ .attr = {.name = "max_active_zones", .mode = 0444 },
+ .show = queue_max_active_zones_show,
+};
+
static struct queue_sysfs_entry queue_nomerges_entry = {
.attr = {.name = "nomerges", .mode = 0644 },
.show = queue_nomerges_show,
@@ -766,6 +786,8 @@ static struct attribute *queue_attrs[] = {
&queue_nonrot_entry.attr,
&queue_zoned_entry.attr,
&queue_nr_zones_entry.attr,
+ &queue_max_open_zones_entry.attr,
+ &queue_max_active_zones_entry.attr,
&queue_nomerges_entry.attr,
&queue_rq_affinity_entry.attr,
&queue_iostats_entry.attr,
@@ -793,6 +815,11 @@ static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
(!q->mq_ops || !q->mq_ops->timeout))
return 0;
+ if ((attr == &queue_max_open_zones_entry.attr ||
+ attr == &queue_max_active_zones_entry.attr) &&
+ !blk_queue_is_zoned(q))
+ return 0;
+
return attr->mode;
}
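The two new attributes are read-only (mode 0444) and hidden entirely on non-zoned queues by queue_attr_visible(). A userspace sketch that reads them; the device name nvme0n1 is an example, and a missing file means the queue is not zoned:

#include <stdio.h>

static long read_queue_attr(const char *dev, const char *attr)
{
        char path[256];
        long val = -1;
        FILE *f;

        snprintf(path, sizeof(path), "/sys/block/%s/queue/%s", dev, attr);
        f = fopen(path, "r");
        if (!f)
                return -1;      /* attribute hidden: not a zoned queue */
        if (fscanf(f, "%ld", &val) != 1)
                val = -1;
        fclose(f);
        return val;
}

int main(void)
{
        printf("max_open_zones: %ld\n",
               read_queue_attr("nvme0n1", "max_open_zones"));
        printf("max_active_zones: %ld\n",
               read_queue_attr("nvme0n1", "max_active_zones"));
        return 0;
}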
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 23831fa8701d..6817a673e5ce 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -312,6 +312,7 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
return ret;
rep.nr_zones = ret;
+ rep.flags = BLK_ZONE_REP_CAPACITY;
if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
return -EFAULT;
return 0;
@@ -497,6 +498,9 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
if (WARN_ON_ONCE(!queue_is_mq(q)))
return -EIO;
+ if (!get_capacity(disk))
+ return -EIO;
+
/*
* Ensure that all memory allocations in this context are done as if
* GFP_NOIO was specified.
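Setting BLK_ZONE_REP_CAPACITY in the report header tells userspace that the capacity field of each returned struct blk_zone is valid. A userspace sketch that checks the flag after a BLKREPORTZONE ioctl; the device path is an example:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(void)
{
        struct blk_zone_report *rep;
        int fd = open("/dev/nvme0n1", O_RDONLY);

        if (fd < 0)
                return 1;

        /* Room for the header plus one zone descriptor. */
        rep = calloc(1, sizeof(*rep) + sizeof(struct blk_zone));
        rep->sector = 0;
        rep->nr_zones = 1;

        if (ioctl(fd, BLKREPORTZONE, rep) == 0 && rep->nr_zones) {
                if (rep->flags & BLK_ZONE_REP_CAPACITY)
                        printf("zone 0 capacity: %llu sectors\n",
                               (unsigned long long)rep->zones[0].capacity);
                else
                        printf("kernel does not report zone capacity\n");
        }

        free(rep);
        close(fd);
        return 0;
}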