Skip to content

Instantly share code, notes, and snippets.

@anarazel
Created March 3, 2021 18:27
Show Gist options
  • Save anarazel/a18e55d803f0b8cdc63586207eafb124 to your computer and use it in GitHub Desktop.
commit 9fe21de5134c4321667eb2aa03f3f3d0d0524ee7
Author: Andres Freund <andres@anarazel.de>
Date: 2020-12-28 11:46:36 -0800
WIP: block: Allow disabling fua support via sysfs.
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 76c1624cb06c..914f8748fa20 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -105,7 +105,7 @@ static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
if (fflags & (1UL << QUEUE_FLAG_WC)) {
if (rq->cmd_flags & REQ_PREFLUSH)
policy |= REQ_FSEQ_PREFLUSH;
- if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
+ if (!(fflags & (1UL << QUEUE_FLAG_FUA_ENABLED)) &&
(rq->cmd_flags & REQ_FUA))
policy |= REQ_FSEQ_POSTFLUSH;
}
@@ -379,7 +379,7 @@ void blk_insert_flush(struct request *rq)
* REQ_PREFLUSH and FUA for the driver.
*/
rq->cmd_flags &= ~REQ_PREFLUSH;
- if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
+ if (!(fflags & (1UL << QUEUE_FLAG_FUA_ENABLED)))
rq->cmd_flags &= ~REQ_FUA;
/*
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 4de03da9a624..c843e240d4f7 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -119,7 +119,7 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(STABLE_WRITES),
QUEUE_FLAG_NAME(POLL),
QUEUE_FLAG_NAME(WC),
- QUEUE_FLAG_NAME(FUA),
+ QUEUE_FLAG_NAME(FUA_HW),
QUEUE_FLAG_NAME(DAX),
QUEUE_FLAG_NAME(STATS),
QUEUE_FLAG_NAME(POLL_STATS),
@@ -130,6 +130,7 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(ZONE_RESETALL),
QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
QUEUE_FLAG_NAME(NOWAIT),
+ QUEUE_FLAG_NAME(FUA_ENABLED),
};
#undef QUEUE_FLAG_NAME
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 43990b1d148b..8bf0c8a60300 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -786,10 +786,14 @@ void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
blk_queue_flag_set(QUEUE_FLAG_WC, q);
else
blk_queue_flag_clear(QUEUE_FLAG_WC, q);
-	if (fua)
-		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
-	else
-		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
+	if (fua) {
+		/* Record both hardware capability and the current FUA policy. */
+		blk_queue_flag_set(QUEUE_FLAG_FUA_HW, q);
+		blk_queue_flag_set(QUEUE_FLAG_FUA_ENABLED, q);
+	} else {
+		blk_queue_flag_clear(QUEUE_FLAG_FUA_HW, q);
+		blk_queue_flag_clear(QUEUE_FLAG_FUA_ENABLED, q);
+	}
wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index b513f1683af0..e9562ed6ac37 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -540,7 +540,31 @@ static ssize_t queue_wc_store(struct request_queue *q, const char *page,
static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
- return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
+ return sprintf(page, "%u\n",
+ test_bit(QUEUE_FLAG_FUA_ENABLED, &q->queue_flags));
+}
+
+/* Sysfs store handler for /sys/block/<dev>/queue/fua: enable/disable FUA. */
+static ssize_t queue_fua_store(struct request_queue *q, const char *page, size_t count)
+{
+	int ret;
+	unsigned long set_fua_to;
+
+	ret = queue_var_store(&set_fua_to, page, count);
+	if (ret < 0)
+		return ret;
+
+	if (set_fua_to != 0 && set_fua_to != 1)
+		return -EINVAL;
+
+	if (set_fua_to && !test_bit(QUEUE_FLAG_FUA_HW, &q->queue_flags))
+		return -EOPNOTSUPP; /* not ENOTSUPP: that errno must not reach userspace */
+	if (set_fua_to)
+		blk_queue_flag_set(QUEUE_FLAG_FUA_ENABLED, q);
+	else
+		blk_queue_flag_clear(QUEUE_FLAG_FUA_ENABLED, q);
+
+	return count;
+}
static ssize_t queue_dax_show(struct request_queue *q, char *page)
@@ -596,7 +620,7 @@ QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
-QUEUE_RO_ENTRY(queue_fua, "fua");
+QUEUE_RW_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 4acf2342f7ad..9c8f2720bff5 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1832,7 +1832,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
wc = true;
- if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
+ if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA_HW)))
fua = true;
}
blk_queue_write_cache(q, wc, fua);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 8ed93fd205c7..ae0fd98f6879 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -700,7 +700,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
* is not enabled, or if initiator set the Force Unit Access bit.
*/
op = REQ_OP_WRITE;
- if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
+ if (test_bit(QUEUE_FLAG_FUA_ENABLED, &q->queue_flags)) {
if (cmd->se_cmd_flags & SCF_FUA)
op_flags = REQ_FUA;
else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f94ee3089e01..6d30e193fd14 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -609,7 +609,7 @@ struct request_queue {
#define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */
#define QUEUE_FLAG_WC 17 /* Write back caching */
-#define QUEUE_FLAG_FUA 18 /* device supports FUA writes */
+#define QUEUE_FLAG_FUA_HW 18 /* device supports FUA writes */
#define QUEUE_FLAG_DAX 19 /* device supports DAX */
#define QUEUE_FLAG_STATS 20 /* track IO start and completion times */
#define QUEUE_FLAG_POLL_STATS 21 /* collecting stats for hybrid polling */
@@ -621,6 +621,7 @@ struct request_queue {
#define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */
#define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */
#define QUEUE_FLAG_NOWAIT 29 /* device supports NOWAIT */
+#define QUEUE_FLAG_FUA_ENABLED 30 /* FUA writes are enabled */
#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_SAME_COMP) | \
@@ -664,7 +665,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
-#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
+#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA_ENABLED, &(q)->queue_flags)
#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_nowait(q) test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment