Skip to content

Commit 4424f86

Browse files
committed
zvol: Fix blk-mq sync
The zvol blk-mq codepaths would erroneously send FLUSH and TRIM commands down the read codepath, rather than write. This fixes the issue, and updates the zvol_misc_fua test to verify that sync writes are actually happening. Reviewed-by: Brian Behlendorf <[email protected]> Reviewed-by: Alexander Motin <[email protected]> Reviewed-by: Ameer Hamza <[email protected]> Signed-off-by: Tony Hutter <[email protected]> Closes openzfs#17761 Closes openzfs#17765
1 parent e2642ed commit 4424f86

File tree

3 files changed

+61
-20
lines changed

3 files changed

+61
-20
lines changed

include/os/linux/kernel/linux/blkdev_compat.h

Lines changed: 0 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -542,24 +542,6 @@ blk_generic_alloc_queue(make_request_fn make_request, int node_id)
542542
}
543543
#endif /* !HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
544544

545-
/*
546-
* All the io_*() helper functions below can operate on a bio, or a rq, but
547-
* not both. The older submit_bio() codepath will pass a bio, and the
548-
* newer blk-mq codepath will pass a rq.
549-
*/
550-
static inline int
551-
io_data_dir(struct bio *bio, struct request *rq)
552-
{
553-
if (rq != NULL) {
554-
if (op_is_write(req_op(rq))) {
555-
return (WRITE);
556-
} else {
557-
return (READ);
558-
}
559-
}
560-
return (bio_data_dir(bio));
561-
}
562-
563545
static inline int
564546
io_is_flush(struct bio *bio, struct request *rq)
565547
{

module/os/linux/zfs/zvol_os.c

Lines changed: 22 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -523,7 +523,28 @@ zvol_request_impl(zvol_state_t *zv, struct bio *bio, struct request *rq,
523523
fstrans_cookie_t cookie = spl_fstrans_mark();
524524
uint64_t offset = io_offset(bio, rq);
525525
uint64_t size = io_size(bio, rq);
526-
int rw = io_data_dir(bio, rq);
526+
int rw;
527+
528+
if (rq != NULL) {
529+
/*
530+
* Flush & trim requests go down the zvol_write codepath. Or
531+
* more specifically:
532+
*
533+
* If request is a write, or if it's op_is_sync() and not a
534+
* read, or if it's a flush, or if it's a discard, then send the
535+
* request down the write path.
536+
*/
537+
if (op_is_write(rq->cmd_flags) ||
538+
(op_is_sync(rq->cmd_flags) && req_op(rq) != REQ_OP_READ) ||
539+
req_op(rq) == REQ_OP_FLUSH ||
540+
op_is_discard(rq->cmd_flags)) {
541+
rw = WRITE;
542+
} else {
543+
rw = READ;
544+
}
545+
} else {
546+
rw = bio_data_dir(bio);
547+
}
527548

528549
if (unlikely(zv->zv_flags & ZVOL_REMOVING)) {
529550
zvol_end_io(bio, rq, -SET_ERROR(ENXIO));

tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_fua.ksh

Lines changed: 39 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -50,17 +50,53 @@ fi
5050

5151
typeset datafile1="$(mktemp -t zvol_misc_fua1.XXXXXX)"
5252
typeset datafile2="$(mktemp -t zvol_misc_fua2.XXXXXX)"
53+
typeset datafile3="$(mktemp -t zvol_misc_fua3_log.XXXXXX)"
5354
typeset zvolpath=${ZVOL_DEVDIR}/$TESTPOOL/$TESTVOL
5455

56+
typeset DISK1=${DISKS%% *}
5557
function cleanup
5658
{
57-
rm "$datafile1" "$datafile2"
59+
log_must zpool remove $TESTPOOL $datafile3
60+
rm "$datafile1" "$datafile2" "$datafile3"
61+
}
62+
63+
# Prints the total number of sync writes for a vdev
64+
# $1: vdev
65+
function get_sync
66+
{
67+
zpool iostat -p -H -v -r $TESTPOOL $1 | \
68+
awk '/[0-9]+$/{s+=$4+$5} END{print s}'
5869
}
5970

6071
function do_test {
6172
# Wait for udev to create symlinks to our zvol
6273
block_device_wait $zvolpath
6374

75+
# Write using sync (creates FLUSH calls after writes, but not FUA)
76+
old_vdev_writes=$(get_sync $DISK1)
77+
old_log_writes=$(get_sync $datafile3)
78+
79+
log_must fio --name=write_iops --size=5M \
80+
--ioengine=libaio --verify=0 --bs=4K \
81+
--iodepth=1 --rw=randwrite --group_reporting=1 \
82+
--filename=$zvolpath --sync=1
83+
84+
vdev_writes=$(( $(get_sync $DISK1) - $old_vdev_writes))
85+
log_writes=$(( $(get_sync $datafile3) - $old_log_writes))
86+
87+
# When we're doing sync writes, we should see many more writes go to
88+
# the log vs the first vdev. Experiments show anywhere from a 160-320x
89+
# ratio of writes to the log vs the first vdev (due to some straggler
90+
# writes to the first vdev).
91+
#
92+
# Check that we have a large ratio (100x) of sync writes going to the
93+
# log device
94+
ratio=$(($log_writes / $vdev_writes))
95+
log_note "Got $log_writes log writes, $vdev_writes vdev writes."
96+
if [ $ratio -lt 100 ] ; then
97+
log_fail "Expected > 100x more log writes than vdev writes. "
98+
fi
99+
64100
# Create a data file
65101
log_must dd if=/dev/urandom of="$datafile1" bs=1M count=5
66102

@@ -81,6 +117,8 @@ log_assert "Verify that a ZFS volume can do Force Unit Access (FUA)"
81117
log_onexit cleanup
82118

83119
log_must zfs set compression=off $TESTPOOL/$TESTVOL
120+
log_must truncate -s 100M $datafile3
121+
log_must zpool add $TESTPOOL log $datafile3
84122

85123
log_note "Testing without blk-mq"
86124

0 commit comments

Comments
 (0)