#include <linux/task_io_accounting_ops.h>
#include <linux/workqueue.h>
#include <linux/blk-mq.h>
+ #include <linux/blk_types.h>	/* for 'enum req_opf' */

static void zvol_request_impl(zvol_state_t *zv, struct bio *bio,
    struct request *rq, boolean_t force_sync);
@@ -337,16 +338,14 @@ zvol_discard(zv_request_t *zvr)
	}

	/*
- 	 * Align the request to volume block boundaries when a secure erase is
- 	 * not required. This will prevent dnode_free_range() from zeroing out
- 	 * the unaligned parts which is slow (read-modify-write) and useless
- 	 * since we are not freeing any space by doing so.
+ 	 * Align the request to volume block boundaries. This will prevent
+ 	 * dnode_free_range() from zeroing out the unaligned parts which is
+ 	 * slow (read-modify-write) and useless since we are not freeing any
+ 	 * space by doing so.
	 */
- 	if (!io_is_secure_erase(bio, rq)) {
- 		start = P2ROUNDUP(start, zv->zv_volblocksize);
- 		end = P2ALIGN_TYPED(end, zv->zv_volblocksize, uint64_t);
- 		size = end - start;
- 	}
+ 	start = P2ROUNDUP(start, zv->zv_volblocksize);
+ 	end = P2ALIGN_TYPED(end, zv->zv_volblocksize, uint64_t);
+ 	size = end - start;

	if (start >= end)
		goto unlock;
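A quick illustration (not part of the patch): the alignment above frees only whole volume blocks. The sketch below is a standalone userspace rendering of that arithmetic, using local stand-ins for the ZFS P2ROUNDUP()/P2ALIGN_TYPED() macros (valid for power-of-two alignments, which zv_volblocksize always is); the block size and range values are made up for illustration.

/* Standalone sketch of the discard-range alignment; not kernel/ZFS code. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the ZFS macros, valid for power-of-two alignments. */
#define	P2ROUNDUP(x, align)	(-(-(uint64_t)(x) & -(uint64_t)(align)))
#define	P2ALIGN(x, align)	((uint64_t)(x) & -(uint64_t)(align))

int
main(void)
{
	uint64_t volblocksize = 8192;		/* example zv_volblocksize */
	uint64_t start = 4096;			/* unaligned discard start */
	uint64_t end = 20480;			/* unaligned discard end */

	start = P2ROUNDUP(start, volblocksize);	/* 4096  -> 8192 */
	end = P2ALIGN(end, volblocksize);	/* 20480 -> 16384 */

	/* Only the fully covered block [8192, 16384) is freed. */
	printf("start=%" PRIu64 " end=%" PRIu64 " size=%" PRIu64 "\n",
	    start, end, end - start);
	return (0);
}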
@@ -467,6 +466,24 @@ zvol_read_task(void *arg)
	zv_request_task_free(task);
}

+ /*
+  * Note:
+  *
+  * The kernel uses different enum names for the IO opcode, depending on the
+  * kernel version ('req_opf', 'req_op'). To sidestep this, use macros rather
+  * than inline functions for these checks.
+  */
+ /* Should this IO go down the zvol write path? */
+ #define	ZVOL_OP_IS_WRITE(op) \
+ 	(op == REQ_OP_WRITE || \
+ 	op == REQ_OP_FLUSH || \
+ 	op == REQ_OP_DISCARD)
+
+ /* Is this IO type supported by zvols? */
+ #define	ZVOL_OP_IS_SUPPORTED(op) (op == REQ_OP_READ || ZVOL_OP_IS_WRITE(op))
+
+ /* Get the IO opcode */
+ #define	ZVOL_OP(bio, rq) (bio != NULL ? bio_op(bio) : req_op(rq))

/*
 * Process a BIO or request
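A quick illustration (not part of the patch): how the new macros classify opcodes. This is a standalone sketch with stand-in REQ_OP_* values; in the kernel they come from <linux/blk_types.h> and their names and numeric values vary by version, which is why the patch uses untyped macros instead of inline functions.

/*
 * Standalone sketch of the opcode classification; the REQ_OP_* values are
 * stand-ins, not taken from any particular kernel.
 */
#include <stdio.h>

enum { REQ_OP_READ = 0, REQ_OP_WRITE = 1, REQ_OP_DISCARD = 3, REQ_OP_FLUSH = 35 };

#define	ZVOL_OP_IS_WRITE(op) \
	(op == REQ_OP_WRITE || op == REQ_OP_FLUSH || op == REQ_OP_DISCARD)
#define	ZVOL_OP_IS_SUPPORTED(op) (op == REQ_OP_READ || ZVOL_OP_IS_WRITE(op))

int
main(void)
{
	int ops[] = { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_FLUSH, REQ_OP_DISCARD, 9 };

	for (size_t i = 0; i < sizeof (ops) / sizeof (ops[0]); i++) {
		printf("op=%d supported=%d write_path=%d\n", ops[i],
		    ZVOL_OP_IS_SUPPORTED(ops[i]), ZVOL_OP_IS_WRITE(ops[i]));
	}
	return (0);
}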
@@ -486,27 +503,30 @@ zvol_request_impl(zvol_state_t *zv, struct bio *bio, struct request *rq,
	uint64_t size = io_size(bio, rq);
	int rw;

- 	if (rq != NULL) {
- 		/*
- 		 * Flush & trim requests go down the zvol_write codepath. Or
- 		 * more specifically:
- 		 *
- 		 * If request is a write, or if it's op_is_sync() and not a
- 		 * read, or if it's a flush, or if it's a discard, then send the
- 		 * request down the write path.
- 		 */
- 		if (op_is_write(rq->cmd_flags) ||
- 		    (op_is_sync(rq->cmd_flags) && req_op(rq) != REQ_OP_READ) ||
- 		    req_op(rq) == REQ_OP_FLUSH ||
- 		    op_is_discard(rq->cmd_flags)) {
- 			rw = WRITE;
- 		} else {
- 			rw = READ;
- 		}
+ 	if (unlikely(!ZVOL_OP_IS_SUPPORTED(ZVOL_OP(bio, rq)))) {
+ 		zfs_dbgmsg("Unsupported zvol IO, op=%d, cmd_flags=0x%x",
+ 		    ZVOL_OP(bio, rq), rq->cmd_flags);
+ 		ASSERT(ZVOL_OP_IS_SUPPORTED(ZVOL_OP(bio, rq)));
+ 		zvol_end_io(bio, rq, SET_ERROR(ENOTSUPP));
+ 		goto out;
+ 	}
+
+ 	if (ZVOL_OP_IS_WRITE(ZVOL_OP(bio, rq))) {
+ 		rw = WRITE;
	} else {
- 		rw = bio_data_dir(bio);
+ 		rw = READ;
	}

+ 	/*
+ 	 * Sanity check
+ 	 *
+ 	 * If we're a BIO, check our rw matches the kernel's
+ 	 * bio_data_dir(bio) rw. We need to check because we support fewer
+ 	 * IO operations, and want to verify that what we think are reads and
+ 	 * writes from those operations match what the kernel thinks.
+ 	 */
+ 	ASSERT(rq != NULL || rw == bio_data_dir(bio));
+
	if (unlikely(zv->zv_flags & ZVOL_REMOVING)) {
		zvol_end_io(bio, rq, SET_ERROR(ENXIO));
		goto out;
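A quick illustration (not part of the patch): the new ASSERT holds only if the macro-based rw classification agrees with the kernel's bio_data_dir() for every opcode zvols accept. The sketch below checks that agreement using the same stand-in REQ_OP_* values as above and a stand-in op_is_write() that, like the kernel helper, treats odd opcodes as writes; those values and that detail are assumptions about kernel internals, not taken from the patch.

/*
 * Standalone check that ZVOL_OP_IS_WRITE() agrees with a bio_data_dir()-style
 * direction for the supported opcodes; all values and helpers are stand-ins.
 */
#include <assert.h>
#include <stdio.h>

enum { REQ_OP_READ = 0, REQ_OP_WRITE = 1, REQ_OP_DISCARD = 3, REQ_OP_FLUSH = 35 };
enum { READ = 0, WRITE = 1 };

#define	ZVOL_OP_IS_WRITE(op) \
	(op == REQ_OP_WRITE || op == REQ_OP_FLUSH || op == REQ_OP_DISCARD)

/* Stand-in for the kernel's op_is_write(): write-type opcodes are odd. */
static int
op_is_write(int op)
{
	return (op & 1);
}

int
main(void)
{
	int ops[] = { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_FLUSH, REQ_OP_DISCARD };

	for (size_t i = 0; i < sizeof (ops) / sizeof (ops[0]); i++) {
		int rw = ZVOL_OP_IS_WRITE(ops[i]) ? WRITE : READ;
		int dir = op_is_write(ops[i]) ? WRITE : READ; /* ~ bio_data_dir() */
		assert(rw == dir);
		printf("op=%d rw=%d dir=%d\n", ops[i], rw, dir);
	}
	return (0);
}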
@@ -610,7 +630,7 @@ zvol_request_impl(zvol_state_t *zv, struct bio *bio, struct request *rq,
		 * interfaces lack this functionality (they block waiting for
		 * the i/o to complete).
		 */
- 		if (io_is_discard(bio, rq) || io_is_secure_erase(bio, rq)) {
+ 		if (io_is_discard(bio, rq)) {
			if (force_sync) {
				zvol_discard(&zvr);
			} else {