@@ -490,6 +490,42 @@ bool tcmur_handler_is_passthrough_only(struct tcmur_handler *rhandler)
 	return true;
 }
 
+int tcmur_cmd_passthrough_handler(struct tcmu_device *dev,
+				  struct tcmulib_cmd *cmd)
+{
+	int ret;
+	struct tcmulib_handler *handler = tcmu_get_dev_handler(dev);
+	struct tcmur_handler *rhandler = handler->hm_private;
+	int wakeup;
+
+	/*
+	 * TCMU_NOT_HANDLED is returned when a tcmur passthrough handler
+	 * does not implement ->handle_cmd.
+	 */
+	if (!rhandler->handle_cmd)
+		return TCMU_NOT_HANDLED;
+
+	/*
+	 * This check could be omitted, but keeping it speeds up handling
+	 * of passthrough cmds by skipping the aio pop/push routines.
+	 */
+	if (!rhandler->nr_threads)
+		return rhandler->handle_cmd(dev, cmd);
+	/*
+	 * Since we call ->handle_cmd via async_handle_cmd(), ->handle_cmd
+	 * can finish in the caller's context (asynchronous handler) or in
+	 * work queue context (synchronous handlers), so we need to check
+	 * whether ->handle_cmd handled the passthrough command here as
+	 * well as in handle_passthrough_cbk().
+	 */
+	track_aio_request_start(dev);
+	ret = handle_passthrough(dev, cmd);
+	if (ret != TCMU_ASYNC_HANDLED)
+		track_aio_request_finish(dev, &wakeup);
+
+	return ret;
+}
+
 int tcmur_cmd_handler(struct tcmu_device *dev, struct tcmulib_cmd *cmd)
 {
 	int ret = TCMU_NOT_HANDLED;
@@ -499,48 +535,36 @@ int tcmur_cmd_handler(struct tcmu_device *dev, struct tcmulib_cmd *cmd)
 
 	track_aio_request_start(dev);
 
-	if (tcmur_handler_is_passthrough_only(rhandler))
-		goto passthrough;
-
 	switch (cdb[0]) {
 	case READ_6:
 	case READ_10:
 	case READ_12:
 	case READ_16:
 		ret = handle_read(dev, cmd);
-		goto done;
+		break;
 	case WRITE_6:
 	case WRITE_10:
 	case WRITE_12:
 	case WRITE_16:
 		ret = handle_write(dev, cmd);
-		goto done;
+		break;
 	case SYNCHRONIZE_CACHE:
 	case SYNCHRONIZE_CACHE_16:
-		if (!rhandler->flush)
-			goto done;
-		ret = handle_flush(dev, cmd);
-		goto done;
+		if (rhandler->flush)
+			ret = handle_flush(dev, cmd);
+		break;
 	case COMPARE_AND_WRITE:
 		ret = handle_caw(dev, cmd);
-		goto done;
+		break;
 	case WRITE_VERIFY:
 		ret = handle_write_verify(dev, cmd);
-		goto done;
+		break;
+	default:
+		/* Try to pass any remaining cmds through to the handler */
+		if (rhandler->handle_cmd)
+			ret = handle_passthrough(dev, cmd);
 	}
 
-passthrough:
-	/*
-	 * note that TCMU_NOT_HANDLED is returned when a tcmur handler does not
-	 * handle a passthrough command, but since we call ->handle_cmd via
-	 * async_handle_cmd(), ->handle_cmd can finish in the callers context
-	 * (asynchronous handler) or work queue context (synchronous handlers),
-	 * thus we'd need to check if ->handle_cmd handled the passthough
-	 * command here as well as in handle_passthrough_cbk().
-	 */
-	if (rhandler->handle_cmd)
-		ret = handle_passthrough(dev, cmd);
-done:
 	if (ret != TCMU_ASYNC_HANDLED)
 		track_aio_request_finish(dev, NULL);
 	return ret;
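
Below is a minimal, hypothetical sketch (not part of this patch) of a handler that would take the fast path added above: when nr_threads is 0, tcmur_cmd_passthrough_handler() calls ->handle_cmd directly and skips the aio pop/push routines. The handler name, callback name, and the included header are assumptions for illustration; only handle_cmd and nr_threads are fields referenced by the diff.

#include "tcmu-runner.h"	/* assumed header declaring struct tcmur_handler */

/* Hypothetical passthrough callback: everything here except the
 * TCMU_NOT_HANDLED return code is an illustrative assumption. */
static int demo_handle_cmd(struct tcmu_device *dev, struct tcmulib_cmd *cmd)
{
	/* Inspect the CDB and complete the command here; returning
	 * TCMU_NOT_HANDLED tells the core the command was not serviced. */
	return TCMU_NOT_HANDLED;
}

static struct tcmur_handler demo_handler = {
	.name = "demo",			/* assumed field, for illustration only */
	.handle_cmd = demo_handle_cmd,
	.nr_threads = 0,		/* 0: take the direct ->handle_cmd path */
};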