@@ -94,35 +94,6 @@ static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
 	return credits;
 }
 
-/**
- * drm_sched_can_queue -- Can we queue more to the hardware?
- * @sched: scheduler instance
- * @entity: the scheduler entity
- *
- * Return true if we can push at least one more job from @entity, false
- * otherwise.
- */
-bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
-			 struct drm_sched_entity *entity)
-{
-	struct drm_sched_job *s_job;
-
-	s_job = drm_sched_entity_queue_peek(entity);
-	if (!s_job)
-		return false;
-
-	/* If a job exceeds the credit limit, truncate it to the credit limit
-	 * itself to guarantee forward progress.
-	 */
-	if (s_job->credits > sched->credit_limit) {
-		dev_WARN(sched->dev,
-			 "Jobs may not exceed the credit limit, truncate.\n");
-		s_job->credits = sched->credit_limit;
-	}
-
-	return drm_sched_available_credits(sched) >= s_job->credits;
-}
-
 /**
  * drm_sched_run_job_queue - enqueue run-job work
  * @sched: scheduler instance
@@ -933,54 +904,72 @@ static void drm_sched_run_job_work(struct work_struct *w)
 {
 	struct drm_gpu_scheduler *sched =
 		container_of(w, struct drm_gpu_scheduler, work_run_job);
+	u32 job_credits, submitted_credits = 0;
 	struct drm_sched_entity *entity;
-	struct dma_fence *fence;
 	struct drm_sched_fence *s_fence;
 	struct drm_sched_job *sched_job;
-	int r;
+	struct dma_fence *fence;
 
-	/* Find entity with a ready job */
-	entity = drm_sched_rq_select_entity(sched, sched->rq);
-	if (IS_ERR_OR_NULL(entity))
-		return;	/* No more work */
+	while (!READ_ONCE(sched->pause_submit)) {
+		/* Find entity with a ready job */
+		entity = drm_sched_rq_select_entity(sched, sched->rq);
+		if (!entity)
+			break;	/* No more work */
 
-	sched_job = drm_sched_entity_pop_job(entity);
-	if (!sched_job) {
-		complete_all(&entity->entity_idle);
-		drm_sched_run_job_queue(sched);
-		return;
-	}
+		/*
+		 * If a job exceeds the credit limit truncate it to guarantee
+		 * forward progress.
+		 */
+		sched_job = drm_sched_entity_queue_peek(entity);
+		job_credits = sched_job->credits;
+		if (dev_WARN_ONCE(sched->dev, job_credits > sched->credit_limit,
+				  "Jobs may not exceed the credit limit, truncating.\n"))
+			job_credits = sched_job->credits = sched->credit_limit;
+
+		if (job_credits > drm_sched_available_credits(sched)) {
+			complete_all(&entity->entity_idle);
+			break;
+		}
 
-	s_fence = sched_job->s_fence;
+		sched_job = drm_sched_entity_pop_job(entity);
+		if (!sched_job) {
+			/* Top entity is not yet runnable after all */
+			complete_all(&entity->entity_idle);
+			continue;
+		}
 
-	atomic_add(sched_job->credits, &sched->credit_count);
-	drm_sched_job_begin(sched_job);
+		s_fence = sched_job->s_fence;
+		drm_sched_job_begin(sched_job);
+		trace_drm_run_job(sched_job, entity);
+		submitted_credits += job_credits;
+		atomic_add(job_credits, &sched->credit_count);
 
-	trace_drm_run_job(sched_job, entity);
-	/*
-	 * The run_job() callback must by definition return a fence whose
-	 * refcount has been incremented for the scheduler already.
-	 */
-	fence = sched->ops->run_job(sched_job);
-	complete_all(&entity->entity_idle);
-	drm_sched_fence_scheduled(s_fence, fence);
-
-	if (!IS_ERR_OR_NULL(fence)) {
-		r = dma_fence_add_callback(fence, &sched_job->cb,
-					   drm_sched_job_done_cb);
-		if (r == -ENOENT)
-			drm_sched_job_done(sched_job, fence->error);
-		else if (r)
-			DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);
+		fence = sched->ops->run_job(sched_job);
+		drm_sched_fence_scheduled(s_fence, fence);
 
-		dma_fence_put(fence);
-	} else {
-		drm_sched_job_done(sched_job, IS_ERR(fence) ?
-				   PTR_ERR(fence) : 0);
+		if (!IS_ERR_OR_NULL(fence)) {
+			int r;
+
+			/* Drop for original kref_init of the fence */
+			dma_fence_put(fence);
+
+			r = dma_fence_add_callback(fence, &sched_job->cb,
+						   drm_sched_job_done_cb);
+			if (r == -ENOENT)
+				drm_sched_job_done(sched_job, fence->error);
+			else if (r)
+				DRM_DEV_ERROR(sched->dev,
+					      "fence add callback failed (%d)\n", r);
+		} else {
+			drm_sched_job_done(sched_job, IS_ERR(fence) ?
+					   PTR_ERR(fence) : 0);
+		}
+
+		complete_all(&entity->entity_idle);
 	}
 
-	wake_up(&sched->job_scheduled);
-	drm_sched_run_job_queue(sched);
+	if (submitted_credits)
+		wake_up(&sched->job_scheduled);
 }
 
 static struct workqueue_struct *drm_sched_alloc_wq(const char *name)
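
For readers following the credit logic, the check the reworked worker loop performs before popping a job can be modelled in isolation. The sketch below is a minimal userspace approximation, not the kernel code: toy_sched, toy_job, toy_available_credits() and toy_can_submit() are made-up stand-ins that only mirror the idea of a fixed credit_limit, an in-flight credit_count, and truncation of oversized jobs to guarantee forward progress.

/* Illustrative userspace model only -- not the DRM scheduler implementation. */
#include <stdbool.h>
#include <stdio.h>

struct toy_sched {
	unsigned int credit_limit;	/* total capacity of the hardware ring */
	unsigned int credit_count;	/* credits currently in flight */
};

struct toy_job {
	unsigned int credits;		/* cost the driver assigned to this job */
};

/* Free ring space, in the spirit of drm_sched_available_credits(). */
static unsigned int toy_available_credits(const struct toy_sched *sched)
{
	return sched->credit_limit - sched->credit_count;
}

/*
 * Would this job fit right now?  Oversized jobs are truncated to the credit
 * limit so they can eventually be submitted (the forward-progress guarantee).
 */
static bool toy_can_submit(struct toy_sched *sched, struct toy_job *job)
{
	if (job->credits > sched->credit_limit) {
		fprintf(stderr, "job exceeds credit limit, truncating\n");
		job->credits = sched->credit_limit;
	}

	return job->credits <= toy_available_credits(sched);
}

int main(void)
{
	struct toy_sched sched = { .credit_limit = 8, .credit_count = 5 };
	struct toy_job small = { .credits = 2 };
	struct toy_job big = { .credits = 16 };

	/* 2 <= 3 free credits: this one would be submitted. */
	if (toy_can_submit(&sched, &small))
		sched.credit_count += small.credits;

	/* Truncated from 16 to 8, but only 1 credit is free: does not fit yet. */
	printf("big job fits now: %s\n",
	       toy_can_submit(&sched, &big) ? "yes" : "no");

	return 0;
}

In the worker loop above the same "does not fit" condition causes a break rather than a skip, so jobs are not reordered around one that cannot currently be submitted.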