@@ -130,19 +130,33 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
 }
 
 static void increment_queue_count(struct device_queue_manager *dqm,
-				  enum kfd_queue_type type)
+				  struct qcm_process_device *qpd,
+				  struct queue *q)
 {
 	dqm->active_queue_count++;
-	if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+	    q->properties.type == KFD_QUEUE_TYPE_DIQ)
 		dqm->active_cp_queue_count++;
+
+	if (q->properties.is_gws) {
+		dqm->gws_queue_count++;
+		qpd->mapped_gws_queue = true;
+	}
 }
 
 static void decrement_queue_count(struct device_queue_manager *dqm,
-				  enum kfd_queue_type type)
+				  struct qcm_process_device *qpd,
+				  struct queue *q)
 {
 	dqm->active_queue_count--;
-	if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+	    q->properties.type == KFD_QUEUE_TYPE_DIQ)
 		dqm->active_cp_queue_count--;
+
+	if (q->properties.is_gws) {
+		dqm->gws_queue_count--;
+		qpd->mapped_gws_queue = false;
+	}
 }
 
 /*
@@ -412,7 +426,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 	list_add(&q->list, &qpd->queues_list);
 	qpd->queue_count++;
 	if (q->properties.is_active)
-		increment_queue_count(dqm, q->properties.type);
+		increment_queue_count(dqm, qpd, q);
 
 	/*
 	 * Unconditionally increment this counter, regardless of the queue's
@@ -601,13 +615,8 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
 		deallocate_vmid(dqm, qpd, q);
 	}
 	qpd->queue_count--;
-	if (q->properties.is_active) {
-		decrement_queue_count(dqm, q->properties.type);
-		if (q->properties.is_gws) {
-			dqm->gws_queue_count--;
-			qpd->mapped_gws_queue = false;
-		}
-	}
+	if (q->properties.is_active)
+		decrement_queue_count(dqm, qpd, q);
 
 	return retval;
 }
@@ -700,12 +709,11 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
 	 * dqm->active_queue_count to determine whether a new runlist must be
 	 * uploaded.
 	 */
-	if (q->properties.is_active && !prev_active)
-		increment_queue_count(dqm, q->properties.type);
-	else if (!q->properties.is_active && prev_active)
-		decrement_queue_count(dqm, q->properties.type);
-
-	if (q->gws && !q->properties.is_gws) {
+	if (q->properties.is_active && !prev_active) {
+		increment_queue_count(dqm, &pdd->qpd, q);
+	} else if (!q->properties.is_active && prev_active) {
+		decrement_queue_count(dqm, &pdd->qpd, q);
+	} else if (q->gws && !q->properties.is_gws) {
 		if (q->properties.is_active) {
 			dqm->gws_queue_count++;
 			pdd->qpd.mapped_gws_queue = true;
@@ -767,11 +775,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
 		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 				q->properties.type)];
 		q->properties.is_active = false;
-		decrement_queue_count(dqm, q->properties.type);
-		if (q->properties.is_gws) {
-			dqm->gws_queue_count--;
-			qpd->mapped_gws_queue = false;
-		}
+		decrement_queue_count(dqm, qpd, q);
 
 		if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
 			continue;
@@ -817,7 +821,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
 			continue;
 
 		q->properties.is_active = false;
-		decrement_queue_count(dqm, q->properties.type);
+		decrement_queue_count(dqm, qpd, q);
 	}
 	pdd->last_evict_timestamp = get_jiffies_64();
 	retval = execute_queues_cpsch(dqm,
@@ -888,11 +892,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
 				q->properties.type)];
 		q->properties.is_active = true;
-		increment_queue_count(dqm, q->properties.type);
-		if (q->properties.is_gws) {
-			dqm->gws_queue_count++;
-			qpd->mapped_gws_queue = true;
-		}
+		increment_queue_count(dqm, qpd, q);
 
 		if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
 			continue;
@@ -950,7 +950,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
 			continue;
 
 		q->properties.is_active = true;
-		increment_queue_count(dqm, q->properties.type);
+		increment_queue_count(dqm, &pdd->qpd, q);
 	}
 	retval = execute_queues_cpsch(dqm,
 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -1378,7 +1378,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
 			dqm->total_queue_count);
 
 	list_add(&kq->list, &qpd->priv_queue_list);
-	increment_queue_count(dqm, kq->queue->properties.type);
+	increment_queue_count(dqm, qpd, kq->queue);
 	qpd->is_debug = true;
 	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 	dqm_unlock(dqm);
@@ -1392,7 +1392,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 {
 	dqm_lock(dqm);
 	list_del(&kq->list);
-	decrement_queue_count(dqm, kq->queue->properties.type);
+	decrement_queue_count(dqm, qpd, kq->queue);
 	qpd->is_debug = false;
 	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
 	/*
@@ -1467,7 +1467,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 	qpd->queue_count++;
 
 	if (q->properties.is_active) {
-		increment_queue_count(dqm, q->properties.type);
+		increment_queue_count(dqm, qpd, q);
 
 		execute_queues_cpsch(dqm,
 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -1683,15 +1683,11 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 	list_del(&q->list);
 	qpd->queue_count--;
 	if (q->properties.is_active) {
-		decrement_queue_count(dqm, q->properties.type);
+		decrement_queue_count(dqm, qpd, q);
 		retval = execute_queues_cpsch(dqm,
 				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
 		if (retval == -ETIME)
 			qpd->reset_wavefronts = true;
-		if (q->properties.is_gws) {
-			dqm->gws_queue_count--;
-			qpd->mapped_gws_queue = false;
-		}
 	}
 
 	/*
@@ -1932,7 +1928,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
 	/* Clean all kernel queues */
 	list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
 		list_del(&kq->list);
-		decrement_queue_count(dqm, kq->queue->properties.type);
+		decrement_queue_count(dqm, qpd, kq->queue);
 		qpd->is_debug = false;
 		dqm->total_queue_count--;
 		filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
@@ -1945,13 +1941,8 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
 		else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
 			deallocate_sdma_queue(dqm, q);
 
-		if (q->properties.is_active) {
-			decrement_queue_count(dqm, q->properties.type);
-			if (q->properties.is_gws) {
-				dqm->gws_queue_count--;
-				qpd->mapped_gws_queue = false;
-			}
-		}
+		if (q->properties.is_active)
+			decrement_queue_count(dqm, qpd, q);
 
 		dqm->total_queue_count--;
 	}
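
Taken together, the patch moves the GWS bookkeeping (dqm->gws_queue_count and qpd->mapped_gws_queue) into increment_queue_count()/decrement_queue_count(), so every path that activates or deactivates a queue updates it in one place; previously the bookkeeping was open-coded at some call sites and absent from others (for example, the cpsch evict/restore hunks above only adjusted the active counts). Below is a minimal stand-alone C model of the helpers after the patch: the two helper bodies are copied from the added lines above, but the surrounding structs are pared down to only the fields they touch and are illustrative, not the kernel definitions.

/*
 * Stand-alone model of the accounting this patch centralizes.
 * Locking, MQD management and runlist handling are omitted.
 */
#include <stdbool.h>
#include <stdio.h>

enum kfd_queue_type {
	KFD_QUEUE_TYPE_COMPUTE,
	KFD_QUEUE_TYPE_SDMA,
	KFD_QUEUE_TYPE_DIQ,
};

struct queue_properties {
	enum kfd_queue_type type;
	bool is_active;
	bool is_gws;	/* queue uses the global wave sync resource */
};

struct queue {
	struct queue_properties properties;
};

struct qcm_process_device {
	bool mapped_gws_queue;
};

struct device_queue_manager {
	int active_queue_count;
	int active_cp_queue_count;
	int gws_queue_count;
};

/* After the patch, GWS accounting lives next to the other counters. */
static void increment_queue_count(struct device_queue_manager *dqm,
				  struct qcm_process_device *qpd,
				  struct queue *q)
{
	dqm->active_queue_count++;
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
	    q->properties.type == KFD_QUEUE_TYPE_DIQ)
		dqm->active_cp_queue_count++;

	if (q->properties.is_gws) {
		dqm->gws_queue_count++;
		qpd->mapped_gws_queue = true;
	}
}

static void decrement_queue_count(struct device_queue_manager *dqm,
				  struct qcm_process_device *qpd,
				  struct queue *q)
{
	dqm->active_queue_count--;
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
	    q->properties.type == KFD_QUEUE_TYPE_DIQ)
		dqm->active_cp_queue_count--;

	if (q->properties.is_gws) {
		dqm->gws_queue_count--;
		qpd->mapped_gws_queue = false;
	}
}

int main(void)
{
	struct device_queue_manager dqm = { 0 };
	struct qcm_process_device qpd = { 0 };
	struct queue q = {
		.properties = {
			.type = KFD_QUEUE_TYPE_COMPUTE,
			.is_active = true,
			.is_gws = true,
		},
	};

	increment_queue_count(&dqm, &qpd, &q);	/* queue becomes active */
	decrement_queue_count(&dqm, &qpd, &q);	/* queue evicted/destroyed */

	/* Activation and deactivation now always pair up, so the
	 * counters return to zero regardless of which path ran. */
	printf("active=%d cp=%d gws=%d mapped=%d\n",
	       dqm.active_queue_count, dqm.active_cp_queue_count,
	       dqm.gws_queue_count, (int)qpd.mapped_gws_queue);
	return 0;
}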