@@ -1989,6 +1989,7 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	u32 idx;
+	bool sched_work = false;
 
 	if (!adev->gfx.enable_cleaner_shader)
 		return;
@@ -2007,15 +2008,19 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
 	mutex_lock(&adev->enforce_isolation_mutex);
 	if (adev->enforce_isolation[idx]) {
 		if (adev->kfd.init_complete)
-			amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
+			sched_work = true;
 	}
 	mutex_unlock(&adev->enforce_isolation_mutex);
+
+	if (sched_work)
+		amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
 }
 
 void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	u32 idx;
+	bool sched_work = false;
 
 	if (!adev->gfx.enable_cleaner_shader)
 		return;
@@ -2031,9 +2036,12 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
 	mutex_lock(&adev->enforce_isolation_mutex);
 	if (adev->enforce_isolation[idx]) {
 		if (adev->kfd.init_complete)
-			amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
+			sched_work = true;
 	}
 	mutex_unlock(&adev->enforce_isolation_mutex);
+
+	if (sched_work)
+		amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
 }
 
 /*
0 commit comments