@@ -438,20 +438,6 @@ void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
 	unregister_shrinker(&i915->mm.shrinker);
 }
 
-void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
-				    struct mutex *mutex)
-{
-	if (!IS_ENABLED(CONFIG_LOCKDEP))
-		return;
-
-	fs_reclaim_acquire(GFP_KERNEL);
-
-	mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
-	mutex_release(&mutex->dep_map, _RET_IP_);
-
-	fs_reclaim_release(GFP_KERNEL);
-}
-
 #define obj_to_i915(obj__) to_i915((obj__)->base.dev)
 
 void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
@@ -27,7 +27,5 @@ unsigned long i915_gem_shrink(struct i915_gem_ww_ctx *ww,
 unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
 void i915_gem_driver_register__shrinker(struct drm_i915_private *i915);
 void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915);
-void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
-				    struct mutex *mutex);
 
 #endif /* __I915_GEM_SHRINKER_H__ */
@@ -156,7 +156,7 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
 	lockdep_set_subclass(&vm->mutex, subclass);
 
 	if (!intel_vm_no_concurrent_access_wa(vm->i915)) {
-		i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
+		fs_reclaim_taints_mutex(&vm->mutex);
 	} else {
 		/*
 		 * CHV + BXT VTD workaround use stop_machine(),
@@ -1405,7 +1405,7 @@ void intel_gt_init_reset(struct intel_gt *gt)
 	 * within the shrinker, we forbid ourselves from performing any
	 * fs-reclaim or taking related locks during reset.
	 */
-	i915_gem_shrinker_taints_mutex(gt->i915, &gt->reset.mutex);
+	fs_reclaim_taints_mutex(&gt->reset.mutex);
 
	/* no GPU until we are ready! */
	__set_bit(I915_WEDGED, &gt->reset.flags);
@@ -114,3 +114,26 @@ void set_timer_ms(struct timer_list *t, unsigned long timeout)
 	/* Keep t->expires = 0 reserved to indicate a canceled timer. */
 	mod_timer(t, jiffies + timeout ?: 1);
 }
+
+/**
+ * fs_reclaim_taints_mutex - teach lockdep that a mutex is taken in fs-reclaim
+ * @mutex: mutex that may be acquired from within the fs-reclaim path
+ *
+ * Prime lockdep with the "fs_reclaim -> @mutex" dependency up front by
+ * faking an acquire/release of @mutex under fs_reclaim_acquire().  Any
+ * later attempt to take @mutex while holding it across a GFP_KERNEL
+ * allocation is then reported immediately, without reclaim having to
+ * actually recurse into the code that takes @mutex.
+ */
+void fs_reclaim_taints_mutex(struct mutex *mutex)
+{
+	if (!IS_ENABLED(CONFIG_LOCKDEP))
+		return;
+
+	fs_reclaim_acquire(GFP_KERNEL);
+
+	mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
+	mutex_release(&mutex->dep_map, _RET_IP_);
+
+	fs_reclaim_release(GFP_KERNEL);
+}
@@ -266,6 +266,8 @@ static inline int list_is_last_rcu(const struct list_head *list,
 	return READ_ONCE(list->next) == head;
 }
 
+void fs_reclaim_taints_mutex(struct mutex *mutex);
+
 static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
 {
 	unsigned long j = msecs_to_jiffies(m);