]> rtime.felk.cvut.cz Git - zynq/linux.git/blob - drivers/gpu/drm/msm/msm_gem_shrinker.c
Apply preempt_rt patch-4.9-rt1.patch.xz
[zynq/linux.git] / drivers / gpu / drm / msm / msm_gem_shrinker.c
1 /*
2  * Copyright (C) 2016 Red Hat
3  * Author: Rob Clark <robdclark@gmail.com>
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published by
7  * the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program.  If not, see <http://www.gnu.org/licenses/>.
16  */
17
18 #include "msm_drv.h"
19 #include "msm_gem.h"
20
/*
 * mutex_is_locked_by - check whether @task is the holder of @mutex
 *
 * Returns true only when @mutex is currently locked and its recorded
 * owner is @task.  Used by msm_gem_shrinker_lock() to detect the case
 * where the shrinker is entered recursively from a path that already
 * holds dev->struct_mutex.
 *
 * NOTE(review): the read of mutex->owner below is done without holding
 * the lock and is racy in general; it is presumably only relied upon
 * for @task == current, where a positive match cannot go stale (we
 * would have to release the mutex ourselves for owner to change).
 */
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

/* mutex->owner is only maintained when SMP or mutex debugging is
 * enabled, and is not usable this way under PREEMPT_RT (rtmutex). */
#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}
33
34 static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
35 {
36         if (!mutex_trylock(&dev->struct_mutex)) {
37                 if (!mutex_is_locked_by(&dev->struct_mutex, current))
38                         return false;
39                 *unlock = false;
40         } else {
41                 *unlock = true;
42         }
43
44         return true;
45 }
46
47
48 static unsigned long
49 msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
50 {
51         struct msm_drm_private *priv =
52                 container_of(shrinker, struct msm_drm_private, shrinker);
53         struct drm_device *dev = priv->dev;
54         struct msm_gem_object *msm_obj;
55         unsigned long count = 0;
56         bool unlock;
57
58         if (!msm_gem_shrinker_lock(dev, &unlock))
59                 return 0;
60
61         list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
62                 if (is_purgeable(msm_obj))
63                         count += msm_obj->base.size >> PAGE_SHIFT;
64         }
65
66         if (unlock)
67                 mutex_unlock(&dev->struct_mutex);
68
69         return count;
70 }
71
72 static unsigned long
73 msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
74 {
75         struct msm_drm_private *priv =
76                 container_of(shrinker, struct msm_drm_private, shrinker);
77         struct drm_device *dev = priv->dev;
78         struct msm_gem_object *msm_obj;
79         unsigned long freed = 0;
80         bool unlock;
81
82         if (!msm_gem_shrinker_lock(dev, &unlock))
83                 return SHRINK_STOP;
84
85         list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
86                 if (freed >= sc->nr_to_scan)
87                         break;
88                 if (is_purgeable(msm_obj)) {
89                         msm_gem_purge(&msm_obj->base);
90                         freed += msm_obj->base.size >> PAGE_SHIFT;
91                 }
92         }
93
94         if (unlock)
95                 mutex_unlock(&dev->struct_mutex);
96
97         if (freed > 0)
98                 pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);
99
100         return freed;
101 }
102
103 static int
104 msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
105 {
106         struct msm_drm_private *priv =
107                 container_of(nb, struct msm_drm_private, vmap_notifier);
108         struct drm_device *dev = priv->dev;
109         struct msm_gem_object *msm_obj;
110         unsigned unmapped = 0;
111         bool unlock;
112
113         if (!msm_gem_shrinker_lock(dev, &unlock))
114                 return NOTIFY_DONE;
115
116         list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
117                 if (is_vunmapable(msm_obj)) {
118                         msm_gem_vunmap(&msm_obj->base);
119                         /* since we don't know any better, lets bail after a few
120                          * and if necessary the shrinker will be invoked again.
121                          * Seems better than unmapping *everything*
122                          */
123                         if (++unmapped >= 15)
124                                 break;
125                 }
126         }
127
128         if (unlock)
129                 mutex_unlock(&dev->struct_mutex);
130
131         *(unsigned long *)ptr += unmapped;
132
133         if (unmapped > 0)
134                 pr_info_ratelimited("Purging %u vmaps\n", unmapped);
135
136         return NOTIFY_DONE;
137 }
138
139 /**
140  * msm_gem_shrinker_init - Initialize msm shrinker
141  * @dev_priv: msm device
142  *
143  * This function registers and sets up the msm shrinker.
144  */
145 void msm_gem_shrinker_init(struct drm_device *dev)
146 {
147         struct msm_drm_private *priv = dev->dev_private;
148         priv->shrinker.count_objects = msm_gem_shrinker_count;
149         priv->shrinker.scan_objects = msm_gem_shrinker_scan;
150         priv->shrinker.seeks = DEFAULT_SEEKS;
151         WARN_ON(register_shrinker(&priv->shrinker));
152
153         priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
154         WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
155 }
156
157 /**
158  * msm_gem_shrinker_cleanup - Clean up msm shrinker
159  * @dev_priv: msm device
160  *
161  * This function unregisters the msm shrinker.
162  */
163 void msm_gem_shrinker_cleanup(struct drm_device *dev)
164 {
165         struct msm_drm_private *priv = dev->dev_private;
166
167         if (priv->shrinker.nr_deferred) {
168                 WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
169                 unregister_shrinker(&priv->shrinker);
170         }
171 }