drivers/gpu/drm/i915/intel_pm.c
/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 2

/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, the power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.i915_enable_fbc parameter.
 */
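
/* Illustrative usage (not part of the driver itself): the parameter can be
 * given at module load time, e.g.
 *
 *     modprobe i915 i915_enable_fbc=1
 *
 * or on the kernel command line as i915.i915_enable_fbc=1.  Judging from
 * intel_update_fbc() below, a positive value requests FBC (subject to the
 * other checks there), 0 disables it, and a negative value selects the
 * per-chip default.
 */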

static bool intel_crtc_active(struct drm_crtc *crtc)
{
        /* Be paranoid as we can arrive here with only partial
         * state retrieved from the hardware during setup.
         */
        return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
}

static void i8xx_disable_fbc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 fbc_ctl;

        /* Disable compression */
        fbc_ctl = I915_READ(FBC_CONTROL);
        if ((fbc_ctl & FBC_CTL_EN) == 0)
                return;

        fbc_ctl &= ~FBC_CTL_EN;
        I915_WRITE(FBC_CONTROL, fbc_ctl);

        /* Wait for compressing bit to clear */
        if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
                DRM_DEBUG_KMS("FBC idle timed out\n");
                return;
        }

        DRM_DEBUG_KMS("disabled FBC\n");
}

static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int cfb_pitch;
        int plane, i;
        u32 fbc_ctl, fbc_ctl2;

        cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
        if (fb->pitches[0] < cfb_pitch)
                cfb_pitch = fb->pitches[0];

        /* FBC_CTL wants 64B units */
        cfb_pitch = (cfb_pitch / 64) - 1;
        plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

        /* Clear old tags */
        for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
                I915_WRITE(FBC_TAG + (i * 4), 0);

        /* Set it up... */
        fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
        fbc_ctl2 |= plane;
        I915_WRITE(FBC_CONTROL2, fbc_ctl2);
        I915_WRITE(FBC_FENCE_OFF, crtc->y);

        /* enable it... */
        fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
        if (IS_I945GM(dev))
                fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
        fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
        fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
        fbc_ctl |= obj->fence_reg;
        I915_WRITE(FBC_CONTROL, fbc_ctl);

        DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
                      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}

static bool i8xx_fbc_enabled(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
        unsigned long stall_watermark = 200;
        u32 dpfc_ctl;

        dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
        dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
        I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

        I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
                   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
                   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
        I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

        /* enable it... */
        I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

        DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

static void g4x_disable_fbc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpfc_ctl;

        /* Disable compression */
        dpfc_ctl = I915_READ(DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
                dpfc_ctl &= ~DPFC_CTL_EN;
                I915_WRITE(DPFC_CONTROL, dpfc_ctl);

                DRM_DEBUG_KMS("disabled FBC\n");
        }
}

static bool g4x_fbc_enabled(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 blt_ecoskpd;

        /* Make sure blitter notifies FBC of writes */
        gen6_gt_force_wake_get(dev_priv);
        blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
        blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
                GEN6_BLITTER_LOCK_SHIFT;
        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
        blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
        blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
                         GEN6_BLITTER_LOCK_SHIFT);
        I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
        POSTING_READ(GEN6_BLITTER_ECOSKPD);
        gen6_gt_force_wake_put(dev_priv);
}
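
/* Note on the write sequence above: GEN6_BLITTER_ECOSKPD appears to follow
 * the usual GEN6 masked-register convention, where the upper half of the
 * register acts as a per-bit write enable for the lower half.  The three
 * writes therefore (1) raise the write-enable bit for FBC_NOTIFY, (2) set
 * FBC_NOTIFY itself while the enable is held, and (3) drop the write-enable
 * again so that later, unrelated writes cannot clobber the bit.
 */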

static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
        unsigned long stall_watermark = 200;
        u32 dpfc_ctl;

        dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
        dpfc_ctl &= DPFC_RESERVED;
        dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
        /* Set persistent mode for front-buffer rendering, ala X. */
        dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
        dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
        I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

        I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
                   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
                   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
        I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
        I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
        /* enable it... */
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

        if (IS_GEN6(dev)) {
                I915_WRITE(SNB_DPFC_CTL_SA,
                           SNB_CPU_FENCE_ENABLE | obj->fence_reg);
                I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
                sandybridge_blit_fbc_update(dev);
        }

        DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

static void ironlake_disable_fbc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpfc_ctl;

        /* Disable compression */
        dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
                dpfc_ctl &= ~DPFC_CTL_EN;
                I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

                if (IS_IVYBRIDGE(dev))
                        /* WaFbcDisableDpfcClockGating:ivb */
                        I915_WRITE(ILK_DSPCLK_GATE_D,
                                   I915_READ(ILK_DSPCLK_GATE_D) &
                                   ~ILK_DPFCUNIT_CLOCK_GATE_DISABLE);

                if (IS_HASWELL(dev))
                        /* WaFbcDisableDpfcClockGating:hsw */
                        I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
                                   I915_READ(HSW_CLKGATE_DISABLE_PART_1) &
                                   ~HSW_DPFC_GATING_DISABLE);

                DRM_DEBUG_KMS("disabled FBC\n");
        }
}

static bool ironlake_fbc_enabled(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}

static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset);

        I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
                   IVB_DPFC_CTL_FENCE_EN |
                   intel_crtc->plane << IVB_DPFC_CTL_PLANE_SHIFT);

        if (IS_IVYBRIDGE(dev)) {
                /* WaFbcAsynchFlipDisableFbcQueue:ivb */
                I915_WRITE(ILK_DISPLAY_CHICKEN1, ILK_FBCQ_DIS);
                /* WaFbcDisableDpfcClockGating:ivb */
                I915_WRITE(ILK_DSPCLK_GATE_D,
                           I915_READ(ILK_DSPCLK_GATE_D) |
                           ILK_DPFCUNIT_CLOCK_GATE_DISABLE);
        } else {
                /* WaFbcAsynchFlipDisableFbcQueue:hsw */
                I915_WRITE(HSW_PIPE_SLICE_CHICKEN_1(intel_crtc->pipe),
                           HSW_BYPASS_FBC_QUEUE);
                /* WaFbcDisableDpfcClockGating:hsw */
                I915_WRITE(HSW_CLKGATE_DISABLE_PART_1,
                           I915_READ(HSW_CLKGATE_DISABLE_PART_1) |
                           HSW_DPFC_GATING_DISABLE);
        }

        I915_WRITE(SNB_DPFC_CTL_SA,
                   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
        I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);

        sandybridge_blit_fbc_update(dev);

        DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

bool intel_fbc_enabled(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!dev_priv->display.fbc_enabled)
                return false;

        return dev_priv->display.fbc_enabled(dev);
}

static void intel_fbc_work_fn(struct work_struct *__work)
{
        struct intel_fbc_work *work =
                container_of(to_delayed_work(__work),
                             struct intel_fbc_work, work);
        struct drm_device *dev = work->crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        mutex_lock(&dev->struct_mutex);
        if (work == dev_priv->fbc_work) {
                /* Double check that we haven't switched fb without cancelling
                 * the prior work.
                 */
                if (work->crtc->fb == work->fb) {
                        dev_priv->display.enable_fbc(work->crtc,
                                                     work->interval);

                        dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
                        dev_priv->cfb_fb = work->crtc->fb->base.id;
                        dev_priv->cfb_y = work->crtc->y;
                }

                dev_priv->fbc_work = NULL;
        }
        mutex_unlock(&dev->struct_mutex);

        kfree(work);
}

static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
        if (dev_priv->fbc_work == NULL)
                return;

        DRM_DEBUG_KMS("cancelling pending FBC enable\n");

        /* Synchronisation is provided by struct_mutex and checking of
         * dev_priv->fbc_work, so we can perform the cancellation
         * entirely asynchronously.
         */
        if (cancel_delayed_work(&dev_priv->fbc_work->work))
                /* tasklet was killed before being run, clean up */
                kfree(dev_priv->fbc_work);

        /* Mark the work as no longer wanted so that if it does
         * wake-up (because the work was already running and waiting
         * for our mutex), it will discover that it is no longer
         * necessary to run.
         */
        dev_priv->fbc_work = NULL;
}

void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
        struct intel_fbc_work *work;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!dev_priv->display.enable_fbc)
                return;

        intel_cancel_fbc_work(dev_priv);

        work = kzalloc(sizeof(*work), GFP_KERNEL);
        if (work == NULL) {
                dev_priv->display.enable_fbc(crtc, interval);
                return;
        }

        work->crtc = crtc;
        work->fb = crtc->fb;
        work->interval = interval;
        INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

        dev_priv->fbc_work = work;

        DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

        /* Delay the actual enabling to let pageflipping cease and the
         * display settle before starting the compression. Note that
         * this delay also serves a second purpose: it allows for a
         * vblank to pass after disabling the FBC before we attempt
         * to modify the control registers.
         *
         * A more complicated solution would involve tracking vblanks
         * following the termination of the page-flipping sequence
         * and indeed performing the enable as a co-routine and not
         * waiting synchronously upon the vblank.
         */
        schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}

void intel_disable_fbc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        intel_cancel_fbc_work(dev_priv);

        if (!dev_priv->display.disable_fbc)
                return;

        dev_priv->display.disable_fbc(dev);
        dev_priv->cfb_plane = -1;
}

/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = NULL, *tmp_crtc;
        struct intel_crtc *intel_crtc;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;
        struct drm_i915_gem_object *obj;
        int enable_fbc;
        unsigned int max_hdisplay, max_vdisplay;

        if (!i915_powersave)
                return;

        if (!I915_HAS_FBC(dev))
                return;

        /*
         * If FBC is already on, we just have to verify that we can
         * keep it that way...
         * Need to disable if:
         *   - more than one pipe is active
         *   - changing FBC params (stride, fence, mode)
         *   - new fb is too large to fit in compressed buffer
         *   - going to an unsupported config (interlace, pixel multiply, etc.)
         */
        list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
                if (intel_crtc_active(tmp_crtc) &&
                    !to_intel_crtc(tmp_crtc)->primary_disabled) {
                        if (crtc) {
                                DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
                                dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
                                goto out_disable;
                        }
                        crtc = tmp_crtc;
                }
        }

        if (!crtc || crtc->fb == NULL) {
                DRM_DEBUG_KMS("no output, disabling\n");
                dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
                goto out_disable;
        }

        intel_crtc = to_intel_crtc(crtc);
        fb = crtc->fb;
        intel_fb = to_intel_framebuffer(fb);
        obj = intel_fb->obj;

        enable_fbc = i915_enable_fbc;
        if (enable_fbc < 0) {
                DRM_DEBUG_KMS("fbc set to per-chip default\n");
                enable_fbc = 1;
                if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
                        enable_fbc = 0;
        }
        if (!enable_fbc) {
                DRM_DEBUG_KMS("fbc disabled per module param\n");
                dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
                goto out_disable;
        }
        if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
            (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
                DRM_DEBUG_KMS("mode incompatible with compression, "
                              "disabling\n");
                dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
                goto out_disable;
        }

        if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                max_hdisplay = 4096;
                max_vdisplay = 2048;
        } else {
                max_hdisplay = 2048;
                max_vdisplay = 1536;
        }
        if ((crtc->mode.hdisplay > max_hdisplay) ||
            (crtc->mode.vdisplay > max_vdisplay)) {
                DRM_DEBUG_KMS("mode too large for compression, disabling\n");
                dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
                goto out_disable;
        }
        if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
            intel_crtc->plane != 0) {
                DRM_DEBUG_KMS("plane not 0, disabling compression\n");
                dev_priv->no_fbc_reason = FBC_BAD_PLANE;
                goto out_disable;
        }

        /* The use of a CPU fence is mandatory in order to detect writes
         * by the CPU to the scanout and trigger updates to the FBC.
         */
        if (obj->tiling_mode != I915_TILING_X ||
            obj->fence_reg == I915_FENCE_REG_NONE) {
                DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
                dev_priv->no_fbc_reason = FBC_NOT_TILED;
                goto out_disable;
        }

        /* If the kernel debugger is active, always disable compression */
        if (in_dbg_master())
                goto out_disable;

        if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
                DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
                dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
                goto out_disable;
        }

        /* If the scanout has not changed, don't modify the FBC settings.
         * Note that we make the fundamental assumption that the fb->obj
         * cannot be unpinned (and have its GTT offset and fence revoked)
         * without first being decoupled from the scanout and FBC disabled.
         */
        if (dev_priv->cfb_plane == intel_crtc->plane &&
            dev_priv->cfb_fb == fb->base.id &&
            dev_priv->cfb_y == crtc->y)
                return;

        if (intel_fbc_enabled(dev)) {
                /* We update FBC along two paths, after changing fb/crtc
                 * configuration (modeswitching) and after page-flipping
                 * finishes. For the latter, we know that not only did
                 * we disable the FBC at the start of the page-flip
                 * sequence, but also more than one vblank has passed.
                 *
                 * For the former case of modeswitching, it is possible
                 * to switch between two valid FBC configurations
                 * instantaneously so we do need to disable the FBC
                 * before we can modify its control registers. We also
                 * have to wait for the next vblank for that to take
                 * effect. However, since we delay enabling FBC we can
                 * assume that a vblank has passed since disabling and
                 * that we can safely alter the registers in the deferred
                 * callback.
                 *
                 * In the scenario that we go from a valid to invalid
                 * and then back to valid FBC configuration we have
                 * no strict enforcement that a vblank occurred since
                 * disabling the FBC. However, along all current pipe
                 * disabling paths we do need to wait for a vblank at
                 * some point. And we wait before enabling FBC anyway.
                 */
                DRM_DEBUG_KMS("disabling active FBC for update\n");
                intel_disable_fbc(dev);
        }

        intel_enable_fbc(crtc, 500);
        return;

out_disable:
        /* Multiple disables should be harmless */
        if (intel_fbc_enabled(dev)) {
                DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
                intel_disable_fbc(dev);
        }
        i915_gem_stolen_cleanup_compression(dev);
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 tmp;

        tmp = I915_READ(CLKCFG);

        switch (tmp & CLKCFG_FSB_MASK) {
        case CLKCFG_FSB_533:
                dev_priv->fsb_freq = 533; /* 133*4 */
                break;
        case CLKCFG_FSB_800:
                dev_priv->fsb_freq = 800; /* 200*4 */
                break;
        case CLKCFG_FSB_667:
                dev_priv->fsb_freq = 667; /* 167*4 */
                break;
        case CLKCFG_FSB_400:
                dev_priv->fsb_freq = 400; /* 100*4 */
                break;
        }

        switch (tmp & CLKCFG_MEM_MASK) {
        case CLKCFG_MEM_533:
                dev_priv->mem_freq = 533;
                break;
        case CLKCFG_MEM_667:
                dev_priv->mem_freq = 667;
                break;
        case CLKCFG_MEM_800:
                dev_priv->mem_freq = 800;
                break;
        }

        /* detect pineview DDR3 setting */
        tmp = I915_READ(CSHRDDR3CTL);
        dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u16 ddrpll, csipll;

        ddrpll = I915_READ16(DDRMPLL1);
        csipll = I915_READ16(CSIPLL0);

        switch (ddrpll & 0xff) {
        case 0xc:
                dev_priv->mem_freq = 800;
                break;
        case 0x10:
                dev_priv->mem_freq = 1066;
                break;
        case 0x14:
                dev_priv->mem_freq = 1333;
                break;
        case 0x18:
                dev_priv->mem_freq = 1600;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
                                 ddrpll & 0xff);
                dev_priv->mem_freq = 0;
                break;
        }

        dev_priv->ips.r_t = dev_priv->mem_freq;

        switch (csipll & 0x3ff) {
        case 0x00c:
                dev_priv->fsb_freq = 3200;
                break;
        case 0x00e:
                dev_priv->fsb_freq = 3733;
                break;
        case 0x010:
                dev_priv->fsb_freq = 4266;
                break;
        case 0x012:
                dev_priv->fsb_freq = 4800;
                break;
        case 0x014:
                dev_priv->fsb_freq = 5333;
                break;
        case 0x016:
                dev_priv->fsb_freq = 5866;
                break;
        case 0x018:
                dev_priv->fsb_freq = 6400;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
                                 csipll & 0x3ff);
                dev_priv->fsb_freq = 0;
                break;
        }

        if (dev_priv->fsb_freq == 3200) {
                dev_priv->ips.c_m = 0;
        } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
                dev_priv->ips.c_m = 1;
        } else {
                dev_priv->ips.c_m = 2;
        }
}

static const struct cxsr_latency cxsr_latency_table[] = {
        {1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
        {1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
        {1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
        {1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
        {1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

        {1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
        {1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
        {1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
        {1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
        {1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

        {1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
        {1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
        {1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
        {1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
        {1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

        {0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
        {0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
        {0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
        {0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
        {0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

        {0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
        {0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
        {0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
        {0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
        {0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

        {0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
        {0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
        {0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
        {0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
        {0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
                                                         int is_ddr3,
                                                         int fsb,
                                                         int mem)
{
        const struct cxsr_latency *latency;
        int i;

        if (fsb == 0 || mem == 0)
                return NULL;

        for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
                latency = &cxsr_latency_table[i];
                if (is_desktop == latency->is_desktop &&
                    is_ddr3 == latency->is_ddr3 &&
                    fsb == latency->fsb_freq && mem == latency->mem_freq)
                        return latency;
        }

        DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

        return NULL;
}
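
/* Worked example (values taken from cxsr_latency_table above, field order
 * inferred from the lookup and from pineview_update_wm() below): a desktop
 * part (is_desktop=1) with DDR2 (is_ddr3=0), an 800 MHz FSB and 667 MHz
 * memory matches {1, 0, 800, 667, 3354, 33354, 3807, 33807}, i.e. a display
 * self-refresh latency of 3354 ns and a cursor self-refresh latency of
 * 3807 ns, with the 33354/33807 entries used when the HPLL is off.
 */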

static void pineview_disable_cxsr(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        /* deactivate cxsr */
        I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;
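
/* Back-of-the-envelope illustration (hypothetical numbers): at a 300 MHz
 * pixel clock and 4 bytes per pixel, the display FIFO drains at about
 * 1.2 GB/s, so a 5000 ns latency window corresponds to roughly 6000 bytes
 * (~94 64-byte cachelines) that must still be queued when a fetch is
 * issued.  This is the quantity the watermark calculations below bound.
 */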

static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dsparb = I915_READ(DSPARB);
        int size;

        size = dsparb & 0x7f;
        if (plane)
                size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

        DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
                      plane ? "B" : "A", size);

        return size;
}
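
/* Illustrative reading (hypothetical register value): the low bits appear
 * to hold the plane B start offset (equal to plane A's size) and the
 * CSTART field the plane C start.  With the low field = 48 and the CSTART
 * field = 96, plane A owns FIFO entries 0..47 (size 48) and plane B owns
 * entries 48..95 (size 96 - 48 = 48).
 */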

static int i85x_get_fifo_size(struct drm_device *dev, int plane)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dsparb = I915_READ(DSPARB);
        int size;

        size = dsparb & 0x1ff;
        if (plane)
                size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
        size >>= 1; /* Convert to cachelines */

        DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
                      plane ? "B" : "A", size);

        return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dsparb = I915_READ(DSPARB);
        int size;

        size = dsparb & 0x7f;
        size >>= 2; /* Convert to cachelines */

        DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
                      plane ? "B" : "A",
                      size);

        return size;
}

static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dsparb = I915_READ(DSPARB);
        int size;

        size = dsparb & 0x7f;
        size >>= 1; /* Convert to cachelines */

        DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
                      plane ? "B" : "A", size);

        return size;
}

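/* Field order in the positional initializers below, as consumed by
 * intel_calculate_wm() (which reads max_wm, default_wm, guard_size and
 * cacheline_size, with fifo_size passed separately):
 * fifo_size, max_wm, default_wm, guard_size, cacheline_size.
 */
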
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
        PINEVIEW_DISPLAY_FIFO,
        PINEVIEW_MAX_WM,
        PINEVIEW_DFT_WM,
        PINEVIEW_GUARD_WM,
        PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
        PINEVIEW_DISPLAY_FIFO,
        PINEVIEW_MAX_WM,
        PINEVIEW_DFT_HPLLOFF_WM,
        PINEVIEW_GUARD_WM,
        PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_wm = {
        PINEVIEW_CURSOR_FIFO,
        PINEVIEW_CURSOR_MAX_WM,
        PINEVIEW_CURSOR_DFT_WM,
        PINEVIEW_CURSOR_GUARD_WM,
        PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
        PINEVIEW_CURSOR_FIFO,
        PINEVIEW_CURSOR_MAX_WM,
        PINEVIEW_CURSOR_DFT_WM,
        PINEVIEW_CURSOR_GUARD_WM,
        PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params g4x_wm_info = {
        G4X_FIFO_SIZE,
        G4X_MAX_WM,
        G4X_MAX_WM,
        2,
        G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
        I965_CURSOR_FIFO,
        I965_CURSOR_MAX_WM,
        I965_CURSOR_DFT_WM,
        2,
        G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
        VALLEYVIEW_FIFO_SIZE,
        VALLEYVIEW_MAX_WM,
        VALLEYVIEW_MAX_WM,
        2,
        G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
        I965_CURSOR_FIFO,
        VALLEYVIEW_CURSOR_MAX_WM,
        I965_CURSOR_DFT_WM,
        2,
        G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
        I965_CURSOR_FIFO,
        I965_CURSOR_MAX_WM,
        I965_CURSOR_DFT_WM,
        2,
        I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
        I945_FIFO_SIZE,
        I915_MAX_WM,
        1,
        2,
        I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i915_wm_info = {
        I915_FIFO_SIZE,
        I915_MAX_WM,
        1,
        2,
        I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i855_wm_info = {
        I855GM_FIFO_SIZE,
        I915_MAX_WM,
        1,
        2,
        I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
        I830_FIFO_SIZE,
        I915_MAX_WM,
        1,
        2,
        I830_FIFO_LINE_SIZE
};

static const struct intel_watermark_params ironlake_display_wm_info = {
        ILK_DISPLAY_FIFO,
        ILK_DISPLAY_MAXWM,
        ILK_DISPLAY_DFTWM,
        2,
        ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_wm_info = {
        ILK_CURSOR_FIFO,
        ILK_CURSOR_MAXWM,
        ILK_CURSOR_DFTWM,
        2,
        ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_display_srwm_info = {
        ILK_DISPLAY_SR_FIFO,
        ILK_DISPLAY_MAX_SRWM,
        ILK_DISPLAY_DFT_SRWM,
        2,
        ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
        ILK_CURSOR_SR_FIFO,
        ILK_CURSOR_MAX_SRWM,
        ILK_CURSOR_DFT_SRWM,
        2,
        ILK_FIFO_LINE_SIZE
};

static const struct intel_watermark_params sandybridge_display_wm_info = {
        SNB_DISPLAY_FIFO,
        SNB_DISPLAY_MAXWM,
        SNB_DISPLAY_DFTWM,
        2,
        SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
        SNB_CURSOR_FIFO,
        SNB_CURSOR_MAXWM,
        SNB_CURSOR_DFTWM,
        2,
        SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_display_srwm_info = {
        SNB_DISPLAY_SR_FIFO,
        SNB_DISPLAY_MAX_SRWM,
        SNB_DISPLAY_DFT_SRWM,
        2,
        SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
        SNB_CURSOR_SR_FIFO,
        SNB_CURSOR_MAX_SRWM,
        SNB_CURSOR_DFT_SRWM,
        2,
        SNB_FIFO_LINE_SIZE
};

/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO available to this plane
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills past
 * the watermark point.  If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
                                        const struct intel_watermark_params *wm,
                                        int fifo_size,
                                        int pixel_size,
                                        unsigned long latency_ns)
{
        long entries_required, wm_size;

        /*
         * Note: we need to make sure we don't overflow for various clock &
         * latency values.
         * clocks go from a few thousand to several hundred thousand.
         * latency is usually a few thousand
         */
        entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
                1000;
        entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

        DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

        wm_size = fifo_size - (entries_required + wm->guard_size);

        DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

        /* Don't promote wm_size to unsigned... */
        if (wm_size > (long)wm->max_wm)
                wm_size = wm->max_wm;
        if (wm_size <= 0)
                wm_size = wm->default_wm;
        return wm_size;
}
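
/* Worked example (illustrative numbers only): clock_in_khz = 100000
 * (100 MHz), pixel_size = 4, latency_ns = 5000, cacheline_size = 64,
 * fifo_size = 96, guard_size = 2:
 *
 *   entries_required = (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes
 *                    -> DIV_ROUND_UP(2000, 64) = 32 cachelines
 *   wm_size          = 96 - (32 + 2) = 62
 *
 * i.e. the plane starts refetching once the FIFO drops to 62 entries,
 * subject to the max_wm/default_wm clamping at the end of the function.
 */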

static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
        struct drm_crtc *crtc, *enabled = NULL;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                if (intel_crtc_active(crtc)) {
                        if (enabled)
                                return NULL;
                        enabled = crtc;
                }
        }

        return enabled;
}

static void pineview_update_wm(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        const struct cxsr_latency *latency;
        u32 reg;
        unsigned long wm;

        latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
                                         dev_priv->fsb_freq, dev_priv->mem_freq);
        if (!latency) {
                DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
                pineview_disable_cxsr(dev);
                return;
        }

        crtc = single_enabled_crtc(dev);
        if (crtc) {
                int clock = crtc->mode.clock;
                int pixel_size = crtc->fb->bits_per_pixel / 8;

                /* Display SR */
                wm = intel_calculate_wm(clock, &pineview_display_wm,
                                        pineview_display_wm.fifo_size,
                                        pixel_size, latency->display_sr);
                reg = I915_READ(DSPFW1);
                reg &= ~DSPFW_SR_MASK;
                reg |= wm << DSPFW_SR_SHIFT;
                I915_WRITE(DSPFW1, reg);
                DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

                /* cursor SR */
                wm = intel_calculate_wm(clock, &pineview_cursor_wm,
                                        pineview_display_wm.fifo_size,
                                        pixel_size, latency->cursor_sr);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_CURSOR_SR_MASK;
                reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
                I915_WRITE(DSPFW3, reg);

                /* Display HPLL off SR */
                wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
                                        pineview_display_hplloff_wm.fifo_size,
                                        pixel_size, latency->display_hpll_disable);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_HPLL_SR_MASK;
                reg |= wm & DSPFW_HPLL_SR_MASK;
                I915_WRITE(DSPFW3, reg);

                /* cursor HPLL off SR */
                wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
                                        pineview_display_hplloff_wm.fifo_size,
                                        pixel_size, latency->cursor_hpll_disable);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_HPLL_CURSOR_MASK;
                reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
                I915_WRITE(DSPFW3, reg);
                DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

                /* activate cxsr */
                I915_WRITE(DSPFW3,
                           I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
                DRM_DEBUG_KMS("Self-refresh is enabled\n");
        } else {
                pineview_disable_cxsr(dev);
                DRM_DEBUG_KMS("Self-refresh is disabled\n");
        }
}

static bool g4x_compute_wm0(struct drm_device *dev,
                            int plane,
                            const struct intel_watermark_params *display,
                            int display_latency_ns,
                            const struct intel_watermark_params *cursor,
                            int cursor_latency_ns,
                            int *plane_wm,
                            int *cursor_wm)
{
        struct drm_crtc *crtc;
        int htotal, hdisplay, clock, pixel_size;
        int line_time_us, line_count;
        int entries, tlb_miss;

        crtc = intel_get_crtc_for_plane(dev, plane);
        if (!intel_crtc_active(crtc)) {
                *cursor_wm = cursor->guard_size;
                *plane_wm = display->guard_size;
                return false;
        }

        htotal = crtc->mode.htotal;
        hdisplay = crtc->mode.hdisplay;
        clock = crtc->mode.clock;
        pixel_size = crtc->fb->bits_per_pixel / 8;

        /* Use the small buffer method to calculate plane watermark */
        entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
        tlb_miss = display->fifo_size * display->cacheline_size - hdisplay * 8;
        if (tlb_miss > 0)
                entries += tlb_miss;
        entries = DIV_ROUND_UP(entries, display->cacheline_size);
        *plane_wm = entries + display->guard_size;
        if (*plane_wm > (int)display->max_wm)
                *plane_wm = display->max_wm;

        /* Use the large buffer method to calculate cursor watermark */
        line_time_us = ((htotal * 1000) / clock);
        line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
        entries = line_count * 64 * pixel_size;
        tlb_miss = cursor->fifo_size * cursor->cacheline_size - hdisplay * 8;
        if (tlb_miss > 0)
                entries += tlb_miss;
        entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
        *cursor_wm = entries + cursor->guard_size;
        if (*cursor_wm > (int)cursor->max_wm)
                *cursor_wm = (int)cursor->max_wm;

        return true;
}
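
/* Worked example for the small buffer method above (illustrative numbers,
 * assuming a 64-byte cacheline and ignoring the TLB-miss correction):
 * clock = 100000 kHz, pixel_size = 4, display_latency_ns = 5000:
 *
 *   entries = (100000 * 4 / 1000) * 5000 / 1000 = 2000 bytes
 *           -> DIV_ROUND_UP(2000, 64) = 32 cachelines
 *
 * so *plane_wm = 32 + guard_size.
 */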

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
                           int display_wm, int cursor_wm,
                           const struct intel_watermark_params *display,
                           const struct intel_watermark_params *cursor)
{
        DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
                      display_wm, cursor_wm);

        if (display_wm > display->max_wm) {
                DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
                              display_wm, display->max_wm);
                return false;
        }

        if (cursor_wm > cursor->max_wm) {
                DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
                              cursor_wm, cursor->max_wm);
                return false;
        }

        if (!(display_wm || cursor_wm)) {
                DRM_DEBUG_KMS("SR latency is 0, disabling\n");
                return false;
        }

        return true;
}

static bool g4x_compute_srwm(struct drm_device *dev,
                             int plane,
                             int latency_ns,
                             const struct intel_watermark_params *display,
                             const struct intel_watermark_params *cursor,
                             int *display_wm, int *cursor_wm)
{
        struct drm_crtc *crtc;
        int hdisplay, htotal, pixel_size, clock;
        unsigned long line_time_us;
        int line_count, line_size;
        int small, large;
        int entries;

        if (!latency_ns) {
                *display_wm = *cursor_wm = 0;
                return false;
        }

        crtc = intel_get_crtc_for_plane(dev, plane);
        hdisplay = crtc->mode.hdisplay;
        htotal = crtc->mode.htotal;
        clock = crtc->mode.clock;
        pixel_size = crtc->fb->bits_per_pixel / 8;

        line_time_us = (htotal * 1000) / clock;
        line_count = (latency_ns / line_time_us + 1000) / 1000;
        line_size = hdisplay * pixel_size;

        /* Use the minimum of the small and large buffer method for primary */
        small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
        large = line_count * line_size;

        entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
        *display_wm = entries + display->guard_size;

        /* calculate the self-refresh watermark for display cursor */
        entries = line_count * pixel_size * 64;
        entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
        *cursor_wm = entries + cursor->guard_size;

        return g4x_check_srwm(dev,
                              *display_wm, *cursor_wm,
                              display, cursor);
}
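
/* Worked example for the latency-to-lines conversion above (illustrative
 * numbers): htotal = 2200, clock = 148500 kHz, latency_ns = 12000 (the
 * sr_latency_ns used by the callers below):
 *
 *   line_time_us = (2200 * 1000) / 148500 = 14   (integer division)
 *   line_count   = (12000 / 14 + 1000) / 1000 = 1
 *
 * so a 12 us self-refresh latency costs roughly one scanline of data.
 */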

static bool vlv_compute_drain_latency(struct drm_device *dev,
                                      int plane,
                                      int *plane_prec_mult,
                                      int *plane_dl,
                                      int *cursor_prec_mult,
                                      int *cursor_dl)
{
        struct drm_crtc *crtc;
        int clock, pixel_size;
        int entries;

        crtc = intel_get_crtc_for_plane(dev, plane);
        if (!intel_crtc_active(crtc))
                return false;

        clock = crtc->mode.clock;       /* VESA DOT Clock */
        pixel_size = crtc->fb->bits_per_pixel / 8;      /* BPP */

        entries = (clock / 1000) * pixel_size;
        *plane_prec_mult = (entries > 256) ?
                DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
        *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
                                                     pixel_size);

        entries = (clock / 1000) * 4;   /* BPP is always 4 for cursor */
        *cursor_prec_mult = (entries > 256) ?
                DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
        *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);

        return true;
}
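
/* Worked example (illustrative numbers, assuming DRAIN_LATENCY_PRECISION_32
 * evaluates to 32): clock = 100000 kHz and pixel_size = 4 give
 * entries = 100 * 4 = 400 > 256, so the 32x precision multiplier is chosen
 * and
 *
 *   plane_dl = (64 * 32 * 4) / (100 * 4) = 8192 / 400 = 20
 *
 * i.e. a drain latency of 20 units at 32x precision is programmed.
 */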

/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */

static void vlv_update_drain_latency(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int planea_prec, planea_dl, planeb_prec, planeb_dl;
        int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
        int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
                                                  either 16 or 32 */

        /* For plane A, Cursor A */
        if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
                                      &cursor_prec_mult, &cursora_dl)) {
                cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
                        DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
                planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
                        DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;

                I915_WRITE(VLV_DDL1, cursora_prec |
                                (cursora_dl << DDL_CURSORA_SHIFT) |
                                planea_prec | planea_dl);
        }

        /* For plane B, Cursor B */
        if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
                                      &cursor_prec_mult, &cursorb_dl)) {
                cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
                        DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
                planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
                        DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;

                I915_WRITE(VLV_DDL2, cursorb_prec |
                                (cursorb_dl << DDL_CURSORB_SHIFT) |
                                planeb_prec | planeb_dl);
        }
}

#define single_plane_enabled(mask) is_power_of_2(mask)
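
/* e.g. enabled == 0x1 (pipe A only) or 0x2 (pipe B only) passes, while
 * 0x0 (no pipe) and 0x3 (both pipes) fail, since is_power_of_2() is true
 * only for a mask with exactly one bit set.
 */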

static void valleyview_update_wm(struct drm_device *dev)
{
        static const int sr_latency_ns = 12000;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
        int plane_sr, cursor_sr;
        int ignore_plane_sr, ignore_cursor_sr;
        unsigned int enabled = 0;

        vlv_update_drain_latency(dev);

        if (g4x_compute_wm0(dev, PIPE_A,
                            &valleyview_wm_info, latency_ns,
                            &valleyview_cursor_wm_info, latency_ns,
                            &planea_wm, &cursora_wm))
                enabled |= 1 << PIPE_A;

        if (g4x_compute_wm0(dev, PIPE_B,
                            &valleyview_wm_info, latency_ns,
                            &valleyview_cursor_wm_info, latency_ns,
                            &planeb_wm, &cursorb_wm))
                enabled |= 1 << PIPE_B;

        if (single_plane_enabled(enabled) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
                             sr_latency_ns,
                             &valleyview_wm_info,
                             &valleyview_cursor_wm_info,
                             &plane_sr, &ignore_cursor_sr) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
                             2*sr_latency_ns,
                             &valleyview_wm_info,
                             &valleyview_cursor_wm_info,
                             &ignore_plane_sr, &cursor_sr)) {
                I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
        } else {
                I915_WRITE(FW_BLC_SELF_VLV,
                           I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
                plane_sr = cursor_sr = 0;
        }

        DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
                      planea_wm, cursora_wm,
                      planeb_wm, cursorb_wm,
                      plane_sr, cursor_sr);

        I915_WRITE(DSPFW1,
                   (plane_sr << DSPFW_SR_SHIFT) |
                   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
                   (planeb_wm << DSPFW_PLANEB_SHIFT) |
                   planea_wm);
        I915_WRITE(DSPFW2,
                   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
                   (cursora_wm << DSPFW_CURSORA_SHIFT));
        I915_WRITE(DSPFW3,
                   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
                   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}

static void g4x_update_wm(struct drm_device *dev)
{
        static const int sr_latency_ns = 12000;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
        int plane_sr, cursor_sr;
        unsigned int enabled = 0;

        if (g4x_compute_wm0(dev, PIPE_A,
                            &g4x_wm_info, latency_ns,
                            &g4x_cursor_wm_info, latency_ns,
                            &planea_wm, &cursora_wm))
                enabled |= 1 << PIPE_A;

        if (g4x_compute_wm0(dev, PIPE_B,
                            &g4x_wm_info, latency_ns,
                            &g4x_cursor_wm_info, latency_ns,
                            &planeb_wm, &cursorb_wm))
                enabled |= 1 << PIPE_B;

        if (single_plane_enabled(enabled) &&
            g4x_compute_srwm(dev, ffs(enabled) - 1,
                             sr_latency_ns,
                             &g4x_wm_info,
                             &g4x_cursor_wm_info,
                             &plane_sr, &cursor_sr)) {
                I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
        } else {
                I915_WRITE(FW_BLC_SELF,
                           I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
                plane_sr = cursor_sr = 0;
        }

        DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
                      planea_wm, cursora_wm,
                      planeb_wm, cursorb_wm,
                      plane_sr, cursor_sr);

        I915_WRITE(DSPFW1,
                   (plane_sr << DSPFW_SR_SHIFT) |
                   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
                   (planeb_wm << DSPFW_PLANEB_SHIFT) |
                   planea_wm);
        I915_WRITE(DSPFW2,
                   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
                   (cursora_wm << DSPFW_CURSORA_SHIFT));
        /* HPLL off in SR has some issues on G4x... disable it */
        I915_WRITE(DSPFW3,
                   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
1462                    (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1463 }
1464
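/*
 * On 965 the per-plane FIFO watermarks are fixed at 8; only the
 * self-refresh plane/cursor watermarks are computed here, and only
 * for a single enabled CRTC.
 */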
1465 static void i965_update_wm(struct drm_device *dev)
1466 {
1467         struct drm_i915_private *dev_priv = dev->dev_private;
1468         struct drm_crtc *crtc;
1469         int srwm = 1;
1470         int cursor_sr = 16;
1471
1472         /* Calculate SR entries for single-plane configs */
1473         crtc = single_enabled_crtc(dev);
1474         if (crtc) {
1475                 /* self-refresh has much higher latency */
1476                 static const int sr_latency_ns = 12000;
1477                 int clock = crtc->mode.clock;
1478                 int htotal = crtc->mode.htotal;
1479                 int hdisplay = crtc->mode.hdisplay;
1480                 int pixel_size = crtc->fb->bits_per_pixel / 8;
1481                 unsigned long line_time_us;
1482                 int entries;
1483
1484                 line_time_us = ((htotal * 1000) / clock);
1485
1486                 /* Use ns/us then divide to preserve precision */
1487                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1488                         pixel_size * hdisplay;
1489                 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
1490                 srwm = I965_FIFO_SIZE - entries;
1491                 if (srwm < 0)
1492                         srwm = 1;
1493                 srwm &= 0x1ff;
1494                 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
1495                               entries, srwm);
1496
1497                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1498                         pixel_size * 64;
1499                 entries = DIV_ROUND_UP(entries,
1500                                           i965_cursor_wm_info.cacheline_size);
1501                 cursor_sr = i965_cursor_wm_info.fifo_size -
1502                         (entries + i965_cursor_wm_info.guard_size);
1503
1504                 if (cursor_sr > i965_cursor_wm_info.max_wm)
1505                         cursor_sr = i965_cursor_wm_info.max_wm;
1506
1507                 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
1508                               "cursor %d\n", srwm, cursor_sr);
1509
1510                 if (IS_CRESTLINE(dev))
1511                         I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1512         } else {
1513                 /* Turn off self refresh if both pipes are enabled */
1514                 if (IS_CRESTLINE(dev))
1515                         I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
1516                                    & ~FW_BLC_SELF_EN);
1517         }
1518
1519         DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
1520                       srwm);
1521
1522         /* 965 has limitations... */
1523         I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
1524                    (8 << 16) | (8 << 8) | (8 << 0));
1525         I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
1526         /* update cursor SR watermark */
1527         I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1528 }
1529
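/*
 * Pre-965 path: compute one FIFO watermark per plane from that plane's
 * FIFO size, plus a single self-refresh watermark that is only used
 * when exactly one plane is enabled.
 */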
1530 static void i9xx_update_wm(struct drm_device *dev)
1531 {
1532         struct drm_i915_private *dev_priv = dev->dev_private;
1533         const struct intel_watermark_params *wm_info;
1534         uint32_t fwater_lo;
1535         uint32_t fwater_hi;
1536         int cwm, srwm = 1;
1537         int fifo_size;
1538         int planea_wm, planeb_wm;
1539         struct drm_crtc *crtc, *enabled = NULL;
1540
1541         if (IS_I945GM(dev))
1542                 wm_info = &i945_wm_info;
1543         else if (!IS_GEN2(dev))
1544                 wm_info = &i915_wm_info;
1545         else
1546                 wm_info = &i855_wm_info;
1547
1548         fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1549         crtc = intel_get_crtc_for_plane(dev, 0);
1550         if (intel_crtc_active(crtc)) {
1551                 int cpp = crtc->fb->bits_per_pixel / 8;
1552                 if (IS_GEN2(dev))
1553                         cpp = 4;
1554
1555                 planea_wm = intel_calculate_wm(crtc->mode.clock,
1556                                                wm_info, fifo_size, cpp,
1557                                                latency_ns);
1558                 enabled = crtc;
1559         } else
1560                 planea_wm = fifo_size - wm_info->guard_size;
1561
1562         fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1563         crtc = intel_get_crtc_for_plane(dev, 1);
1564         if (intel_crtc_active(crtc)) {
1565                 int cpp = crtc->fb->bits_per_pixel / 8;
1566                 if (IS_GEN2(dev))
1567                         cpp = 4;
1568
1569                 planeb_wm = intel_calculate_wm(crtc->mode.clock,
1570                                                wm_info, fifo_size, cpp,
1571                                                latency_ns);
1572                 if (enabled == NULL)
1573                         enabled = crtc;
1574                 else
1575                         enabled = NULL;
1576         } else
1577                 planeb_wm = fifo_size - wm_info->guard_size;
1578
1579         DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1580
1581         /*
1582          * Overlay gets an aggressive default since video jitter is bad.
1583          */
1584         cwm = 2;
1585
1586         /* Play safe and disable self-refresh before adjusting watermarks. */
1587         if (IS_I945G(dev) || IS_I945GM(dev))
1588                 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
1589         else if (IS_I915GM(dev))
1590                 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
1591
1592         /* Calculate SR entries for single-plane configs */
1593         if (HAS_FW_BLC(dev) && enabled) {
1594                 /* self-refresh has much higher latency */
1595                 static const int sr_latency_ns = 6000;
1596                 int clock = enabled->mode.clock;
1597                 int htotal = enabled->mode.htotal;
1598                 int hdisplay = enabled->mode.hdisplay;
1599                 int pixel_size = enabled->fb->bits_per_pixel / 8;
1600                 unsigned long line_time_us;
1601                 int entries;
1602
1603                 line_time_us = (htotal * 1000) / clock;
1604
1605                 /* Use ns/us then divide to preserve precision */
1606                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1607                         pixel_size * hdisplay;
1608                 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
1609                 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
1610                 srwm = wm_info->fifo_size - entries;
1611                 if (srwm < 0)
1612                         srwm = 1;
1613
1614                 if (IS_I945G(dev) || IS_I945GM(dev))
1615                         I915_WRITE(FW_BLC_SELF,
1616                                    FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
1617                 else if (IS_I915GM(dev))
1618                         I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
1619         }
1620
1621         DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
1622                       planea_wm, planeb_wm, cwm, srwm);
1623
1624         fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
1625         fwater_hi = (cwm & 0x1f);
1626
1627         /* Set request length to 8 cachelines per fetch */
1628         fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
1629         fwater_hi = fwater_hi | (1 << 8);
1630
1631         I915_WRITE(FW_BLC, fwater_lo);
1632         I915_WRITE(FW_BLC2, fwater_hi);
1633
1634         if (HAS_FW_BLC(dev)) {
1635                 if (enabled) {
1636                         if (IS_I945G(dev) || IS_I945GM(dev))
1637                                 I915_WRITE(FW_BLC_SELF,
1638                                            FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
1639                         else if (IS_I915GM(dev))
1640                                 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
1641                         DRM_DEBUG_KMS("memory self refresh enabled\n");
1642                 } else
1643                         DRM_DEBUG_KMS("memory self refresh disabled\n");
1644         }
1645 }
1646
1647 static void i830_update_wm(struct drm_device *dev)
1648 {
1649         struct drm_i915_private *dev_priv = dev->dev_private;
1650         struct drm_crtc *crtc;
1651         uint32_t fwater_lo;
1652         int planea_wm;
1653
1654         crtc = single_enabled_crtc(dev);
1655         if (crtc == NULL)
1656                 return;
1657
1658         planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
1659                                        dev_priv->display.get_fifo_size(dev, 0),
1660                                        4, latency_ns);
1661         fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1662         fwater_lo |= (3<<8) | planea_wm;
1663
1664         DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
1665
1666         I915_WRITE(FW_BLC, fwater_lo);
1667 }
1668
1669 #define ILK_LP0_PLANE_LATENCY           700
1670 #define ILK_LP0_CURSOR_LATENCY          1300
1671
1672 /*
1673  * Check the wm result.
1674  *
1675  * If any calculated watermark value is larger than the maximum value that
1676  * can be programmed into the associated watermark register, that watermark
1677  * must be disabled.
1678  */
1679 static bool ironlake_check_srwm(struct drm_device *dev, int level,
1680                                 int fbc_wm, int display_wm, int cursor_wm,
1681                                 const struct intel_watermark_params *display,
1682                                 const struct intel_watermark_params *cursor)
1683 {
1684         struct drm_i915_private *dev_priv = dev->dev_private;
1685
1686         DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
1687                       " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
1688
1689         if (fbc_wm > SNB_FBC_MAX_SRWM) {
1690                 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
1691                               fbc_wm, SNB_FBC_MAX_SRWM, level);
1692
1693                 /* fbc has its own way to disable FBC WM */
1694                 I915_WRITE(DISP_ARB_CTL,
1695                            I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
1696                 return false;
1697         } else if (INTEL_INFO(dev)->gen >= 6) {
1698                 /* enable FBC WM (except on ILK, where it must remain off) */
1699                 I915_WRITE(DISP_ARB_CTL,
1700                            I915_READ(DISP_ARB_CTL) & ~DISP_FBC_WM_DIS);
1701         }
1702
1703         if (display_wm > display->max_wm) {
1704                 DRM_DEBUG_KMS("display watermark(%d) is too large(%lu), disabling wm%d+\n",
1705                               display_wm, display->max_wm, level);
1706                 return false;
1707         }
1708
1709         if (cursor_wm > cursor->max_wm) {
1710                 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%lu), disabling wm%d+\n",
1711                               cursor_wm, cursor->max_wm, level);
1712                 return false;
1713         }
1714
1715         if (!(fbc_wm || display_wm || cursor_wm)) {
1716                 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
1717                 return false;
1718         }
1719
1720         return true;
1721 }
1722
1723 /*
1724  * Compute the watermark values for WM[1-3].
1725  */
1726 static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1727                                   int latency_ns,
1728                                   const struct intel_watermark_params *display,
1729                                   const struct intel_watermark_params *cursor,
1730                                   int *fbc_wm, int *display_wm, int *cursor_wm)
1731 {
1732         struct drm_crtc *crtc;
1733         unsigned long line_time_us;
1734         int hdisplay, htotal, pixel_size, clock;
1735         int line_count, line_size;
1736         int small, large;
1737         int entries;
1738
1739         if (!latency_ns) {
1740                 *fbc_wm = *display_wm = *cursor_wm = 0;
1741                 return false;
1742         }
1743
1744         crtc = intel_get_crtc_for_plane(dev, plane);
1745         hdisplay = crtc->mode.hdisplay;
1746         htotal = crtc->mode.htotal;
1747         clock = crtc->mode.clock;
1748         pixel_size = crtc->fb->bits_per_pixel / 8;
1749
1750         line_time_us = (htotal * 1000) / clock;
1751         line_count = (latency_ns / line_time_us + 1000) / 1000;
1752         line_size = hdisplay * pixel_size;
1753
1754         /* Use the minimum of the small and large buffer method for primary */
1755         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1756         large = line_count * line_size;
1757
1758         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1759         *display_wm = entries + display->guard_size;
1760
1761         /*
1762          * Spec says:
1763          * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
1764          */
1765         *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
1766
1767         /* calculate the self-refresh watermark for display cursor */
1768         entries = line_count * pixel_size * 64;
1769         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1770         *cursor_wm = entries + cursor->guard_size;
1771
1772         return ironlake_check_srwm(dev, level,
1773                                    *fbc_wm, *display_wm, *cursor_wm,
1774                                    display, cursor);
1775 }
1776
1777 static void ironlake_update_wm(struct drm_device *dev)
1778 {
1779         struct drm_i915_private *dev_priv = dev->dev_private;
1780         int fbc_wm, plane_wm, cursor_wm;
1781         unsigned int enabled;
1782
1783         enabled = 0;
1784         if (g4x_compute_wm0(dev, PIPE_A,
1785                             &ironlake_display_wm_info,
1786                             ILK_LP0_PLANE_LATENCY,
1787                             &ironlake_cursor_wm_info,
1788                             ILK_LP0_CURSOR_LATENCY,
1789                             &plane_wm, &cursor_wm)) {
1790                 I915_WRITE(WM0_PIPEA_ILK,
1791                            (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1792                 DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
1793                               " plane %d, cursor: %d\n",
1794                               plane_wm, cursor_wm);
1795                 enabled |= 1 << PIPE_A;
1796         }
1797
1798         if (g4x_compute_wm0(dev, PIPE_B,
1799                             &ironlake_display_wm_info,
1800                             ILK_LP0_PLANE_LATENCY,
1801                             &ironlake_cursor_wm_info,
1802                             ILK_LP0_CURSOR_LATENCY,
1803                             &plane_wm, &cursor_wm)) {
1804                 I915_WRITE(WM0_PIPEB_ILK,
1805                            (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1806                 DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
1807                               " plane %d, cursor: %d\n",
1808                               plane_wm, cursor_wm);
1809                 enabled |= 1 << PIPE_B;
1810         }
1811
1812         /*
1813          * Calculate and update the self-refresh watermark only when one
1814          * display plane is used.
1815          */
1816         I915_WRITE(WM3_LP_ILK, 0);
1817         I915_WRITE(WM2_LP_ILK, 0);
1818         I915_WRITE(WM1_LP_ILK, 0);
1819
1820         if (!single_plane_enabled(enabled))
1821                 return;
1822         enabled = ffs(enabled) - 1;
1823
1824         /* WM1 */
1825         if (!ironlake_compute_srwm(dev, 1, enabled,
1826                                    ILK_READ_WM1_LATENCY() * 500,
1827                                    &ironlake_display_srwm_info,
1828                                    &ironlake_cursor_srwm_info,
1829                                    &fbc_wm, &plane_wm, &cursor_wm))
1830                 return;
1831
1832         I915_WRITE(WM1_LP_ILK,
1833                    WM1_LP_SR_EN |
1834                    (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1835                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1836                    (plane_wm << WM1_LP_SR_SHIFT) |
1837                    cursor_wm);
1838
1839         /* WM2 */
1840         if (!ironlake_compute_srwm(dev, 2, enabled,
1841                                    ILK_READ_WM2_LATENCY() * 500,
1842                                    &ironlake_display_srwm_info,
1843                                    &ironlake_cursor_srwm_info,
1844                                    &fbc_wm, &plane_wm, &cursor_wm))
1845                 return;
1846
1847         I915_WRITE(WM2_LP_ILK,
1848                    WM2_LP_EN |
1849                    (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1850                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1851                    (plane_wm << WM1_LP_SR_SHIFT) |
1852                    cursor_wm);
1853
1854         /*
1855          * WM3 is unsupported on ILK, probably because we don't have latency
1856          * data for that power state
1857          */
1858 }
1859
1860 static void sandybridge_update_wm(struct drm_device *dev)
1861 {
1862         struct drm_i915_private *dev_priv = dev->dev_private;
1863         int latency = SNB_READ_WM0_LATENCY() * 100;     /* in 0.1us units */
1864         u32 val;
1865         int fbc_wm, plane_wm, cursor_wm;
1866         unsigned int enabled;
1867
1868         enabled = 0;
1869         if (g4x_compute_wm0(dev, PIPE_A,
1870                             &sandybridge_display_wm_info, latency,
1871                             &sandybridge_cursor_wm_info, latency,
1872                             &plane_wm, &cursor_wm)) {
1873                 val = I915_READ(WM0_PIPEA_ILK);
1874                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1875                 I915_WRITE(WM0_PIPEA_ILK, val |
1876                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1877                 DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
1878                               " plane %d, cursor: %d\n",
1879                               plane_wm, cursor_wm);
1880                 enabled |= 1 << PIPE_A;
1881         }
1882
1883         if (g4x_compute_wm0(dev, PIPE_B,
1884                             &sandybridge_display_wm_info, latency,
1885                             &sandybridge_cursor_wm_info, latency,
1886                             &plane_wm, &cursor_wm)) {
1887                 val = I915_READ(WM0_PIPEB_ILK);
1888                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1889                 I915_WRITE(WM0_PIPEB_ILK, val |
1890                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1891                 DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
1892                               " plane %d, cursor: %d\n",
1893                               plane_wm, cursor_wm);
1894                 enabled |= 1 << PIPE_B;
1895         }
1896
1897         /*
1898          * Calculate and update the self-refresh watermark only when one
1899          * display plane is used.
1900          *
1901          * SNB supports 3 levels of watermarks.
1902          *
1903          * WM1/WM2/WM3 watermarks have to be enabled in ascending order,
1904          * and disabled in descending order.
1905          *
1906          */
1907         I915_WRITE(WM3_LP_ILK, 0);
1908         I915_WRITE(WM2_LP_ILK, 0);
1909         I915_WRITE(WM1_LP_ILK, 0);
1910
1911         if (!single_plane_enabled(enabled) ||
1912             dev_priv->sprite_scaling_enabled)
1913                 return;
1914         enabled = ffs(enabled) - 1;
1915
1916         /* WM1 */
1917         if (!ironlake_compute_srwm(dev, 1, enabled,
1918                                    SNB_READ_WM1_LATENCY() * 500,
1919                                    &sandybridge_display_srwm_info,
1920                                    &sandybridge_cursor_srwm_info,
1921                                    &fbc_wm, &plane_wm, &cursor_wm))
1922                 return;
1923
1924         I915_WRITE(WM1_LP_ILK,
1925                    WM1_LP_SR_EN |
1926                    (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1927                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1928                    (plane_wm << WM1_LP_SR_SHIFT) |
1929                    cursor_wm);
1930
1931         /* WM2 */
1932         if (!ironlake_compute_srwm(dev, 2, enabled,
1933                                    SNB_READ_WM2_LATENCY() * 500,
1934                                    &sandybridge_display_srwm_info,
1935                                    &sandybridge_cursor_srwm_info,
1936                                    &fbc_wm, &plane_wm, &cursor_wm))
1937                 return;
1938
1939         I915_WRITE(WM2_LP_ILK,
1940                    WM2_LP_EN |
1941                    (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1942                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1943                    (plane_wm << WM1_LP_SR_SHIFT) |
1944                    cursor_wm);
1945
1946         /* WM3 */
1947         if (!ironlake_compute_srwm(dev, 3, enabled,
1948                                    SNB_READ_WM3_LATENCY() * 500,
1949                                    &sandybridge_display_srwm_info,
1950                                    &sandybridge_cursor_srwm_info,
1951                                    &fbc_wm, &plane_wm, &cursor_wm))
1952                 return;
1953
1954         I915_WRITE(WM3_LP_ILK,
1955                    WM3_LP_EN |
1956                    (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1957                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1958                    (plane_wm << WM1_LP_SR_SHIFT) |
1959                    cursor_wm);
1960 }
1961
1962 static void ivybridge_update_wm(struct drm_device *dev)
1963 {
1964         struct drm_i915_private *dev_priv = dev->dev_private;
1965         int latency = SNB_READ_WM0_LATENCY() * 100;     /* in 0.1us units */
1966         u32 val;
1967         int fbc_wm, plane_wm, cursor_wm;
1968         int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
1969         unsigned int enabled;
1970
1971         enabled = 0;
1972         if (g4x_compute_wm0(dev, PIPE_A,
1973                             &sandybridge_display_wm_info, latency,
1974                             &sandybridge_cursor_wm_info, latency,
1975                             &plane_wm, &cursor_wm)) {
1976                 val = I915_READ(WM0_PIPEA_ILK);
1977                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1978                 I915_WRITE(WM0_PIPEA_ILK, val |
1979                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1980                 DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
1981                               " plane %d, cursor: %d\n",
1982                               plane_wm, cursor_wm);
1983                 enabled |= 1 << PIPE_A;
1984         }
1985
1986         if (g4x_compute_wm0(dev, PIPE_B,
1987                             &sandybridge_display_wm_info, latency,
1988                             &sandybridge_cursor_wm_info, latency,
1989                             &plane_wm, &cursor_wm)) {
1990                 val = I915_READ(WM0_PIPEB_ILK);
1991                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1992                 I915_WRITE(WM0_PIPEB_ILK, val |
1993                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1994                 DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
1995                               " plane %d, cursor: %d\n",
1996                               plane_wm, cursor_wm);
1997                 enabled |= 1 << PIPE_B;
1998         }
1999
2000         if (g4x_compute_wm0(dev, PIPE_C,
2001                             &sandybridge_display_wm_info, latency,
2002                             &sandybridge_cursor_wm_info, latency,
2003                             &plane_wm, &cursor_wm)) {
2004                 val = I915_READ(WM0_PIPEC_IVB);
2005                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
2006                 I915_WRITE(WM0_PIPEC_IVB, val |
2007                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
2008                 DRM_DEBUG_KMS("FIFO watermarks for pipe C -"
2009                               " plane %d, cursor: %d\n",
2010                               plane_wm, cursor_wm);
2011                 enabled |= 1 << PIPE_C;
2012         }
2013
2014         /*
2015          * Calculate and update the self-refresh watermark only when one
2016          * display plane is used.
2017          *
2018          * SNB supports 3 levels of watermarks.
2019          *
2020          * WM1/WM2/WM3 watermarks have to be enabled in ascending order,
2021          * and disabled in descending order.
2022          *
2023          */
2024         I915_WRITE(WM3_LP_ILK, 0);
2025         I915_WRITE(WM2_LP_ILK, 0);
2026         I915_WRITE(WM1_LP_ILK, 0);
2027
2028         if (!single_plane_enabled(enabled) ||
2029             dev_priv->sprite_scaling_enabled)
2030                 return;
2031         enabled = ffs(enabled) - 1;
2032
2033         /* WM1 */
2034         if (!ironlake_compute_srwm(dev, 1, enabled,
2035                                    SNB_READ_WM1_LATENCY() * 500,
2036                                    &sandybridge_display_srwm_info,
2037                                    &sandybridge_cursor_srwm_info,
2038                                    &fbc_wm, &plane_wm, &cursor_wm))
2039                 return;
2040
2041         I915_WRITE(WM1_LP_ILK,
2042                    WM1_LP_SR_EN |
2043                    (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
2044                    (fbc_wm << WM1_LP_FBC_SHIFT) |
2045                    (plane_wm << WM1_LP_SR_SHIFT) |
2046                    cursor_wm);
2047
2048         /* WM2 */
2049         if (!ironlake_compute_srwm(dev, 2, enabled,
2050                                    SNB_READ_WM2_LATENCY() * 500,
2051                                    &sandybridge_display_srwm_info,
2052                                    &sandybridge_cursor_srwm_info,
2053                                    &fbc_wm, &plane_wm, &cursor_wm))
2054                 return;
2055
2056         I915_WRITE(WM2_LP_ILK,
2057                    WM2_LP_EN |
2058                    (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
2059                    (fbc_wm << WM1_LP_FBC_SHIFT) |
2060                    (plane_wm << WM1_LP_SR_SHIFT) |
2061                    cursor_wm);
2062
2063         /* WM3, note we have to correct the cursor latency */
2064         if (!ironlake_compute_srwm(dev, 3, enabled,
2065                                    SNB_READ_WM3_LATENCY() * 500,
2066                                    &sandybridge_display_srwm_info,
2067                                    &sandybridge_cursor_srwm_info,
2068                                    &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
2069             !ironlake_compute_srwm(dev, 3, enabled,
2070                                    2 * SNB_READ_WM3_LATENCY() * 500,
2071                                    &sandybridge_display_srwm_info,
2072                                    &sandybridge_cursor_srwm_info,
2073                                    &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
2074                 return;
2075
2076         I915_WRITE(WM3_LP_ILK,
2077                    WM3_LP_EN |
2078                    (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
2079                    (fbc_wm << WM1_LP_FBC_SHIFT) |
2080                    (plane_wm << WM1_LP_SR_SHIFT) |
2081                    cursor_wm);
2082 }
2083
2084 static uint32_t hsw_wm_get_pixel_rate(struct drm_device *dev,
2085                                       struct drm_crtc *crtc)
2086 {
2087         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2088         uint32_t pixel_rate, pfit_size;
2089
2090         pixel_rate = intel_crtc->config.adjusted_mode.clock;
2091
2092         /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
2093          * adjust the pixel_rate here. */
2094
2095         pfit_size = intel_crtc->config.pch_pfit.size;
2096         if (pfit_size) {
2097                 uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
2098
2099                 pipe_w = intel_crtc->config.requested_mode.hdisplay;
2100                 pipe_h = intel_crtc->config.requested_mode.vdisplay;
2101                 pfit_w = (pfit_size >> 16) & 0xFFFF;
2102                 pfit_h = pfit_size & 0xFFFF;
2103                 if (pipe_w < pfit_w)
2104                         pipe_w = pfit_w;
2105                 if (pipe_h < pfit_h)
2106                         pipe_h = pfit_h;
2107
2108                 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
2109                                      pfit_w * pfit_h);
2110         }
2111
2112         return pixel_rate;
2113 }
2114
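/*
 * WM "method 1": bytes drained during the memory latency period.
 * pixel_rate is in kHz and latency in 0.1us units, so dividing by
 * 64 * 10000 yields 64-byte cache lines, plus a constant 2.
 */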
2115 static uint32_t hsw_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
2116                                uint32_t latency)
2117 {
2118         uint64_t ret;
2119
2120         ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
2121         ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
2122
2123         return ret;
2124 }
2125
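/*
 * WM "method 2": full display lines elapsed during the latency period
 * (rounded up), converted into 64-byte cache lines for the fetched
 * width, plus a constant 2.
 */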
2126 static uint32_t hsw_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
2127                                uint32_t horiz_pixels, uint8_t bytes_per_pixel,
2128                                uint32_t latency)
2129 {
2130         uint32_t ret;
2131
2132         ret = (latency * pixel_rate) / (pipe_htotal * 10000);
2133         ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
2134         ret = DIV_ROUND_UP(ret, 64) + 2;
2135         return ret;
2136 }
2137
2138 static uint32_t hsw_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
2139                            uint8_t bytes_per_pixel)
2140 {
2141         return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
2142 }
2143
2144 struct hsw_pipe_wm_parameters {
2145         bool active;
2146         bool sprite_enabled;
2147         uint8_t pri_bytes_per_pixel;
2148         uint8_t spr_bytes_per_pixel;
2149         uint8_t cur_bytes_per_pixel;
2150         uint32_t pri_horiz_pixels;
2151         uint32_t spr_horiz_pixels;
2152         uint32_t cur_horiz_pixels;
2153         uint32_t pipe_htotal;
2154         uint32_t pixel_rate;
2155 };
2156
2157 struct hsw_wm_maximums {
2158         uint16_t pri;
2159         uint16_t spr;
2160         uint16_t cur;
2161         uint16_t fbc;
2162 };
2163
2164 struct hsw_lp_wm_result {
2165         bool enable;
2166         bool fbc_enable;
2167         uint32_t pri_val;
2168         uint32_t spr_val;
2169         uint32_t cur_val;
2170         uint32_t fbc_val;
2171 };
2172
2173 struct hsw_wm_values {
2174         uint32_t wm_pipe[3];
2175         uint32_t wm_lp[3];
2176         uint32_t wm_lp_spr[3];
2177         uint32_t wm_linetime[3];
2178         bool enable_fbc_wm;
2179 };
2180
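/* The display data buffer can be split evenly between the primary and
 * sprite planes (1/2) or mostly handed to the sprite (5/6); see the
 * hsw_wm_maximums chosen in hsw_compute_wm_parameters(). */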
2181 enum hsw_data_buf_partitioning {
2182         HSW_DATA_BUF_PART_1_2,
2183         HSW_DATA_BUF_PART_5_6,
2184 };
2185
2186 /* For both WM_PIPE and WM_LP. */
2187 static uint32_t hsw_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
2188                                    uint32_t mem_value,
2189                                    bool is_lp)
2190 {
2191         uint32_t method1, method2;
2192
2193         /* TODO: for now, assume the primary plane is always enabled. */
2194         if (!params->active)
2195                 return 0;
2196
2197         method1 = hsw_wm_method1(params->pixel_rate,
2198                                  params->pri_bytes_per_pixel,
2199                                  mem_value);
2200
2201         if (!is_lp)
2202                 return method1;
2203
2204         method2 = hsw_wm_method2(params->pixel_rate,
2205                                  params->pipe_htotal,
2206                                  params->pri_horiz_pixels,
2207                                  params->pri_bytes_per_pixel,
2208                                  mem_value);
2209
2210         return min(method1, method2);
2211 }
2212
2213 /* For both WM_PIPE and WM_LP. */
2214 static uint32_t hsw_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
2215                                    uint32_t mem_value)
2216 {
2217         uint32_t method1, method2;
2218
2219         if (!params->active || !params->sprite_enabled)
2220                 return 0;
2221
2222         method1 = hsw_wm_method1(params->pixel_rate,
2223                                  params->spr_bytes_per_pixel,
2224                                  mem_value);
2225         method2 = hsw_wm_method2(params->pixel_rate,
2226                                  params->pipe_htotal,
2227                                  params->spr_horiz_pixels,
2228                                  params->spr_bytes_per_pixel,
2229                                  mem_value);
2230         return min(method1, method2);
2231 }
2232
2233 /* For both WM_PIPE and WM_LP. */
2234 static uint32_t hsw_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
2235                                    uint32_t mem_value)
2236 {
2237         if (!params->active)
2238                 return 0;
2239
2240         return hsw_wm_method2(params->pixel_rate,
2241                               params->pipe_htotal,
2242                               params->cur_horiz_pixels,
2243                               params->cur_bytes_per_pixel,
2244                               mem_value);
2245 }
2246
2247 /* Only for WM_LP. */
2248 static uint32_t hsw_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
2249                                    uint32_t pri_val,
2250                                    uint32_t mem_value)
2251 {
2252         if (!params->active)
2253                 return 0;
2254
2255         return hsw_wm_fbc(pri_val,
2256                           params->pri_horiz_pixels,
2257                           params->pri_bytes_per_pixel);
2258 }
2259
2260 static bool hsw_compute_lp_wm(uint32_t mem_value, struct hsw_wm_maximums *max,
2261                               struct hsw_pipe_wm_parameters *params,
2262                               struct hsw_lp_wm_result *result)
2263 {
2264         enum pipe pipe;
2265         uint32_t pri_val[3], spr_val[3], cur_val[3], fbc_val[3];
2266
2267         for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) {
2268                 struct hsw_pipe_wm_parameters *p = &params[pipe];
2269
2270                 pri_val[pipe] = hsw_compute_pri_wm(p, mem_value, true);
2271                 spr_val[pipe] = hsw_compute_spr_wm(p, mem_value);
2272                 cur_val[pipe] = hsw_compute_cur_wm(p, mem_value);
2273                 fbc_val[pipe] = hsw_compute_fbc_wm(p, pri_val[pipe], mem_value);
2274         }
2275
2276         result->pri_val = max3(pri_val[0], pri_val[1], pri_val[2]);
2277         result->spr_val = max3(spr_val[0], spr_val[1], spr_val[2]);
2278         result->cur_val = max3(cur_val[0], cur_val[1], cur_val[2]);
2279         result->fbc_val = max3(fbc_val[0], fbc_val[1], fbc_val[2]);
2280
2281         if (result->fbc_val > max->fbc) {
2282                 result->fbc_enable = false;
2283                 result->fbc_val = 0;
2284         } else {
2285                 result->fbc_enable = true;
2286         }
2287
2288         result->enable = result->pri_val <= max->pri &&
2289                          result->spr_val <= max->spr &&
2290                          result->cur_val <= max->cur;
2291         return result->enable;
2292 }
2293
2294 static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
2295                                     uint32_t mem_value, enum pipe pipe,
2296                                     struct hsw_pipe_wm_parameters *params)
2297 {
2298         uint32_t pri_val, cur_val, spr_val;
2299
2300         pri_val = hsw_compute_pri_wm(params, mem_value, false);
2301         spr_val = hsw_compute_spr_wm(params, mem_value);
2302         cur_val = hsw_compute_cur_wm(params, mem_value);
2303
2304         WARN(pri_val > 127,
2305              "Primary WM error, mode not supported for pipe %c\n",
2306              pipe_name(pipe));
2307         WARN(spr_val > 127,
2308              "Sprite WM error, mode not supported for pipe %c\n",
2309              pipe_name(pipe));
2310         WARN(cur_val > 63,
2311              "Cursor WM error, mode not supported for pipe %c\n",
2312              pipe_name(pipe));
2313
2314         return (pri_val << WM0_PIPE_PLANE_SHIFT) |
2315                (spr_val << WM0_PIPE_SPRITE_SHIFT) |
2316                cur_val;
2317 }
2318
2319 static uint32_t
2320 hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2321 {
2322         struct drm_i915_private *dev_priv = dev->dev_private;
2323         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2324         struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
2325         u32 linetime, ips_linetime;
2326
2327         if (!intel_crtc_active(crtc))
2328                 return 0;
2329
2330         /* The WMs are computed based on how long it takes to fill a single
2331          * row at the given clock rate, multiplied by 8.
2332          */
2333         linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8, mode->clock);
2334         ips_linetime = DIV_ROUND_CLOSEST(mode->htotal * 1000 * 8,
2335                                          intel_ddi_get_cdclk_freq(dev_priv));
2336
2337         return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2338                PIPE_WM_LINETIME_TIME(linetime);
2339 }
2340
2341 static void hsw_compute_wm_parameters(struct drm_device *dev,
2342                                       struct hsw_pipe_wm_parameters *params,
2343                                       uint32_t *wm,
2344                                       struct hsw_wm_maximums *lp_max_1_2,
2345                                       struct hsw_wm_maximums *lp_max_5_6)
2346 {
2347         struct drm_i915_private *dev_priv = dev->dev_private;
2348         struct drm_crtc *crtc;
2349         struct drm_plane *plane;
2350         uint64_t sskpd = I915_READ64(MCH_SSKPD);
2351         enum pipe pipe;
2352         int pipes_active = 0, sprites_enabled = 0;
2353
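        /* Read the WM0-WM4 memory latencies from MCH_SSKPD; the WM1+
         * fields are scaled by 5 to bring them to the same 0.1us units
         * as WM0. */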
2354         if ((sskpd >> 56) & 0xFF)
2355                 wm[0] = (sskpd >> 56) & 0xFF;
2356         else
2357                 wm[0] = sskpd & 0xF;
2358         wm[1] = ((sskpd >> 4) & 0xFF) * 5;
2359         wm[2] = ((sskpd >> 12) & 0xFF) * 5;
2360         wm[3] = ((sskpd >> 20) & 0x1FF) * 5;
2361         wm[4] = ((sskpd >> 32) & 0x1FF) * 5;
2362
2363         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2364                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2365                 struct hsw_pipe_wm_parameters *p;
2366
2367                 pipe = intel_crtc->pipe;
2368                 p = &params[pipe];
2369
2370                 p->active = intel_crtc_active(crtc);
2371                 if (!p->active)
2372                         continue;
2373
2374                 pipes_active++;
2375
2376                 p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
2377                 p->pixel_rate = hsw_wm_get_pixel_rate(dev, crtc);
2378                 p->pri_bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
2379                 p->cur_bytes_per_pixel = 4;
2380                 p->pri_horiz_pixels =
2381                         intel_crtc->config.requested_mode.hdisplay;
2382                 p->cur_horiz_pixels = 64;
2383         }
2384
2385         list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
2386                 struct intel_plane *intel_plane = to_intel_plane(plane);
2387                 struct hsw_pipe_wm_parameters *p;
2388
2389                 pipe = intel_plane->pipe;
2390                 p = &params[pipe];
2391
2392                 p->sprite_enabled = intel_plane->wm.enable;
2393                 p->spr_bytes_per_pixel = intel_plane->wm.bytes_per_pixel;
2394                 p->spr_horiz_pixels = intel_plane->wm.horiz_pixels;
2395
2396                 if (p->sprite_enabled)
2397                         sprites_enabled++;
2398         }
2399
2400         if (pipes_active > 1) {
2401                 lp_max_1_2->pri = lp_max_5_6->pri = sprites_enabled ? 128 : 256;
2402                 lp_max_1_2->spr = lp_max_5_6->spr = 128;
2403                 lp_max_1_2->cur = lp_max_5_6->cur = 64;
2404         } else {
2405                 lp_max_1_2->pri = sprites_enabled ? 384 : 768;
2406                 lp_max_5_6->pri = sprites_enabled ? 128 : 768;
2407                 lp_max_1_2->spr = 384;
2408                 lp_max_5_6->spr = 640;
2409                 lp_max_1_2->cur = lp_max_5_6->cur = 255;
2410         }
2411         lp_max_1_2->fbc = lp_max_5_6->fbc = 15;
2412 }
2413
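/*
 * Fill in the WM_PIPE, WM_LP, sprite and linetime register values for
 * one data buffer partitioning mode (expressed through lp_maximums).
 */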
2414 static void hsw_compute_wm_results(struct drm_device *dev,
2415                                    struct hsw_pipe_wm_parameters *params,
2416                                    uint32_t *wm,
2417                                    struct hsw_wm_maximums *lp_maximums,
2418                                    struct hsw_wm_values *results)
2419 {
2420         struct drm_i915_private *dev_priv = dev->dev_private;
2421         struct drm_crtc *crtc;
2422         struct hsw_lp_wm_result lp_results[4] = {};
2423         enum pipe pipe;
2424         int level, max_level, wm_lp;
2425
2426         for (level = 1; level <= 4; level++)
2427                 if (!hsw_compute_lp_wm(wm[level], lp_maximums, params,
2428                                        &lp_results[level - 1]))
2429                         break;
2430         max_level = level - 1;
2431
2432         memset(results, 0, sizeof(*results));
2433
2434         /* The spec says it is preferred to disable FBC WMs instead of disabling
2435          * a WM level. */
2436         results->enable_fbc_wm = true;
2437         for (level = 1; level <= max_level; level++) {
2438                 if (!lp_results[level - 1].fbc_enable) {
2439                         results->enable_fbc_wm = false;
2440                         break;
2441                 }
2442         }
2443         for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2444                 const struct hsw_lp_wm_result *r;
2445
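                /* With all four LP levels usable, map WM1/WM2/WM3_LP to
                 * levels 1, 3 and 4 (skipping level 2); otherwise map
                 * them 1:1. */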
2446                 level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp;
2447                 if (level > max_level)
2448                         break;
2449
2450                 r = &lp_results[level - 1];
2451                 results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2,
2452                                                           r->fbc_val,
2453                                                           r->pri_val,
2454                                                           r->cur_val);
2455                 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2456         }
2457
2458         for_each_pipe(pipe)
2459                 results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, wm[0],
2460                                                              pipe,
2461                                                              &params[pipe]);
2462
2463         for_each_pipe(pipe) {
2464                 crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2465                 results->wm_linetime[pipe] = hsw_compute_linetime_wm(dev, crtc);
2466         }
2467 }
2468
2469 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
2470  * case both are at the same level. Prefer r1 in case they're the same. */
2471 static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
2472                                                   struct hsw_wm_values *r2)
2473 {
2474         int i, val_r1 = 0, val_r2 = 0;
2475
2476         for (i = 0; i < 3; i++) {
2477                 if (r1->wm_lp[i] & WM3_LP_EN)
2478                         val_r1 = r1->wm_lp[i] & WM1_LP_LATENCY_MASK;
2479                 if (r2->wm_lp[i] & WM3_LP_EN)
2480                         val_r2 = r2->wm_lp[i] & WM1_LP_LATENCY_MASK;
2481         }
2482
2483         if (val_r1 == val_r2) {
2484                 if (r2->enable_fbc_wm && !r1->enable_fbc_wm)
2485                         return r2;
2486                 else
2487                         return r1;
2488         } else if (val_r1 > val_r2) {
2489                 return r1;
2490         } else {
2491                 return r2;
2492         }
2493 }
2494
2495 /*
2496  * The spec says we shouldn't write when we don't need to, because every write
2497  * causes WMs to be re-evaluated, expending some power.
2498  */
2499 static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
2500                                 struct hsw_wm_values *results,
2501                                 enum hsw_data_buf_partitioning partitioning)
2502 {
2503         struct hsw_wm_values previous;
2504         uint32_t val;
2505         enum hsw_data_buf_partitioning prev_partitioning;
2506         bool prev_enable_fbc_wm;
2507
2508         previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
2509         previous.wm_pipe[1] = I915_READ(WM0_PIPEB_ILK);
2510         previous.wm_pipe[2] = I915_READ(WM0_PIPEC_IVB);
2511         previous.wm_lp[0] = I915_READ(WM1_LP_ILK);
2512         previous.wm_lp[1] = I915_READ(WM2_LP_ILK);
2513         previous.wm_lp[2] = I915_READ(WM3_LP_ILK);
2514         previous.wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
2515         previous.wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
2516         previous.wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
2517         previous.wm_linetime[0] = I915_READ(PIPE_WM_LINETIME(PIPE_A));
2518         previous.wm_linetime[1] = I915_READ(PIPE_WM_LINETIME(PIPE_B));
2519         previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
2520
2521         prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
2522                             HSW_DATA_BUF_PART_5_6 : HSW_DATA_BUF_PART_1_2;
2523
2524         prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
2525
2526         if (memcmp(results->wm_pipe, previous.wm_pipe,
2527                    sizeof(results->wm_pipe)) == 0 &&
2528             memcmp(results->wm_lp, previous.wm_lp,
2529                    sizeof(results->wm_lp)) == 0 &&
2530             memcmp(results->wm_lp_spr, previous.wm_lp_spr,
2531                    sizeof(results->wm_lp_spr)) == 0 &&
2532             memcmp(results->wm_linetime, previous.wm_linetime,
2533                    sizeof(results->wm_linetime)) == 0 &&
2534             partitioning == prev_partitioning &&
2535             results->enable_fbc_wm == prev_enable_fbc_wm)
2536                 return;
2537
2538         if (previous.wm_lp[2] != 0)
2539                 I915_WRITE(WM3_LP_ILK, 0);
2540         if (previous.wm_lp[1] != 0)
2541                 I915_WRITE(WM2_LP_ILK, 0);
2542         if (previous.wm_lp[0] != 0)
2543                 I915_WRITE(WM1_LP_ILK, 0);
2544
2545         if (previous.wm_pipe[0] != results->wm_pipe[0])
2546                 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2547         if (previous.wm_pipe[1] != results->wm_pipe[1])
2548                 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2549         if (previous.wm_pipe[2] != results->wm_pipe[2])
2550                 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2551
2552         if (previous.wm_linetime[0] != results->wm_linetime[0])
2553                 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2554         if (previous.wm_linetime[1] != results->wm_linetime[1])
2555                 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2556         if (previous.wm_linetime[2] != results->wm_linetime[2])
2557                 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2558
2559         if (prev_partitioning != partitioning) {
2560                 val = I915_READ(WM_MISC);
2561                 if (partitioning == HSW_DATA_BUF_PART_1_2)
2562                         val &= ~WM_MISC_DATA_PARTITION_5_6;
2563                 else
2564                         val |= WM_MISC_DATA_PARTITION_5_6;
2565                 I915_WRITE(WM_MISC, val);
2566         }
2567
2568         if (prev_enable_fbc_wm != results->enable_fbc_wm) {
2569                 val = I915_READ(DISP_ARB_CTL);
2570                 if (results->enable_fbc_wm)
2571                         val &= ~DISP_FBC_WM_DIS;
2572                 else
2573                         val |= DISP_FBC_WM_DIS;
2574                 I915_WRITE(DISP_ARB_CTL, val);
2575         }
2576
2577         if (previous.wm_lp_spr[0] != results->wm_lp_spr[0])
2578                 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2579         if (previous.wm_lp_spr[1] != results->wm_lp_spr[1])
2580                 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2581         if (previous.wm_lp_spr[2] != results->wm_lp_spr[2])
2582                 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2583
2584         if (results->wm_lp[0] != 0)
2585                 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2586         if (results->wm_lp[1] != 0)
2587                 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2588         if (results->wm_lp[2] != 0)
2589                 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2590 }
2591
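/*
 * Compute watermark results for both data buffer partitioning modes and
 * program whichever set allows the deepest enabled LP level.
 */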
2592 static void haswell_update_wm(struct drm_device *dev)
2593 {
2594         struct drm_i915_private *dev_priv = dev->dev_private;
2595         struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
2596         struct hsw_pipe_wm_parameters params[3];
2597         struct hsw_wm_values results_1_2, results_5_6, *best_results;
2598         uint32_t wm[5];
2599         enum hsw_data_buf_partitioning partitioning;
2600
2601         hsw_compute_wm_parameters(dev, params, wm, &lp_max_1_2, &lp_max_5_6);
2602
2603         hsw_compute_wm_results(dev, params, wm, &lp_max_1_2, &results_1_2);
2604         if (lp_max_1_2.pri != lp_max_5_6.pri) {
2605                 hsw_compute_wm_results(dev, params, wm, &lp_max_5_6,
2606                                        &results_5_6);
2607                 best_results = hsw_find_best_result(&results_1_2, &results_5_6);
2608         } else {
2609                 best_results = &results_1_2;
2610         }
2611
2612         partitioning = (best_results == &results_1_2) ?
2613                        HSW_DATA_BUF_PART_1_2 : HSW_DATA_BUF_PART_5_6;
2614
2615         hsw_write_wm_values(dev_priv, best_results, partitioning);
2616 }
2617
2618 static void haswell_update_sprite_wm(struct drm_device *dev, int pipe,
2619                                      uint32_t sprite_width, int pixel_size,
2620                                      bool enable)
2621 {
2622         struct drm_plane *plane;
2623
2624         list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
2625                 struct intel_plane *intel_plane = to_intel_plane(plane);
2626
2627                 if (intel_plane->pipe == pipe) {
2628                         intel_plane->wm.enable = enable;
2629                         intel_plane->wm.horiz_pixels = sprite_width + 1;
2630                         intel_plane->wm.bytes_per_pixel = pixel_size;
2631                         break;
2632                 }
2633         }
2634
2635         haswell_update_wm(dev);
2636 }
2637
2638 static bool
2639 sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
2640                               uint32_t sprite_width, int pixel_size,
2641                               const struct intel_watermark_params *display,
2642                               int display_latency_ns, int *sprite_wm)
2643 {
2644         struct drm_crtc *crtc;
2645         int clock;
2646         int entries, tlb_miss;
2647
2648         crtc = intel_get_crtc_for_plane(dev, plane);
2649         if (!intel_crtc_active(crtc)) {
2650                 *sprite_wm = display->guard_size;
2651                 return false;
2652         }
2653
2654         clock = crtc->mode.clock;
2655
2656         /* Use the small buffer method to calculate the sprite watermark */
2657         entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
2658         tlb_miss = display->fifo_size * display->cacheline_size -
2659                 sprite_width * 8;
2660         if (tlb_miss > 0)
2661                 entries += tlb_miss;
2662         entries = DIV_ROUND_UP(entries, display->cacheline_size);
2663         *sprite_wm = entries + display->guard_size;
2664         if (*sprite_wm > (int)display->max_wm)
2665                 *sprite_wm = display->max_wm;
2666
2667         return true;
2668 }
2669
2670 static bool
2671 sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
2672                                 uint32_t sprite_width, int pixel_size,
2673                                 const struct intel_watermark_params *display,
2674                                 int latency_ns, int *sprite_wm)
2675 {
2676         struct drm_crtc *crtc;
2677         unsigned long line_time_us;
2678         int clock;
2679         int line_count, line_size;
2680         int small, large;
2681         int entries;
2682
2683         if (!latency_ns) {
2684                 *sprite_wm = 0;
2685                 return false;
2686         }
2687
2688         crtc = intel_get_crtc_for_plane(dev, plane);
2689         clock = crtc->mode.clock;
2690         if (!clock) {
2691                 *sprite_wm = 0;
2692                 return false;
2693         }
2694
2695         line_time_us = (sprite_width * 1000) / clock;
2696         if (!line_time_us) {
2697                 *sprite_wm = 0;
2698                 return false;
2699         }
2700
2701         line_count = (latency_ns / line_time_us + 1000) / 1000;
2702         line_size = sprite_width * pixel_size;
2703
2704         /* Use the minimum of the small and large buffer method for the sprite */
2705         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
2706         large = line_count * line_size;
2707
2708         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
2709         *sprite_wm = entries + display->guard_size;
2710
2711         return *sprite_wm <= 0x3ff;
2712 }
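/*
 * Worked example for the self-refresh path above (illustrative numbers
 * only): with sprite_width = 256 and clock = 100000 kHz,
 * line_time_us = (256 * 1000) / 100000 = 2 us.  A 500 ns latency then
 * gives line_count = (500 / 2 + 1000) / 1000 = 1, i.e. the
 * "+ 1000 ... / 1000" idiom implements trunc(latency / line time) + 1,
 * matching the SR formula in the intel_update_watermarks comment
 * further down.
 */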
2713
2714 static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
2715                                          uint32_t sprite_width, int pixel_size,
2716                                          bool enable)
2717 {
2718         struct drm_i915_private *dev_priv = dev->dev_private;
2719         int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
2720         u32 val;
2721         int sprite_wm, reg;
2722         int ret;
2723
2724         if (!enable)
2725                 return;
2726
2727         switch (pipe) {
2728         case 0:
2729                 reg = WM0_PIPEA_ILK;
2730                 break;
2731         case 1:
2732                 reg = WM0_PIPEB_ILK;
2733                 break;
2734         case 2:
2735                 reg = WM0_PIPEC_IVB;
2736                 break;
2737         default:
2738                 return; /* bad pipe */
2739         }
2740
2741         ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
2742                                             &sandybridge_display_wm_info,
2743                                             latency, &sprite_wm);
2744         if (!ret) {
2745                 DRM_DEBUG_KMS("failed to compute sprite wm for pipe %c\n",
2746                               pipe_name(pipe));
2747                 return;
2748         }
2749
2750         val = I915_READ(reg);
2751         val &= ~WM0_PIPE_SPRITE_MASK;
2752         I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
2753         DRM_DEBUG_KMS("sprite watermarks for pipe %c - %d\n", pipe_name(pipe), sprite_wm);
2754
2755
2756         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2757                                               pixel_size,
2758                                               &sandybridge_display_srwm_info,
2759                                               SNB_READ_WM1_LATENCY() * 500,
2760                                               &sprite_wm);
2761         if (!ret) {
2762                 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
2763                               pipe_name(pipe));
2764                 return;
2765         }
2766         I915_WRITE(WM1S_LP_ILK, sprite_wm);
2767
2768         /* Only IVB has two more LP watermarks for sprite */
2769         if (!IS_IVYBRIDGE(dev))
2770                 return;
2771
2772         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2773                                               pixel_size,
2774                                               &sandybridge_display_srwm_info,
2775                                               SNB_READ_WM2_LATENCY() * 500,
2776                                               &sprite_wm);
2777         if (!ret) {
2778                 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
2779                               pipe_name(pipe));
2780                 return;
2781         }
2782         I915_WRITE(WM2S_LP_IVB, sprite_wm);
2783
2784         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2785                                               pixel_size,
2786                                               &sandybridge_display_srwm_info,
2787                                               SNB_READ_WM3_LATENCY() * 500,
2788                                               &sprite_wm);
2789         if (!ret) {
2790                 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
2791                               pipe_name(pipe));
2792                 return;
2793         }
2794         I915_WRITE(WM3S_LP_IVB, sprite_wm);
2795 }
2796
2797 /**
2798  * intel_update_watermarks - update FIFO watermark values based on current modes
2799  *
2800  * Calculate watermark values for the various WM regs based on current mode
2801  * and plane configuration.
2802  *
2803  * There are several cases to deal with here:
2804  *   - normal (i.e. non-self-refresh)
2805  *   - self-refresh (SR) mode
2806  *   - lines are large relative to FIFO size (buffer can hold up to 2)
2807  *   - lines are small relative to FIFO size (buffer can hold more than 2
2808  *     lines), so need to account for TLB latency
2809  *
2810  *   The normal calculation is:
2811  *     watermark = dotclock * bytes per pixel * latency
2812  *   where latency is platform & configuration dependent (we assume pessimal
2813  *   values here).
2814  *
2815  *   The SR calculation is:
2816  *     watermark = (trunc(latency/line time)+1) * surface width *
2817  *       bytes per pixel
2818  *   where
2819  *     line time = htotal / dotclock
2820  *     surface width = hdisplay for normal plane and 64 for cursor
2821  *   and latency is assumed to be high, as above.
2822  *
2823  * The final value programmed to the register should always be rounded up,
2824  * and include an extra 2 entries to account for clock crossings.
2825  *
2826  * We don't use the sprite, so we can ignore that.  And on Crestline we have
2827  * to set the non-SR watermarks to 8.
2828  */
2829 void intel_update_watermarks(struct drm_device *dev)
2830 {
2831         struct drm_i915_private *dev_priv = dev->dev_private;
2832
2833         if (dev_priv->display.update_wm)
2834                 dev_priv->display.update_wm(dev);
2835 }
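/*
 * Worked example of the two formulas in the comment above (illustrative
 * numbers only): a 1920-wide plane at a 150 MHz dotclock with 4 bytes
 * per pixel and a pessimal 2 us latency gives, for the normal method,
 *
 *   watermark = 150 MHz * 4 B/px * 2 us = 1200 bytes of FIFO,
 *
 * and for the SR method, with htotal = 2200,
 *
 *   line time = 2200 / 150 MHz ~= 14.7 us
 *   watermark = (trunc(2 / 14.7) + 1) * 1920 * 4 = 7680 bytes,
 *
 * both of which are then converted to FIFO entries, rounded up and
 * padded with the guard entries before being written to the WM regs.
 */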
2836
2837 void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
2838                                     uint32_t sprite_width, int pixel_size,
2839                                     bool enable)
2840 {
2841         struct drm_i915_private *dev_priv = dev->dev_private;
2842
2843         if (dev_priv->display.update_sprite_wm)
2844                 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
2845                                                    pixel_size, enable);
2846 }
2847
2848 static struct drm_i915_gem_object *
2849 intel_alloc_context_page(struct drm_device *dev)
2850 {
2851         struct drm_i915_gem_object *ctx;
2852         int ret;
2853
2854         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2855
2856         ctx = i915_gem_alloc_object(dev, 4096);
2857         if (!ctx) {
2858                 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
2859                 return NULL;
2860         }
2861
2862         ret = i915_gem_object_pin(ctx, 4096, true, false);
2863         if (ret) {
2864                 DRM_ERROR("failed to pin power context: %d\n", ret);
2865                 goto err_unref;
2866         }
2867
2868         ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
2869         if (ret) {
2870                 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
2871                 goto err_unpin;
2872         }
2873
2874         return ctx;
2875
2876 err_unpin:
2877         i915_gem_object_unpin(ctx);
2878 err_unref:
2879         drm_gem_object_unreference(&ctx->base);
2880         return NULL;
2881 }
2882
2883 /**
2884  * Lock protecting IPS related data structures
2885  */
2886 DEFINE_SPINLOCK(mchdev_lock);
2887
2888 /* Global for IPS driver to get at the current i915 device. Protected by
2889  * mchdev_lock. */
2890 static struct drm_i915_private *i915_mch_dev;
2891
2892 bool ironlake_set_drps(struct drm_device *dev, u8 val)
2893 {
2894         struct drm_i915_private *dev_priv = dev->dev_private;
2895         u16 rgvswctl;
2896
2897         assert_spin_locked(&mchdev_lock);
2898
2899         rgvswctl = I915_READ16(MEMSWCTL);
2900         if (rgvswctl & MEMCTL_CMD_STS) {
2901                 DRM_DEBUG("gpu busy, RCS change rejected\n");
2902                 return false; /* still busy with another command */
2903         }
2904
2905         rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
2906                 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
2907         I915_WRITE16(MEMSWCTL, rgvswctl);
2908         POSTING_READ16(MEMSWCTL);
2909
2910         rgvswctl |= MEMCTL_CMD_STS;
2911         I915_WRITE16(MEMSWCTL, rgvswctl);
2912
2913         return true;
2914 }
2915
2916 static void ironlake_enable_drps(struct drm_device *dev)
2917 {
2918         struct drm_i915_private *dev_priv = dev->dev_private;
2919         u32 rgvmodectl = I915_READ(MEMMODECTL);
2920         u8 fmax, fmin, fstart, vstart;
2921
2922         spin_lock_irq(&mchdev_lock);
2923
2924         /* Enable temp reporting */
2925         I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
2926         I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
2927
2928         /* 100ms RC evaluation intervals */
2929         I915_WRITE(RCUPEI, 100000);
2930         I915_WRITE(RCDNEI, 100000);
2931
2932         /* Set max/min thresholds to 90ms and 80ms respectively */
2933         I915_WRITE(RCBMAXAVG, 90000);
2934         I915_WRITE(RCBMINAVG, 80000);
2935
2936         I915_WRITE(MEMIHYST, 1);
2937
2938         /* Set up min, max, and cur for interrupt handling */
2939         fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
2940         fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
2941         fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
2942                 MEMMODE_FSTART_SHIFT;
2943
2944         vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
2945                 PXVFREQ_PX_SHIFT;
2946
2947         dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
2948         dev_priv->ips.fstart = fstart;
2949
2950         dev_priv->ips.max_delay = fstart;
2951         dev_priv->ips.min_delay = fmin;
2952         dev_priv->ips.cur_delay = fstart;
2953
2954         DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
2955                          fmax, fmin, fstart);
2956
2957         I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
2958
2959         /*
2960          * Interrupts will be enabled in ironlake_irq_postinstall
2961          */
2962
2963         I915_WRITE(VIDSTART, vstart);
2964         POSTING_READ(VIDSTART);
2965
2966         rgvmodectl |= MEMMODE_SWMODE_EN;
2967         I915_WRITE(MEMMODECTL, rgvmodectl);
2968
2969         if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
2970                 DRM_ERROR("stuck trying to change perf mode\n");
2971         mdelay(1);
2972
2973         ironlake_set_drps(dev, fstart);
2974
2975         dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
2976                 I915_READ(0x112e0);
2977         dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
2978         dev_priv->ips.last_count2 = I915_READ(0x112f4);
2979         getrawmonotonic(&dev_priv->ips.last_time2);
2980
2981         spin_unlock_irq(&mchdev_lock);
2982 }
2983
2984 static void ironlake_disable_drps(struct drm_device *dev)
2985 {
2986         struct drm_i915_private *dev_priv = dev->dev_private;
2987         u16 rgvswctl;
2988
2989         spin_lock_irq(&mchdev_lock);
2990
2991         rgvswctl = I915_READ16(MEMSWCTL);
2992
2993         /* Ack interrupts, disable EFC interrupt */
2994         I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
2995         I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
2996         I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
2997         I915_WRITE(DEIIR, DE_PCU_EVENT);
2998         I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
2999
3000         /* Go back to the starting frequency */
3001         ironlake_set_drps(dev, dev_priv->ips.fstart);
3002         mdelay(1);
3003         rgvswctl |= MEMCTL_CMD_STS;
3004         I915_WRITE(MEMSWCTL, rgvswctl);
3005         mdelay(1);
3006
3007         spin_unlock_irq(&mchdev_lock);
3008 }
3009
3010 /* There's a funny hw issue where the hw returns all 0 when reading from
3011  * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
3012  * ourselves, instead of doing a rmw cycle (which might result in us clearing
3013  * all limits and leaving the gpu stuck at whatever frequency it is at).
3014  */
3015 static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
3016 {
3017         u32 limits;
3018
3019         limits = 0;
3020
3021         if (*val >= dev_priv->rps.max_delay)
3022                 *val = dev_priv->rps.max_delay;
3023         limits |= dev_priv->rps.max_delay << 24;
3024
3025         /* Only set the down limit when we've reached the lowest level to avoid
3026          * getting more interrupts, otherwise leave this clear. This prevents a
3027          * race in the hw when coming out of rc6: There's a tiny window where
3028          * the hw runs at the minimal clock before selecting the desired
3029          * frequency, if the down threshold expires in that window we will not
3030          * receive a down interrupt. */
3031         if (*val <= dev_priv->rps.min_delay) {
3032                 *val = dev_priv->rps.min_delay;
3033                 limits |= dev_priv->rps.min_delay << 16;
3034         }
3035
3036         return limits;
3037 }
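/*
 * Sketch of the resulting GEN6_RP_INTERRUPT_LIMITS layout as used above
 * (a reading of the shifts in this function, not taken from any spec
 * text): bits 31:24 hold the up limit and bits 23:16 the down limit,
 * both in the same 50 MHz units as the delay values.  E.g. with
 * max_delay = 0x14 and min_delay = 0x05, a request clamped to the
 * minimum yields limits = (0x14 << 24) | (0x05 << 16) = 0x14050000.
 */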
3038
3039 void gen6_set_rps(struct drm_device *dev, u8 val)
3040 {
3041         struct drm_i915_private *dev_priv = dev->dev_private;
3042         u32 limits = gen6_rps_limits(dev_priv, &val);
3043
3044         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3045         WARN_ON(val > dev_priv->rps.max_delay);
3046         WARN_ON(val < dev_priv->rps.min_delay);
3047
3048         if (val == dev_priv->rps.cur_delay)
3049                 return;
3050
3051         if (IS_HASWELL(dev))
3052                 I915_WRITE(GEN6_RPNSWREQ,
3053                            HSW_FREQUENCY(val));
3054         else
3055                 I915_WRITE(GEN6_RPNSWREQ,
3056                            GEN6_FREQUENCY(val) |
3057                            GEN6_OFFSET(0) |
3058                            GEN6_AGGRESSIVE_TURBO);
3059
3060         /* Make sure we continue to get interrupts
3061          * until we hit the minimum or maximum frequencies.
3062          */
3063         I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
3064
3065         POSTING_READ(GEN6_RPNSWREQ);
3066
3067         dev_priv->rps.cur_delay = val;
3068
3069         trace_intel_gpu_freq_change(val * 50);
3070 }
3071
3072 /*
3073  * Wait until the previous freq change has completed,
3074  * or the timeout elapsed, and then update our notion
3075  * of the current GPU frequency.
3076  */
3077 static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
3078 {
3079         unsigned long timeout = jiffies + msecs_to_jiffies(10);
3080         u32 pval;
3081
3082         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3083
3084         do {
3085                 pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
3086                 if (time_after(jiffies, timeout)) {
3087                         DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
3088                         break;
3089                 }
3090                 udelay(10);
3091         } while (pval & 1);
3092
3093         pval >>= 8;
3094
3095         if (pval != dev_priv->rps.cur_delay)
3096                 DRM_DEBUG_DRIVER("Punit overrode GPU freq: %d MHz (%u) requested, but got %d MHz (%u)\n",
3097                                  vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.cur_delay),
3098                                  dev_priv->rps.cur_delay,
3099                                  vlv_gpu_freq(dev_priv->mem_freq, pval), pval);
3100
3101         dev_priv->rps.cur_delay = pval;
3102 }
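/*
 * As read above, PUNIT_REG_GPU_FREQ_STS keeps bit 0 set while a
 * frequency request is still in flight, and the "pval >>= 8" suggests
 * the frequency codepoint the Punit actually selected sits in bits
 * 15:8.  On that reading, a raw status of 0x00001e01 means "busy,
 * heading for codepoint 0x1e", and 0x00001e00 means the change has
 * completed.
 */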
3103
3104 void valleyview_set_rps(struct drm_device *dev, u8 val)
3105 {
3106         struct drm_i915_private *dev_priv = dev->dev_private;
3107
3108         gen6_rps_limits(dev_priv, &val);
3109
3110         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3111         WARN_ON(val > dev_priv->rps.max_delay);
3112         WARN_ON(val < dev_priv->rps.min_delay);
3113
3114         vlv_update_rps_cur_delay(dev_priv);
3115
3116         DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
3117                          vlv_gpu_freq(dev_priv->mem_freq,
3118                                       dev_priv->rps.cur_delay),
3119                          dev_priv->rps.cur_delay,
3120                          vlv_gpu_freq(dev_priv->mem_freq, val), val);
3121
3122         if (val == dev_priv->rps.cur_delay)
3123                 return;
3124
3125         vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3126
3127         dev_priv->rps.cur_delay = val;
3128
3129         trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
3130 }
3131
3132
3133 static void gen6_disable_rps(struct drm_device *dev)
3134 {
3135         struct drm_i915_private *dev_priv = dev->dev_private;
3136
3137         I915_WRITE(GEN6_RC_CONTROL, 0);
3138         I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
3139         I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3140         I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS);
3141         /* Masking all PM interrupts here cannot race with the rps work item
3142          * unmasking them again, because that path uses a different register
3143          * (PMIMR) to mask PM interrupts. The only risk is leaving stale bits
3144          * in PMIIR and PMIMR, which gen6_enable_rps will clean up. */
3145
3146         spin_lock_irq(&dev_priv->rps.lock);
3147         dev_priv->rps.pm_iir = 0;
3148         spin_unlock_irq(&dev_priv->rps.lock);
3149
3150         I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
3151 }
3152
3153 static void valleyview_disable_rps(struct drm_device *dev)
3154 {
3155         struct drm_i915_private *dev_priv = dev->dev_private;
3156
3157         I915_WRITE(GEN6_RC_CONTROL, 0);
3158         I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3159         I915_WRITE(GEN6_PMIER, 0);
3160         /* Masking all PM interrupts here cannot race with the rps work item
3161          * unmasking them again, because that path uses a different register
3162          * (PMIMR) to mask PM interrupts. The only risk is leaving stale bits
3163          * in PMIIR and PMIMR, which gen6_enable_rps will clean up. */
3164
3165         spin_lock_irq(&dev_priv->rps.lock);
3166         dev_priv->rps.pm_iir = 0;
3167         spin_unlock_irq(&dev_priv->rps.lock);
3168
3169         I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
3170
3171         if (dev_priv->vlv_pctx) {
3172                 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
3173                 dev_priv->vlv_pctx = NULL;
3174         }
3175 }
3176
3177 int intel_enable_rc6(const struct drm_device *dev)
3178 {
3179         /* Respect the kernel parameter if it is set */
3180         if (i915_enable_rc6 >= 0)
3181                 return i915_enable_rc6;
3182
3183         /* Disable RC6 on Ironlake */
3184         if (INTEL_INFO(dev)->gen == 5)
3185                 return 0;
3186
3187         if (IS_HASWELL(dev)) {
3188                 DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
3189                 return INTEL_RC6_ENABLE;
3190         }
3191
3192         /* snb/ivb have more than one rc6 state. */
3193         if (INTEL_INFO(dev)->gen == 6) {
3194                 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
3195                 return INTEL_RC6_ENABLE;
3196         }
3197
3198         DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
3199         return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
3200 }
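/*
 * The return value is a mask of the INTEL_RC6*_ENABLE bits tested by
 * gen6_enable_rps() below, so e.g. booting with i915.i915_enable_rc6=1
 * forces plain RC6 only, while a negative value (the default) falls
 * through to the per-platform policy above: RC6 only on snb and hsw,
 * RC6 plus deep RC6 elsewhere (e.g. ivb).
 */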
3201
3202 static void gen6_enable_rps(struct drm_device *dev)
3203 {
3204         struct drm_i915_private *dev_priv = dev->dev_private;
3205         struct intel_ring_buffer *ring;
3206         u32 rp_state_cap;
3207         u32 gt_perf_status;
3208         u32 rc6vids, pcu_mbox, rc6_mask = 0;
3209         u32 gtfifodbg;
3210         int rc6_mode;
3211         int i, ret;
3212
3213         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3214
3215         /* Here begins a magic sequence of register writes to enable
3216          * auto-downclocking.
3217          *
3218          * There might be some value in exposing these to
3219          * userspace...
3220          */
3221         I915_WRITE(GEN6_RC_STATE, 0);
3222
3223         /* Clear the DBG now so we don't confuse earlier errors */
3224         if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3225                 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
3226                 I915_WRITE(GTFIFODBG, gtfifodbg);
3227         }
3228
3229         gen6_gt_force_wake_get(dev_priv);
3230
3231         rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3232         gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
3233
3234         /* In units of 50MHz */
3235         dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
3236         dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
3237         dev_priv->rps.cur_delay = 0;
3238
3239         /* disable the counters and set deterministic thresholds */
3240         I915_WRITE(GEN6_RC_CONTROL, 0);
3241
3242         I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
3243         I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
3244         I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
3245         I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3246         I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3247
3248         for_each_ring(ring, dev_priv, i)
3249                 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3250
3251         I915_WRITE(GEN6_RC_SLEEP, 0);
3252         I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
3253         I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
3254         I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
3255         I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
3256
3257         /* Check if we are enabling RC6 */
3258         rc6_mode = intel_enable_rc6(dev_priv->dev);
3259         if (rc6_mode & INTEL_RC6_ENABLE)
3260                 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
3261
3262         /* We don't use those on Haswell */
3263         if (!IS_HASWELL(dev)) {
3264                 if (rc6_mode & INTEL_RC6p_ENABLE)
3265                         rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
3266
3267                 if (rc6_mode & INTEL_RC6pp_ENABLE)
3268                         rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
3269         }
3270
3271         DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
3272                         (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3273                         (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3274                         (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3275
3276         I915_WRITE(GEN6_RC_CONTROL,
3277                    rc6_mask |
3278                    GEN6_RC_CTL_EI_MODE(1) |
3279                    GEN6_RC_CTL_HW_ENABLE);
3280
3281         if (IS_HASWELL(dev)) {
3282                 I915_WRITE(GEN6_RPNSWREQ,
3283                            HSW_FREQUENCY(10));
3284                 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3285                            HSW_FREQUENCY(12));
3286         } else {
3287                 I915_WRITE(GEN6_RPNSWREQ,
3288                            GEN6_FREQUENCY(10) |
3289                            GEN6_OFFSET(0) |
3290                            GEN6_AGGRESSIVE_TURBO);
3291                 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3292                            GEN6_FREQUENCY(12));
3293         }
3294
3295         I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
3296         I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3297                    dev_priv->rps.max_delay << 24 |
3298                    dev_priv->rps.min_delay << 16);
3299
3300         I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
3301         I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
3302         I915_WRITE(GEN6_RP_UP_EI, 66000);
3303         I915_WRITE(GEN6_RP_DOWN_EI, 350000);
3304
3305         I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3306         I915_WRITE(GEN6_RP_CONTROL,
3307                    GEN6_RP_MEDIA_TURBO |
3308                    GEN6_RP_MEDIA_HW_NORMAL_MODE |
3309                    GEN6_RP_MEDIA_IS_GFX |
3310                    GEN6_RP_ENABLE |
3311                    GEN6_RP_UP_BUSY_AVG |
3312                    (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
3313
3314         ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
3315         if (!ret) {
3316                 pcu_mbox = 0;
3317                 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
3318                 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
3319                         DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
3320                                          (dev_priv->rps.max_delay & 0xff) * 50,
3321                                          (pcu_mbox & 0xff) * 50);
3322                         dev_priv->rps.hw_max = pcu_mbox & 0xff;
3323                 }
3324         } else {
3325                 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
3326         }
3327
3328         gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
3329
3330         /* requires MSI enabled */
3331         I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS);
3332         spin_lock_irq(&dev_priv->rps.lock);
3333         /* FIXME: Our interrupt enabling sequence is bonghits.
3334          * dev_priv->rps.pm_iir really should be 0 here. */
3335         dev_priv->rps.pm_iir = 0;
3336         I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
3337         I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
3338         spin_unlock_irq(&dev_priv->rps.lock);
3339         /* unmask all PM interrupts */
3340         I915_WRITE(GEN6_PMINTRMSK, 0);
3341
3342         rc6vids = 0;
3343         ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
3344         if (IS_GEN6(dev) && ret) {
3345                 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
3346         } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
3347                 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
3348                           GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
3349                 rc6vids &= 0xffff00;
3350                 rc6vids |= GEN6_ENCODE_RC6_VID(450);
3351                 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
3352                 if (ret)
3353                         DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
3354         }
3355
3356         gen6_gt_force_wake_put(dev_priv);
3357 }
3358
3359 static void gen6_update_ring_freq(struct drm_device *dev)
3360 {
3361         struct drm_i915_private *dev_priv = dev->dev_private;
3362         int min_freq = 15;
3363         unsigned int gpu_freq;
3364         unsigned int max_ia_freq, min_ring_freq;
3365         int scaling_factor = 180;
3366
3367         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3368
3369         max_ia_freq = cpufreq_quick_get_max(0);
3370         /*
3371          * Default to measured freq if none found, PCU will ensure we don't go
3372          * over
3373          */
3374         if (!max_ia_freq)
3375                 max_ia_freq = tsc_khz;
3376
3377         /* Convert from kHz to MHz */
3378         max_ia_freq /= 1000;
3379
3380         min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK);
3381         /* convert DDR frequency from units of 133.3MHz to bandwidth */
3382         min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3;
3383
3384         /*
3385          * For each potential GPU frequency, load a ring frequency we'd like
3386          * to use for memory access.  We do this by specifying the IA frequency
3387          * the PCU should use as a reference to determine the ring frequency.
3388          */
3389         for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
3390              gpu_freq--) {
3391                 int diff = dev_priv->rps.max_delay - gpu_freq;
3392                 unsigned int ia_freq = 0, ring_freq = 0;
3393
3394                 if (IS_HASWELL(dev)) {
3395                         ring_freq = (gpu_freq * 5 + 3) / 4;
3396                         ring_freq = max(min_ring_freq, ring_freq);
3397                         /* leave ia_freq as the default, chosen by cpufreq */
3398                 } else {
3399                         /* On older processors, there is no separate ring
3400                          * clock domain, so in order to boost the bandwidth
3401                          * of the ring, we need to upclock the CPU (ia_freq).
3402                          *
3403                          * For GPU frequencies less than 750MHz,
3404                          * just use the lowest ring freq.
3405                          */
3406                         if (gpu_freq < min_freq)
3407                                 ia_freq = 800;
3408                         else
3409                                 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
3410                         ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
3411                 }
3412
3413                 sandybridge_pcode_write(dev_priv,
3414                                         GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
3415                                         ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
3416                                         ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
3417                                         gpu_freq);
3418         }
3419 }
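/*
 * Worked example of the scaling above (illustrative numbers): on
 * Haswell a gpu_freq of 16 (16 * 50 = 800 MHz) requests
 * ring_freq = (16 * 5 + 3) / 4 = 20, i.e. the ring is asked to run
 * about 25% faster than the GPU, floored at min_ring_freq.  On snb/ivb
 * with max_ia_freq = 3400 MHz and scaling_factor = 180, the entry two
 * steps below the maximum (diff = 2) programs
 * ia_freq = DIV_ROUND_CLOSEST(3400 - (2 * 180) / 2, 100) = 32,
 * presumably in the 100 MHz units the PCU expects.
 */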
3420
3421 int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
3422 {
3423         u32 val, rp0;
3424
3425         val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
3426
3427         rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
3428         /* Clamp to max */
3429         rp0 = min_t(u32, rp0, 0xea);
3430
3431         return rp0;
3432 }
3433
3434 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
3435 {
3436         u32 val, rpe;
3437
3438         val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
3439         rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
3440         val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
3441         rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
3442
3443         return rpe;
3444 }
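/*
 * A reading of the bit twiddling above: the RPe codepoint is split
 * across two fuse registers, with the low 5 bits coming from the
 * FMAX_FUSE_LO field and the remaining bits from FMAX_FUSE_HI, shifted
 * up by 5 before being OR'd in.  E.g. a LO field of 0x0a and a HI
 * field of 0x2 assemble to rpe = 0x0a | (0x2 << 5) = 0x4a.
 */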
3445
3446 int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3447 {
3448         return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
3449 }
3450
3451 static void vlv_rps_timer_work(struct work_struct *work)
3452 {
3453         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
3454                                                     rps.vlv_work.work);
3455
3456         /*
3457          * Timer fired, we must be idle.  Drop to min voltage state.
3458          * Note: we use RPe here since it should match the
3459          * Vmin we were shooting for.  That should give us better
3460          * perf when we come back out of RC6 than if we used the
3461          * min freq available.
3462          */
3463         mutex_lock(&dev_priv->rps.hw_lock);
3464         if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
3465                 valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
3466         mutex_unlock(&dev_priv->rps.hw_lock);
3467 }
3468
3469 static void valleyview_setup_pctx(struct drm_device *dev)
3470 {
3471         struct drm_i915_private *dev_priv = dev->dev_private;
3472         struct drm_i915_gem_object *pctx;
3473         unsigned long pctx_paddr;
3474         u32 pcbr;
3475         int pctx_size = 24*1024;
3476
3477         pcbr = I915_READ(VLV_PCBR);
3478         if (pcbr) {
3479                 /* BIOS set it up already, grab the pre-alloc'd space */
3480                 int pcbr_offset;
3481
3482                 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
3483                 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
3484                                                                       pcbr_offset,
3485                                                                       -1,
3486                                                                       pctx_size);
3487                 goto out;
3488         }
3489
3490         /*
3491          * From the Gunit register HAS:
3492          * The Gfx driver is expected to program this register and ensure
3493          * proper allocation within Gfx stolen memory.  For example, this
3494          * register should be programmed such that the PCBR range does not
3495          * overlap with other ranges, such as the frame buffer, protected
3496          * memory, or any other relevant ranges.
3497          */
3498         pctx = i915_gem_object_create_stolen(dev, pctx_size);
3499         if (!pctx) {
3500                 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
3501                 return;
3502         }
3503
3504         pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
3505         I915_WRITE(VLV_PCBR, pctx_paddr);
3506
3507 out:
3508         dev_priv->vlv_pctx = pctx;
3509 }
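/*
 * A sketch of the PCBR handshake above (illustrative addresses): the
 * register holds the physical base of the 24 KiB power context
 * carve-out, 4 KiB aligned (hence the "& ~4095").  If the BIOS left it
 * zero we allocate from stolen memory ourselves and publish the
 * address, e.g. a pctx placed 0x7f8000 bytes into stolen memory on a
 * machine with stolen_base = 0x80000000 writes 0x807f8000 to VLV_PCBR.
 */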
3510
3511 static void valleyview_enable_rps(struct drm_device *dev)
3512 {
3513         struct drm_i915_private *dev_priv = dev->dev_private;
3514         struct intel_ring_buffer *ring;
3515         u32 gtfifodbg, val;
3516         int i;
3517
3518         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3519
3520         if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3521                 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
3522                 I915_WRITE(GTFIFODBG, gtfifodbg);
3523         }
3524
3525         valleyview_setup_pctx(dev);
3526
3527         gen6_gt_force_wake_get(dev_priv);
3528
3529         I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
3530         I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
3531         I915_WRITE(GEN6_RP_UP_EI, 66000);
3532         I915_WRITE(GEN6_RP_DOWN_EI, 350000);
3533
3534         I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3535
3536         I915_WRITE(GEN6_RP_CONTROL,
3537                    GEN6_RP_MEDIA_TURBO |
3538                    GEN6_RP_MEDIA_HW_NORMAL_MODE |
3539                    GEN6_RP_MEDIA_IS_GFX |
3540                    GEN6_RP_ENABLE |
3541                    GEN6_RP_UP_BUSY_AVG |
3542                    GEN6_RP_DOWN_IDLE_CONT);
3543
3544         I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
3545         I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3546         I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3547
3548         for_each_ring(ring, dev_priv, i)
3549                 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3550
3551         I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
3552
3553         /* allows RC6 residency counter to work */
3554         I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3));
3555         I915_WRITE(GEN6_RC_CONTROL,
3556                    GEN7_RC_CTL_TO_MODE);
3557
3558         val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
3559         switch ((val >> 6) & 3) {
3560         case 0:
3561         case 1:
3562                 dev_priv->mem_freq = 800;
3563                 break;
3564         case 2:
3565                 dev_priv->mem_freq = 1066;
3566                 break;
3567         case 3:
3568                 dev_priv->mem_freq = 1333;
3569                 break;
3570         }
3571         DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
3572
3573         DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
3574         DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
3575
3576         dev_priv->rps.cur_delay = (val >> 8) & 0xff;
3577         DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
3578                          vlv_gpu_freq(dev_priv->mem_freq,
3579                                       dev_priv->rps.cur_delay),
3580                          dev_priv->rps.cur_delay);
3581
3582         dev_priv->rps.max_delay = valleyview_rps_max_freq(dev_priv);
3583         dev_priv->rps.hw_max = dev_priv->rps.max_delay;
3584         DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
3585                          vlv_gpu_freq(dev_priv->mem_freq,
3586                                       dev_priv->rps.max_delay),
3587                          dev_priv->rps.max_delay);
3588
3589         dev_priv->rps.rpe_delay = valleyview_rps_rpe_freq(dev_priv);
3590         DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
3591                          vlv_gpu_freq(dev_priv->mem_freq,
3592                                       dev_priv->rps.rpe_delay),
3593                          dev_priv->rps.rpe_delay);
3594
3595         dev_priv->rps.min_delay = valleyview_rps_min_freq(dev_priv);
3596         DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
3597                          vlv_gpu_freq(dev_priv->mem_freq,
3598                                       dev_priv->rps.min_delay),
3599                          dev_priv->rps.min_delay);
3600
3601         DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
3602                          vlv_gpu_freq(dev_priv->mem_freq,
3603                                       dev_priv->rps.rpe_delay),
3604                          dev_priv->rps.rpe_delay);
3605
3606         INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
3607
3608         valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
3609
3610         /* requires MSI enabled */
3611         I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS);
3612         spin_lock_irq(&dev_priv->rps.lock);
3613         WARN_ON(dev_priv->rps.pm_iir != 0);
3614         I915_WRITE(GEN6_PMIMR, 0);
3615         spin_unlock_irq(&dev_priv->rps.lock);
3616         /* enable all PM interrupts */
3617         I915_WRITE(GEN6_PMINTRMSK, 0);
3618
3619         gen6_gt_force_wake_put(dev_priv);
3620 }
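/*
 * A note on the status decode above: bits 7:6 of
 * PUNIT_REG_GPU_FREQ_STS select the DDR speed (0/1 -> 800 MHz,
 * 2 -> 1066 MHz, 3 -> 1333 MHz), bit 4 reports whether the GPLL is
 * enabled, and bits 15:8 carry the current frequency codepoint, which
 * is why the same register read feeds mem_freq, the GPLL debug print
 * and rps.cur_delay.
 */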
3621
3622 void ironlake_teardown_rc6(struct drm_device *dev)
3623 {
3624         struct drm_i915_private *dev_priv = dev->dev_private;
3625
3626         if (dev_priv->ips.renderctx) {
3627                 i915_gem_object_unpin(dev_priv->ips.renderctx);
3628                 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
3629                 dev_priv->ips.renderctx = NULL;
3630         }
3631
3632         if (dev_priv->ips.pwrctx) {
3633                 i915_gem_object_unpin(dev_priv->ips.pwrctx);
3634                 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
3635                 dev_priv->ips.pwrctx = NULL;
3636         }
3637 }
3638
3639 static void ironlake_disable_rc6(struct drm_device *dev)
3640 {
3641         struct drm_i915_private *dev_priv = dev->dev_private;
3642
3643         if (I915_READ(PWRCTXA)) {
3644                 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
3645                 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
3646                 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
3647                          50);
3648
3649                 I915_WRITE(PWRCTXA, 0);
3650                 POSTING_READ(PWRCTXA);
3651
3652                 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
3653                 POSTING_READ(RSTDBYCTL);
3654         }
3655 }
3656
3657 static int ironlake_setup_rc6(struct drm_device *dev)
3658 {
3659         struct drm_i915_private *dev_priv = dev->dev_private;
3660
3661         if (dev_priv->ips.renderctx == NULL)
3662                 dev_priv->ips.renderctx = intel_alloc_context_page(dev);
3663         if (!dev_priv->ips.renderctx)
3664                 return -ENOMEM;
3665
3666         if (dev_priv->ips.pwrctx == NULL)
3667                 dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
3668         if (!dev_priv->ips.pwrctx) {
3669                 ironlake_teardown_rc6(dev);
3670                 return -ENOMEM;
3671         }
3672
3673         return 0;
3674 }
3675
3676 static void ironlake_enable_rc6(struct drm_device *dev)
3677 {
3678         struct drm_i915_private *dev_priv = dev->dev_private;
3679         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
3680         bool was_interruptible;
3681         int ret;
3682
3683         /* rc6 disabled by default due to repeated reports of hanging during
3684          * boot and resume.
3685          */
3686         if (!intel_enable_rc6(dev))
3687                 return;
3688
3689         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3690
3691         ret = ironlake_setup_rc6(dev);
3692         if (ret)
3693                 return;
3694
3695         was_interruptible = dev_priv->mm.interruptible;
3696         dev_priv->mm.interruptible = false;
3697
3698         /*
3699          * GPU can automatically power down the render unit if given a page
3700          * to save state.
3701          */
3702         ret = intel_ring_begin(ring, 6);
3703         if (ret) {
3704                 ironlake_teardown_rc6(dev);
3705                 dev_priv->mm.interruptible = was_interruptible;
3706                 return;
3707         }
3708
3709         intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
3710         intel_ring_emit(ring, MI_SET_CONTEXT);
3711         intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
3712                         MI_MM_SPACE_GTT |
3713                         MI_SAVE_EXT_STATE_EN |
3714                         MI_RESTORE_EXT_STATE_EN |
3715                         MI_RESTORE_INHIBIT);
3716         intel_ring_emit(ring, MI_SUSPEND_FLUSH);
3717         intel_ring_emit(ring, MI_NOOP);
3718         intel_ring_emit(ring, MI_FLUSH);
3719         intel_ring_advance(ring);
3720
3721         /*
3722          * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
3723          * does an implicit flush; combined with the MI_FLUSH above, it should
3724          * be safe to assume that renderctx is valid.
3725          */
3726         ret = intel_ring_idle(ring);
3727         dev_priv->mm.interruptible = was_interruptible;
3728         if (ret) {
3729                 DRM_ERROR("failed to enable ironlake power savings\n");
3730                 ironlake_teardown_rc6(dev);
3731                 return;
3732         }
3733
3734         I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
3735         I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
3736 }
3737
3738 static unsigned long intel_pxfreq(u32 vidfreq)
3739 {
3740         unsigned long freq;
3741         int div = (vidfreq & 0x3f0000) >> 16;
3742         int post = (vidfreq & 0x3000) >> 12;
3743         int pre = (vidfreq & 0x7);
3744
3745         if (!pre)
3746                 return 0;
3747
3748         freq = ((div * 133333) / ((1<<post) * pre));
3749
3750         return freq;
3751 }
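/*
 * Worked example of the PXVFREQ decode above (made-up field values):
 * div = 0x0c, post = 1, pre = 2 gives
 * freq = (12 * 133333) / ((1 << 1) * 2) = 399999, i.e. ~400 MHz derived
 * from a 133.33 MHz reference divided down by the post and pre
 * dividers.
 */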
3752
3753 static const struct cparams {
3754         u16 i;
3755         u16 t;
3756         u16 m;
3757         u16 c;
3758 } cparams[] = {
3759         { 1, 1333, 301, 28664 },
3760         { 1, 1066, 294, 24460 },
3761         { 1, 800, 294, 25192 },
3762         { 0, 1333, 276, 27605 },
3763         { 0, 1066, 276, 27605 },
3764         { 0, 800, 231, 23784 },
3765 };
3766
3767 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
3768 {
3769         u64 total_count, diff, ret;
3770         u32 count1, count2, count3, m = 0, c = 0;
3771         unsigned long now = jiffies_to_msecs(jiffies), diff1;
3772         int i;
3773
3774         assert_spin_locked(&mchdev_lock);
3775
3776         diff1 = now - dev_priv->ips.last_time1;
3777
3778         /* Prevent division-by-zero if we are asking too fast.
3779          * Also, we don't get interesting results if we are polling
3780          * faster than once in 10ms, so just return the saved value
3781          * in such cases.
3782          */
3783         if (diff1 <= 10)
3784                 return dev_priv->ips.chipset_power;
3785
3786         count1 = I915_READ(DMIEC);
3787         count2 = I915_READ(DDREC);
3788         count3 = I915_READ(CSIEC);
3789
3790         total_count = count1 + count2 + count3;
3791
3792         /* FIXME: handle per-counter overflow */
3793         if (total_count < dev_priv->ips.last_count1) {
3794                 diff = ~0UL - dev_priv->ips.last_count1;
3795                 diff += total_count;
3796         } else {
3797                 diff = total_count - dev_priv->ips.last_count1;
3798         }
3799
3800         for (i = 0; i < ARRAY_SIZE(cparams); i++) {
3801                 if (cparams[i].i == dev_priv->ips.c_m &&
3802                     cparams[i].t == dev_priv->ips.r_t) {
3803                         m = cparams[i].m;
3804                         c = cparams[i].c;
3805                         break;
3806                 }
3807         }
3808
3809         diff = div_u64(diff, diff1);
3810         ret = ((m * diff) + c);
3811         ret = div_u64(ret, 10);
3812
3813         dev_priv->ips.last_count1 = total_count;
3814         dev_priv->ips.last_time1 = now;
3815
3816         dev_priv->ips.chipset_power = ret;
3817
3818         return ret;
3819 }
3820
3821 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
3822 {
3823         unsigned long val;
3824
3825         if (dev_priv->info->gen != 5)
3826                 return 0;
3827
3828         spin_lock_irq(&mchdev_lock);
3829
3830         val = __i915_chipset_val(dev_priv);
3831
3832         spin_unlock_irq(&mchdev_lock);
3833
3834         return val;
3835 }
3836
3837 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
3838 {
3839         unsigned long m, x, b;
3840         u32 tsfs;
3841
3842         tsfs = I915_READ(TSFS);
3843
3844         m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
3845         x = I915_READ8(TR1);
3846
3847         b = tsfs & TSFS_INTR_MASK;
3848
3849         return ((m * x) / 127) - b;
3850 }
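/*
 * i915_mch_val() linearises the thermal sensor reading as
 * m * x / 127 - b.  E.g. (made-up values) a slope field m = 100, a TR1
 * reading x = 80 and an intercept b = 10 give
 * 100 * 80 / 127 - 10 = 52, in whatever degree units the sensor
 * reports; the callers below only compare the result against fixed
 * thresholds.
 */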
3851
3852 static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
3853 {
3854         static const struct v_table {
3855                 u16 vd; /* in .1 mil */
3856                 u16 vm; /* in .1 mil */
3857         } v_table[] = {
3858                 { 0, 0, },
3859                 { 375, 0, },
3860                 { 500, 0, },
3861                 { 625, 0, },
3862                 { 750, 0, },
3863                 { 875, 0, },
3864                 { 1000, 0, },
3865                 { 1125, 0, },
3866                 { 4125, 3000, },
3867                 { 4125, 3000, },
3868                 { 4125, 3000, },
3869                 { 4125, 3000, },
3870                 { 4125, 3000, },
3871                 { 4125, 3000, },
3872                 { 4125, 3000, },
3873                 { 4125, 3000, },
3874                 { 4125, 3000, },
3875                 { 4125, 3000, },
3876                 { 4125, 3000, },
3877                 { 4125, 3000, },
3878                 { 4125, 3000, },
3879                 { 4125, 3000, },
3880                 { 4125, 3000, },
3881                 { 4125, 3000, },
3882                 { 4125, 3000, },
3883                 { 4125, 3000, },
3884                 { 4125, 3000, },
3885                 { 4125, 3000, },
3886                 { 4125, 3000, },
3887                 { 4125, 3000, },
3888                 { 4125, 3000, },
3889                 { 4125, 3000, },
3890                 { 4250, 3125, },
3891                 { 4375, 3250, },
3892                 { 4500, 3375, },
3893                 { 4625, 3500, },
3894                 { 4750, 3625, },
3895                 { 4875, 3750, },
3896                 { 5000, 3875, },
3897                 { 5125, 4000, },
3898                 { 5250, 4125, },
3899                 { 5375, 4250, },
3900                 { 5500, 4375, },
3901                 { 5625, 4500, },
3902                 { 5750, 4625, },
3903                 { 5875, 4750, },
3904                 { 6000, 4875, },
3905                 { 6125, 5000, },
3906                 { 6250, 5125, },
3907                 { 6375, 5250, },
3908                 { 6500, 5375, },
3909                 { 6625, 5500, },
3910                 { 6750, 5625, },
3911                 { 6875, 5750, },
3912                 { 7000, 5875, },
3913                 { 7125, 6000, },
3914                 { 7250, 6125, },
3915                 { 7375, 6250, },
3916                 { 7500, 6375, },
3917                 { 7625, 6500, },
3918                 { 7750, 6625, },
3919                 { 7875, 6750, },
3920                 { 8000, 6875, },
3921                 { 8125, 7000, },
3922                 { 8250, 7125, },
3923                 { 8375, 7250, },
3924                 { 8500, 7375, },
3925                 { 8625, 7500, },
3926                 { 8750, 7625, },
3927                 { 8875, 7750, },
3928                 { 9000, 7875, },
3929                 { 9125, 8000, },
3930                 { 9250, 8125, },
3931                 { 9375, 8250, },
3932                 { 9500, 8375, },
3933                 { 9625, 8500, },
3934                 { 9750, 8625, },
3935                 { 9875, 8750, },
3936                 { 10000, 8875, },
3937                 { 10125, 9000, },
3938                 { 10250, 9125, },
3939                 { 10375, 9250, },
3940                 { 10500, 9375, },
3941                 { 10625, 9500, },
3942                 { 10750, 9625, },
3943                 { 10875, 9750, },
3944                 { 11000, 9875, },
3945                 { 11125, 10000, },
3946                 { 11250, 10125, },
3947                 { 11375, 10250, },
3948                 { 11500, 10375, },
3949                 { 11625, 10500, },
3950                 { 11750, 10625, },
3951                 { 11875, 10750, },
3952                 { 12000, 10875, },
3953                 { 12125, 11000, },
3954                 { 12250, 11125, },
3955                 { 12375, 11250, },
3956                 { 12500, 11375, },
3957                 { 12625, 11500, },
3958                 { 12750, 11625, },
3959                 { 12875, 11750, },
3960                 { 13000, 11875, },
3961                 { 13125, 12000, },
3962                 { 13250, 12125, },
3963                 { 13375, 12250, },
3964                 { 13500, 12375, },
3965                 { 13625, 12500, },
3966                 { 13750, 12625, },
3967                 { 13875, 12750, },
3968                 { 14000, 12875, },
3969                 { 14125, 13000, },
3970                 { 14250, 13125, },
3971                 { 14375, 13250, },
3972                 { 14500, 13375, },
3973                 { 14625, 13500, },
3974                 { 14750, 13625, },
3975                 { 14875, 13750, },
3976                 { 15000, 13875, },
3977                 { 15125, 14000, },
3978                 { 15250, 14125, },
3979                 { 15375, 14250, },
3980                 { 15500, 14375, },
3981                 { 15625, 14500, },
3982                 { 15750, 14625, },
3983                 { 15875, 14750, },
3984                 { 16000, 14875, },
3985                 { 16125, 15000, },
3986         };
3987         if (dev_priv->info->is_mobile)
3988                 return v_table[pxvid].vm;
3989         else
3990                 return v_table[pxvid].vd;
3991 }
3992
3993 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
3994 {
3995         struct timespec now, diff1;
3996         u64 diff;
3997         unsigned long diffms;
3998         u32 count;
3999
4000         assert_spin_locked(&mchdev_lock);
4001
4002         getrawmonotonic(&now);
4003         diff1 = timespec_sub(now, dev_priv->ips.last_time2);
4004
4005         /* Don't divide by 0 */
4006         diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
4007         if (!diffms)
4008                 return;
4009
4010         count = I915_READ(GFXEC);
4011
4012         if (count < dev_priv->ips.last_count2) {
4013                 diff = ~0UL - dev_priv->ips.last_count2;
4014                 diff += count;
4015         } else {
4016                 diff = count - dev_priv->ips.last_count2;
4017         }
4018
4019         dev_priv->ips.last_count2 = count;
4020         dev_priv->ips.last_time2 = now;
4021
4022         /* More magic constants... */
4023         diff = diff * 1181;
4024         diff = div_u64(diff, diffms * 10);
4025         dev_priv->ips.gfx_power = diff;
4026 }
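/*
 * The wrap handling above follows the free-running-counter idiom: when
 * the new GFXEC reading is below the previous one, the counter is
 * assumed to have wrapped, and the delta is taken as the distance from
 * last_count2 up to the all-ones value plus the new reading, instead
 * of a huge bogus difference.
 */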
4027
4028 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
4029 {
4030         if (dev_priv->info->gen != 5)
4031                 return;
4032
4033         spin_lock_irq(&mchdev_lock);
4034
4035         __i915_update_gfx_val(dev_priv);
4036
4037         spin_unlock_irq(&mchdev_lock);
4038 }
4039
4040 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
4041 {
4042         unsigned long t, corr, state1, corr2, state2;
4043         u32 pxvid, ext_v;
4044
4045         assert_spin_locked(&mchdev_lock);
4046
4047         pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
4048         pxvid = (pxvid >> 24) & 0x7f;
4049         ext_v = pvid_to_extvid(dev_priv, pxvid);
4050
4051         state1 = ext_v;
4052
4053         t = i915_mch_val(dev_priv);
4054
4055         /* Revel in the empirically derived constants */
4056
4057         /* Correction factor in 1/100000 units */
4058         if (t > 80)
4059                 corr = ((t * 2349) + 135940);
4060         else if (t >= 50)
4061                 corr = ((t * 964) + 29317);
4062         else /* < 50 */
4063                 corr = ((t * 301) + 1004);
4064
4065         corr = corr * ((150142 * state1) / 10000 - 78642);
4066         corr /= 100000;
4067         corr2 = (corr * dev_priv->ips.corr);
4068
4069         state2 = (corr2 * state1) / 10000;
4070         state2 /= 100; /* convert to mW */
4071
4072         __i915_update_gfx_val(dev_priv);
4073
4074         return dev_priv->ips.gfx_power + state2;
4075 }
4076
4077 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
4078 {
4079         unsigned long val;
4080
4081         if (dev_priv->info->gen != 5)
4082                 return 0;
4083
4084         spin_lock_irq(&mchdev_lock);
4085
4086         val = __i915_gfx_val(dev_priv);
4087
4088         spin_unlock_irq(&mchdev_lock);
4089
4090         return val;
4091 }
4092
4093 /**
4094  * i915_read_mch_val - return value for IPS use
4095  *
4096  * Calculate and return a value for the IPS driver to use when deciding whether
4097  * we have thermal and power headroom to increase CPU or GPU power budget.
4098  */
4099 unsigned long i915_read_mch_val(void)
4100 {
4101         struct drm_i915_private *dev_priv;
4102         unsigned long chipset_val, graphics_val, ret = 0;
4103
4104         spin_lock_irq(&mchdev_lock);
4105         if (!i915_mch_dev)
4106                 goto out_unlock;
4107         dev_priv = i915_mch_dev;
4108
4109         chipset_val = __i915_chipset_val(dev_priv);
4110         graphics_val = __i915_gfx_val(dev_priv);
4111
4112         ret = chipset_val + graphics_val;
4113
4114 out_unlock:
4115         spin_unlock_irq(&mchdev_lock);
4116
4117         return ret;
4118 }
4119 EXPORT_SYMBOL_GPL(i915_read_mch_val);
4120
4121 /**
4122  * i915_gpu_raise - raise GPU frequency limit
4123  *
4124  * Raise the limit; IPS indicates we have thermal headroom.
4125  */
4126 bool i915_gpu_raise(void)
4127 {
4128         struct drm_i915_private *dev_priv;
4129         bool ret = true;
4130
4131         spin_lock_irq(&mchdev_lock);
4132         if (!i915_mch_dev) {
4133                 ret = false;
4134                 goto out_unlock;
4135         }
4136         dev_priv = i915_mch_dev;
4137
4138         if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
4139                 dev_priv->ips.max_delay--;
4140
4141 out_unlock:
4142         spin_unlock_irq(&mchdev_lock);
4143
4144         return ret;
4145 }
4146 EXPORT_SYMBOL_GPL(i915_gpu_raise);
4147
4148 /**
4149  * i915_gpu_lower - lower GPU frequency limit
4150  *
4151  * IPS indicates we're close to a thermal limit, so throttle back the GPU
4152  * frequency maximum.
4153  */
4154 bool i915_gpu_lower(void)
4155 {
4156         struct drm_i915_private *dev_priv;
4157         bool ret = true;
4158
4159         spin_lock_irq(&mchdev_lock);
4160         if (!i915_mch_dev) {
4161                 ret = false;
4162                 goto out_unlock;
4163         }
4164         dev_priv = i915_mch_dev;
4165
4166         if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
4167                 dev_priv->ips.max_delay++;
4168
4169 out_unlock:
4170         spin_unlock_irq(&mchdev_lock);
4171
4172         return ret;
4173 }
4174 EXPORT_SYMBOL_GPL(i915_gpu_lower);
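/*
 * Note on the encoding used by the two helpers above (deduced from the
 * comparisons, not documented elsewhere in this file): a smaller "delay"
 * value means a higher frequency, so raising the limit decrements
 * ips.max_delay toward ips.fmax, while lowering it increments
 * ips.max_delay toward ips.min_delay.
 */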
4175
4176 /**
 * i915_gpu_busy - indicate GPU busyness to IPS
4178  *
4179  * Tell the IPS driver whether or not the GPU is busy.
4180  */
4181 bool i915_gpu_busy(void)
4182 {
4183         struct drm_i915_private *dev_priv;
4184         struct intel_ring_buffer *ring;
4185         bool ret = false;
4186         int i;
4187
4188         spin_lock_irq(&mchdev_lock);
4189         if (!i915_mch_dev)
4190                 goto out_unlock;
4191         dev_priv = i915_mch_dev;
4192
4193         for_each_ring(ring, dev_priv, i)
4194                 ret |= !list_empty(&ring->request_list);
4195
4196 out_unlock:
4197         spin_unlock_irq(&mchdev_lock);
4198
4199         return ret;
4200 }
4201 EXPORT_SYMBOL_GPL(i915_gpu_busy);
4202
4203 /**
4204  * i915_gpu_turbo_disable - disable graphics turbo
4205  *
4206  * Disable graphics turbo by resetting the max frequency and setting the
4207  * current frequency to the default.
4208  */
4209 bool i915_gpu_turbo_disable(void)
4210 {
4211         struct drm_i915_private *dev_priv;
4212         bool ret = true;
4213
4214         spin_lock_irq(&mchdev_lock);
4215         if (!i915_mch_dev) {
4216                 ret = false;
4217                 goto out_unlock;
4218         }
4219         dev_priv = i915_mch_dev;
4220
4221         dev_priv->ips.max_delay = dev_priv->ips.fstart;
4222
4223         if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
4224                 ret = false;
4225
4226 out_unlock:
4227         spin_unlock_irq(&mchdev_lock);
4228
4229         return ret;
4230 }
4231 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
4232
/**
 * ips_ping_for_i915_load - tell intel_ips that the i915 driver is now
 * loaded, in case IPS got loaded first.
4236  *
4237  * This awkward dance is so that neither module has to depend on the
4238  * other in order for IPS to do the appropriate communication of
4239  * GPU turbo limits to i915.
4240  */
4241 static void
4242 ips_ping_for_i915_load(void)
4243 {
4244         void (*link)(void);
4245
4246         link = symbol_get(ips_link_to_i915_driver);
4247         if (link) {
4248                 link();
4249                 symbol_put(ips_link_to_i915_driver);
4250         }
4251 }
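/*
 * The other half of the dance lives on the intel_ips side, which must
 * export the symbol looked up above. A sketch of the expected shape only
 * (the real definition is in drivers/platform/x86/intel_ips.c):
 *
 *	void ips_link_to_i915_driver(void)
 *	{
 *		... note that i915 is present and re-enable GPU turbo ...
 *	}
 *	EXPORT_SYMBOL_GPL(ips_link_to_i915_driver);
 */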
4252
4253 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
4254 {
4255         /* We only register the i915 ips part with intel-ips once everything is
4256          * set up, to avoid intel-ips sneaking in and reading bogus values. */
4257         spin_lock_irq(&mchdev_lock);
4258         i915_mch_dev = dev_priv;
4259         spin_unlock_irq(&mchdev_lock);
4260
4261         ips_ping_for_i915_load();
4262 }
4263
4264 void intel_gpu_ips_teardown(void)
4265 {
4266         spin_lock_irq(&mchdev_lock);
4267         i915_mch_dev = NULL;
4268         spin_unlock_irq(&mchdev_lock);
}

4270 static void intel_init_emon(struct drm_device *dev)
4271 {
4272         struct drm_i915_private *dev_priv = dev->dev_private;
4273         u32 lcfuse;
4274         u8 pxw[16];
4275         int i;
4276
	/* Disable the energy counter while we program it */
4278         I915_WRITE(ECR, 0);
4279         POSTING_READ(ECR);
4280
4281         /* Program energy weights for various events */
4282         I915_WRITE(SDEW, 0x15040d00);
4283         I915_WRITE(CSIEW0, 0x007f0000);
4284         I915_WRITE(CSIEW1, 0x1e220004);
4285         I915_WRITE(CSIEW2, 0x04000004);
4286
4287         for (i = 0; i < 5; i++)
4288                 I915_WRITE(PEW + (i * 4), 0);
4289         for (i = 0; i < 3; i++)
4290                 I915_WRITE(DEW + (i * 4), 0);
4291
4292         /* Program P-state weights to account for frequency power adjustment */
4293         for (i = 0; i < 16; i++) {
4294                 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
4295                 unsigned long freq = intel_pxfreq(pxvidfreq);
4296                 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
4297                         PXVFREQ_PX_SHIFT;
4298                 unsigned long val;
4299
4300                 val = vid * vid;
4301                 val *= (freq / 1000);
4302                 val *= 255;
4303                 val /= (127*127*900);
4304                 if (val > 0xff)
4305                         DRM_ERROR("bad pxval: %ld\n", val);
4306                 pxw[i] = val;
4307         }
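	/*
	 * Worked example of the formula above, as a sanity check: at the
	 * reference point vid = 127 and freq/1000 = 900 the expression
	 * collapses to 127*127 * 900 * 255 / (127*127*900) = 255 = 0xff,
	 * i.e. exactly the full-scale weight that the overflow check
	 * guards against exceeding.
	 */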
4308         /* Render standby states get 0 weight */
4309         pxw[14] = 0;
4310         pxw[15] = 0;
4311
4312         for (i = 0; i < 4; i++) {
4313                 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
4314                         (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
4315                 I915_WRITE(PXW + (i * 4), val);
4316         }
4317
4318         /* Adjust magic regs to magic values (more experimental results) */
4319         I915_WRITE(OGW0, 0);
4320         I915_WRITE(OGW1, 0);
4321         I915_WRITE(EG0, 0x00007f00);
4322         I915_WRITE(EG1, 0x0000000e);
4323         I915_WRITE(EG2, 0x000e0000);
4324         I915_WRITE(EG3, 0x68000300);
4325         I915_WRITE(EG4, 0x42000000);
4326         I915_WRITE(EG5, 0x00140031);
4327         I915_WRITE(EG6, 0);
4328         I915_WRITE(EG7, 0);
4329
4330         for (i = 0; i < 8; i++)
4331                 I915_WRITE(PXWL + (i * 4), 0);
4332
4333         /* Enable PMON + select events */
4334         I915_WRITE(ECR, 0x80000019);
4335
4336         lcfuse = I915_READ(LCFUSE02);
4337
4338         dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
4339 }
4340
4341 void intel_disable_gt_powersave(struct drm_device *dev)
4342 {
4343         struct drm_i915_private *dev_priv = dev->dev_private;
4344
4345         /* Interrupts should be disabled already to avoid re-arming. */
4346         WARN_ON(dev->irq_enabled);
4347
4348         if (IS_IRONLAKE_M(dev)) {
4349                 ironlake_disable_drps(dev);
4350                 ironlake_disable_rc6(dev);
4351         } else if (INTEL_INFO(dev)->gen >= 6) {
4352                 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
4353                 cancel_work_sync(&dev_priv->rps.work);
4354                 if (IS_VALLEYVIEW(dev))
4355                         cancel_delayed_work_sync(&dev_priv->rps.vlv_work);
4356                 mutex_lock(&dev_priv->rps.hw_lock);
4357                 if (IS_VALLEYVIEW(dev))
4358                         valleyview_disable_rps(dev);
4359                 else
4360                         gen6_disable_rps(dev);
4361                 mutex_unlock(&dev_priv->rps.hw_lock);
4362         }
4363 }
4364
4365 static void intel_gen6_powersave_work(struct work_struct *work)
4366 {
4367         struct drm_i915_private *dev_priv =
4368                 container_of(work, struct drm_i915_private,
4369                              rps.delayed_resume_work.work);
4370         struct drm_device *dev = dev_priv->dev;
4371
4372         mutex_lock(&dev_priv->rps.hw_lock);
4373
4374         if (IS_VALLEYVIEW(dev)) {
4375                 valleyview_enable_rps(dev);
4376         } else {
4377                 gen6_enable_rps(dev);
4378                 gen6_update_ring_freq(dev);
4379         }
4380         mutex_unlock(&dev_priv->rps.hw_lock);
4381 }
4382
4383 void intel_enable_gt_powersave(struct drm_device *dev)
4384 {
4385         struct drm_i915_private *dev_priv = dev->dev_private;
4386
4387         if (IS_IRONLAKE_M(dev)) {
4388                 ironlake_enable_drps(dev);
4389                 ironlake_enable_rc6(dev);
4390                 intel_init_emon(dev);
4391         } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do it out of our fast path
		 * to make resume and init faster. round_jiffies_up_relative(HZ)
		 * delays the work by roughly a second, rounded up so the
		 * wakeup can batch with other timers.
		 */
4397                 schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
4398                                       round_jiffies_up_relative(HZ));
4399         }
4400 }
4401
4402 static void ibx_init_clock_gating(struct drm_device *dev)
4403 {
4404         struct drm_i915_private *dev_priv = dev->dev_private;
4405
4406         /*
4407          * On Ibex Peak and Cougar Point, we need to disable clock
4408          * gating for the panel power sequencer or it will fail to
4409          * start up when no ports are active.
4410          */
4411         I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
4412 }
4413
4414 static void g4x_disable_trickle_feed(struct drm_device *dev)
4415 {
4416         struct drm_i915_private *dev_priv = dev->dev_private;
4417         int pipe;
4418
4419         for_each_pipe(pipe) {
4420                 I915_WRITE(DSPCNTR(pipe),
4421                            I915_READ(DSPCNTR(pipe)) |
4422                            DISPPLANE_TRICKLE_FEED_DISABLE);
4423                 intel_flush_display_plane(dev_priv, pipe);
4424         }
4425 }
4426
4427 static void ironlake_init_clock_gating(struct drm_device *dev)
4428 {
4429         struct drm_i915_private *dev_priv = dev->dev_private;
4430         uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
4431
4432         /* Required for FBC */
4433         dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
4434                    ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
4435                    ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
4436
4437         I915_WRITE(PCH_3DCGDIS0,
4438                    MARIUNIT_CLOCK_GATE_DISABLE |
4439                    SVSMUNIT_CLOCK_GATE_DISABLE);
4440         I915_WRITE(PCH_3DCGDIS1,
4441                    VFMUNIT_CLOCK_GATE_DISABLE);
4442
	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh:
	 *   bits 22/21 of 0x42004
	 *   bit 5 of 0x42020
	 *   bit 15 of 0x45000
	 */
4450         I915_WRITE(ILK_DISPLAY_CHICKEN2,
4451                    (I915_READ(ILK_DISPLAY_CHICKEN2) |
4452                     ILK_DPARB_GATE | ILK_VSDPFD_FULL));
4453         dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
4454         I915_WRITE(DISP_ARB_CTL,
4455                    (I915_READ(DISP_ARB_CTL) |
4456                     DISP_FBC_WM_DIS));
4457         I915_WRITE(WM3_LP_ILK, 0);
4458         I915_WRITE(WM2_LP_ILK, 0);
4459         I915_WRITE(WM1_LP_ILK, 0);
4460
	/*
	 * Per the hardware documentation, the following bits should be
	 * set unconditionally in order to enable FBC:
	 *   bit 22 of 0x42000
	 *   bit 22 of 0x42004
	 *   bits 7/8/9 of 0x42020
	 */
4468         if (IS_IRONLAKE_M(dev)) {
4469                 I915_WRITE(ILK_DISPLAY_CHICKEN1,
4470                            I915_READ(ILK_DISPLAY_CHICKEN1) |
4471                            ILK_FBCQ_DIS);
4472                 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4473                            I915_READ(ILK_DISPLAY_CHICKEN2) |
4474                            ILK_DPARB_GATE);
4475         }
4476
4477         I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
4478
4479         I915_WRITE(ILK_DISPLAY_CHICKEN2,
4480                    I915_READ(ILK_DISPLAY_CHICKEN2) |
4481                    ILK_ELPIN_409_SELECT);
4482         I915_WRITE(_3D_CHICKEN2,
4483                    _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
4484                    _3D_CHICKEN2_WM_READ_PIPELINED);
4485
4486         /* WaDisableRenderCachePipelinedFlush:ilk */
4487         I915_WRITE(CACHE_MODE_0,
4488                    _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
4489
4490         g4x_disable_trickle_feed(dev);
4491
4492         ibx_init_clock_gating(dev);
4493 }
4494
4495 static void cpt_init_clock_gating(struct drm_device *dev)
4496 {
4497         struct drm_i915_private *dev_priv = dev->dev_private;
4498         int pipe;
4499         uint32_t val;
4500
4501         /*
4502          * On Ibex Peak and Cougar Point, we need to disable clock
4503          * gating for the panel power sequencer or it will fail to
4504          * start up when no ports are active.
4505          */
4506         I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
4507         I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
4508                    DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes a weird display corruption (a few pixels shifted
	 * downward) seen only on the LVDS panels of some HP laptops with
	 * Ivy Bridge.
	 */
4512         for_each_pipe(pipe) {
4513                 val = I915_READ(TRANS_CHICKEN2(pipe));
4514                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
4515                 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
4516                 if (dev_priv->vbt.fdi_rx_polarity_inverted)
4517                         val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
4518                 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
4519                 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
4520                 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
4521                 I915_WRITE(TRANS_CHICKEN2(pipe), val);
4522         }
4523         /* WADP0ClockGatingDisable */
4524         for_each_pipe(pipe) {
4525                 I915_WRITE(TRANS_CHICKEN1(pipe),
4526                            TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
4527         }
4528 }
4529
4530 static void gen6_check_mch_setup(struct drm_device *dev)
4531 {
4532         struct drm_i915_private *dev_priv = dev->dev_private;
4533         uint32_t tmp;
4534
4535         tmp = I915_READ(MCH_SSKPD);
4536         if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
4537                 DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
4538                 DRM_INFO("This can cause pipe underruns and display issues.\n");
4539                 DRM_INFO("Please upgrade your BIOS to fix this.\n");
4540         }
4541 }
4542
4543 static void gen6_init_clock_gating(struct drm_device *dev)
4544 {
4545         struct drm_i915_private *dev_priv = dev->dev_private;
4546         uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
4547
4548         I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
4549
4550         I915_WRITE(ILK_DISPLAY_CHICKEN2,
4551                    I915_READ(ILK_DISPLAY_CHICKEN2) |
4552                    ILK_ELPIN_409_SELECT);
4553
4554         /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
4555         I915_WRITE(_3D_CHICKEN,
4556                    _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
4557
4558         /* WaSetupGtModeTdRowDispatch:snb */
4559         if (IS_SNB_GT1(dev))
4560                 I915_WRITE(GEN6_GT_MODE,
4561                            _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
4562
4563         I915_WRITE(WM3_LP_ILK, 0);
4564         I915_WRITE(WM2_LP_ILK, 0);
4565         I915_WRITE(WM1_LP_ILK, 0);
4566
4567         I915_WRITE(CACHE_MODE_0,
4568                    _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
4569
4570         I915_WRITE(GEN6_UCGCTL1,
4571                    I915_READ(GEN6_UCGCTL1) |
4572                    GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
4573                    GEN6_CSUNIT_CLOCK_GATE_DISABLE);
4574
4575         /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
4576          * gating disable must be set.  Failure to set it results in
4577          * flickering pixels due to Z write ordering failures after
4578          * some amount of runtime in the Mesa "fire" demo, and Unigine
4579          * Sanctuary and Tropics, and apparently anything else with
4580          * alpha test or pixel discard.
4581          *
4582          * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we have not tracked down an actual testcase to confirm it.
4584          *
4585          * Also apply WaDisableVDSUnitClockGating:snb and
4586          * WaDisableRCPBUnitClockGating:snb.
4587          */
4588         I915_WRITE(GEN6_UCGCTL2,
4589                    GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
4590                    GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
4591                    GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
4592
4593         /* Bspec says we need to always set all mask bits. */
4594         I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
4595                    _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
4596
	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh and FBC:
	 *   bits 21/22 of 0x42000
	 *   bits 21/22 of 0x42004
	 *   bits 5/7 of 0x42020
	 *   bit 14 of 0x70180
	 *   bit 14 of 0x71180
	 */
4606         I915_WRITE(ILK_DISPLAY_CHICKEN1,
4607                    I915_READ(ILK_DISPLAY_CHICKEN1) |
4608                    ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
4609         I915_WRITE(ILK_DISPLAY_CHICKEN2,
4610                    I915_READ(ILK_DISPLAY_CHICKEN2) |
4611                    ILK_DPARB_GATE | ILK_VSDPFD_FULL);
4612         I915_WRITE(ILK_DSPCLK_GATE_D,
4613                    I915_READ(ILK_DSPCLK_GATE_D) |
4614                    ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
4615                    ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
4616
4617         /* WaMbcDriverBootEnable:snb */
4618         I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
4619                    GEN6_MBCTL_ENABLE_BOOT_FETCH);
4620
4621         g4x_disable_trickle_feed(dev);
4622
4623         /* The default value should be 0x200 according to docs, but the two
4624          * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
4625         I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
4626         I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
4627
4628         cpt_init_clock_gating(dev);
4629
4630         gen6_check_mch_setup(dev);
4631 }
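/*
 * A note on the _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() helpers used
 * throughout these clock gating routines: the registers involved carry a
 * write-enable mask in their upper 16 bits, so the helpers expand (per
 * their definitions in i915_reg.h) to roughly
 *
 *	_MASKED_BIT_ENABLE(x)  ->  ((x) << 16) | (x)
 *	_MASKED_BIT_DISABLE(x) ->  ((x) << 16)
 *
 * which lets a plain I915_WRITE() flip individual bits without a
 * read-modify-write cycle.
 */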
4632
4633 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
4634 {
4635         uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
4636
4637         reg &= ~GEN7_FF_SCHED_MASK;
4638         reg |= GEN7_FF_TS_SCHED_HW;
4639         reg |= GEN7_FF_VS_SCHED_HW;
4640         reg |= GEN7_FF_DS_SCHED_HW;
4641
4642         if (IS_HASWELL(dev_priv->dev))
4643                 reg &= ~GEN7_FF_VS_REF_CNT_FFME;
4644
4645         I915_WRITE(GEN7_FF_THREAD_MODE, reg);
4646 }
4647
4648 static void lpt_init_clock_gating(struct drm_device *dev)
4649 {
4650         struct drm_i915_private *dev_priv = dev->dev_private;
4651
4652         /*
4653          * TODO: this bit should only be enabled when really needed, then
4654          * disabled when not needed anymore in order to save power.
4655          */
4656         if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
4657                 I915_WRITE(SOUTH_DSPCLK_GATE_D,
4658                            I915_READ(SOUTH_DSPCLK_GATE_D) |
4659                            PCH_LP_PARTITION_LEVEL_DISABLE);
4660
	/* WADP0ClockGatingDisable:hsw */
4662         I915_WRITE(_TRANSA_CHICKEN1,
4663                    I915_READ(_TRANSA_CHICKEN1) |
4664                    TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
4665 }
4666
4667 static void lpt_suspend_hw(struct drm_device *dev)
4668 {
4669         struct drm_i915_private *dev_priv = dev->dev_private;
4670
4671         if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
4672                 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
4673
4674                 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4675                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
4676         }
4677 }
4678
4679 static void haswell_init_clock_gating(struct drm_device *dev)
4680 {
4681         struct drm_i915_private *dev_priv = dev->dev_private;
4682
4683         I915_WRITE(WM3_LP_ILK, 0);
4684         I915_WRITE(WM2_LP_ILK, 0);
4685         I915_WRITE(WM1_LP_ILK, 0);
4686
4687         /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
4688          * This implements the WaDisableRCZUnitClockGating:hsw workaround.
4689          */
4690         I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
4691
4692         /* Apply the WaDisableRHWOOptimizationForRenderHang:hsw workaround. */
4693         I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
4694                    GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
4695
4696         /* WaApplyL3ControlAndL3ChickenMode:hsw */
4697         I915_WRITE(GEN7_L3CNTLREG1,
4698                         GEN7_WA_FOR_GEN7_L3_CONTROL);
4699         I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
4700                         GEN7_WA_L3_CHICKEN_MODE);
4701
4702         /* This is required by WaCatErrorRejectionIssue:hsw */
4703         I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
4704                         I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
4705                         GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
4706
4707         g4x_disable_trickle_feed(dev);
4708
4709         /* WaVSRefCountFullforceMissDisable:hsw */
4710         gen7_setup_fixed_func_scheduler(dev_priv);
4711
4712         /* WaDisable4x2SubspanOptimization:hsw */
4713         I915_WRITE(CACHE_MODE_1,
4714                    _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
4715
4716         /* WaMbcDriverBootEnable:hsw */
4717         I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
4718                    GEN6_MBCTL_ENABLE_BOOT_FETCH);
4719
4720         /* WaSwitchSolVfFArbitrationPriority:hsw */
4721         I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
4722
4723         /* WaRsPkgCStateDisplayPMReq:hsw */
4724         I915_WRITE(CHICKEN_PAR1_1,
4725                    I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
4726
4727         lpt_init_clock_gating(dev);
4728 }
4729
4730 static void ivybridge_init_clock_gating(struct drm_device *dev)
4731 {
4732         struct drm_i915_private *dev_priv = dev->dev_private;
4733         uint32_t snpcr;
4734
4735         I915_WRITE(WM3_LP_ILK, 0);
4736         I915_WRITE(WM2_LP_ILK, 0);
4737         I915_WRITE(WM1_LP_ILK, 0);
4738
4739         I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
4740
4741         /* WaDisableEarlyCull:ivb */
4742         I915_WRITE(_3D_CHICKEN3,
4743                    _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
4744
4745         /* WaDisableBackToBackFlipFix:ivb */
4746         I915_WRITE(IVB_CHICKEN3,
4747                    CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
4748                    CHICKEN3_DGMG_DONE_FIX_DISABLE);
4749
4750         /* WaDisablePSDDualDispatchEnable:ivb */
4751         if (IS_IVB_GT1(dev))
4752                 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
4753                            _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
4754         else
4755                 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
4756                            _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
4757
4758         /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
4759         I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
4760                    GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
4761
4762         /* WaApplyL3ControlAndL3ChickenMode:ivb */
4763         I915_WRITE(GEN7_L3CNTLREG1,
4764                         GEN7_WA_FOR_GEN7_L3_CONTROL);
4765         I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
4766                    GEN7_WA_L3_CHICKEN_MODE);
4767         if (IS_IVB_GT1(dev))
4768                 I915_WRITE(GEN7_ROW_CHICKEN2,
4769                            _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
4770         else
4771                 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
4772                            _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
4773
4775         /* WaForceL3Serialization:ivb */
4776         I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
4777                    ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
4778
4779         /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
4780          * gating disable must be set.  Failure to set it results in
4781          * flickering pixels due to Z write ordering failures after
4782          * some amount of runtime in the Mesa "fire" demo, and Unigine
4783          * Sanctuary and Tropics, and apparently anything else with
4784          * alpha test or pixel discard.
4785          *
4786          * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we have not tracked down an actual testcase to confirm it.
4788          *
4789          * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
4790          * This implements the WaDisableRCZUnitClockGating:ivb workaround.
4791          */
4792         I915_WRITE(GEN6_UCGCTL2,
4793                    GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
4794                    GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
4795
4796         /* This is required by WaCatErrorRejectionIssue:ivb */
4797         I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
4798                         I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
4799                         GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
4800
4801         g4x_disable_trickle_feed(dev);
4802
4803         /* WaMbcDriverBootEnable:ivb */
4804         I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
4805                    GEN6_MBCTL_ENABLE_BOOT_FETCH);
4806
4807         /* WaVSRefCountFullforceMissDisable:ivb */
4808         gen7_setup_fixed_func_scheduler(dev_priv);
4809
4810         /* WaDisable4x2SubspanOptimization:ivb */
4811         I915_WRITE(CACHE_MODE_1,
4812                    _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
4813
4814         snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4815         snpcr &= ~GEN6_MBC_SNPCR_MASK;
4816         snpcr |= GEN6_MBC_SNPCR_MED;
4817         I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
4818
4819         if (!HAS_PCH_NOP(dev))
4820                 cpt_init_clock_gating(dev);
4821
4822         gen6_check_mch_setup(dev);
4823 }
4824
4825 static void valleyview_init_clock_gating(struct drm_device *dev)
4826 {
4827         struct drm_i915_private *dev_priv = dev->dev_private;
4828
4829         I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
4830
4831         /* WaDisableEarlyCull:vlv */
4832         I915_WRITE(_3D_CHICKEN3,
4833                    _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
4834
4835         /* WaDisableBackToBackFlipFix:vlv */
4836         I915_WRITE(IVB_CHICKEN3,
4837                    CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
4838                    CHICKEN3_DGMG_DONE_FIX_DISABLE);
4839
4840         /* WaDisablePSDDualDispatchEnable:vlv */
4841         I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
4842                    _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
4843                                       GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
4844
4845         /* Apply the WaDisableRHWOOptimizationForRenderHang:vlv workaround. */
4846         I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
4847                    GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
4848
4849         /* WaApplyL3ControlAndL3ChickenMode:vlv */
4850         I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
4851         I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
4852
4853         /* WaForceL3Serialization:vlv */
4854         I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
4855                    ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
4856
4857         /* WaDisableDopClockGating:vlv */
4858         I915_WRITE(GEN7_ROW_CHICKEN2,
4859                    _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
4860
4861         /* This is required by WaCatErrorRejectionIssue:vlv */
4862         I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
4863                    I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
4864                    GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
4865
4866         /* WaMbcDriverBootEnable:vlv */
4867         I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
4868                    GEN6_MBCTL_ENABLE_BOOT_FETCH);
4869
4871         /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
4872          * gating disable must be set.  Failure to set it results in
4873          * flickering pixels due to Z write ordering failures after
4874          * some amount of runtime in the Mesa "fire" demo, and Unigine
4875          * Sanctuary and Tropics, and apparently anything else with
4876          * alpha test or pixel discard.
4877          *
4878          * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we have not tracked down an actual testcase to confirm it.
4880          *
4881          * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
4882          * This implements the WaDisableRCZUnitClockGating:vlv workaround.
4883          *
4884          * Also apply WaDisableVDSUnitClockGating:vlv and
4885          * WaDisableRCPBUnitClockGating:vlv.
4886          */
4887         I915_WRITE(GEN6_UCGCTL2,
4888                    GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
4889                    GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
4890                    GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
4891                    GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
4892                    GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
4893
4894         I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
4895
4896         I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
4897
4898         I915_WRITE(CACHE_MODE_1,
4899                    _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
4900
4901         /*
4902          * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
4904          * in the reporting of vblank events.
4905          */
4906         I915_WRITE(VLV_GUNIT_CLOCK_GATE, 0xffffffff);
4907
4908         /* Conservative clock gating settings for now */
4909         I915_WRITE(0x9400, 0xffffffff);
4910         I915_WRITE(0x9404, 0xffffffff);
4911         I915_WRITE(0x9408, 0xffffffff);
4912         I915_WRITE(0x940c, 0xffffffff);
4913         I915_WRITE(0x9410, 0xffffffff);
4914         I915_WRITE(0x9414, 0xffffffff);
4915         I915_WRITE(0x9418, 0xffffffff);
4916 }
4917
4918 static void g4x_init_clock_gating(struct drm_device *dev)
4919 {
4920         struct drm_i915_private *dev_priv = dev->dev_private;
4921         uint32_t dspclk_gate;
4922
4923         I915_WRITE(RENCLK_GATE_D1, 0);
4924         I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
4925                    GS_UNIT_CLOCK_GATE_DISABLE |
4926                    CL_UNIT_CLOCK_GATE_DISABLE);
4927         I915_WRITE(RAMCLK_GATE_D, 0);
4928         dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
4929                 OVRUNIT_CLOCK_GATE_DISABLE |
4930                 OVCUNIT_CLOCK_GATE_DISABLE;
4931         if (IS_GM45(dev))
4932                 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
4933         I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
4934
4935         /* WaDisableRenderCachePipelinedFlush */
4936         I915_WRITE(CACHE_MODE_0,
4937                    _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
4938
4939         g4x_disable_trickle_feed(dev);
4940 }
4941
4942 static void crestline_init_clock_gating(struct drm_device *dev)
4943 {
4944         struct drm_i915_private *dev_priv = dev->dev_private;
4945
4946         I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
4947         I915_WRITE(RENCLK_GATE_D2, 0);
4948         I915_WRITE(DSPCLK_GATE_D, 0);
4949         I915_WRITE(RAMCLK_GATE_D, 0);
4950         I915_WRITE16(DEUC, 0);
4951         I915_WRITE(MI_ARB_STATE,
4952                    _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
4953 }
4954
4955 static void broadwater_init_clock_gating(struct drm_device *dev)
4956 {
4957         struct drm_i915_private *dev_priv = dev->dev_private;
4958
4959         I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
4960                    I965_RCC_CLOCK_GATE_DISABLE |
4961                    I965_RCPB_CLOCK_GATE_DISABLE |
4962                    I965_ISC_CLOCK_GATE_DISABLE |
4963                    I965_FBC_CLOCK_GATE_DISABLE);
4964         I915_WRITE(RENCLK_GATE_D2, 0);
4965         I915_WRITE(MI_ARB_STATE,
4966                    _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
4967 }
4968
4969 static void gen3_init_clock_gating(struct drm_device *dev)
4970 {
4971         struct drm_i915_private *dev_priv = dev->dev_private;
4972         u32 dstate = I915_READ(D_STATE);
4973
4974         dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
4975                 DSTATE_DOT_CLOCK_GATING;
4976         I915_WRITE(D_STATE, dstate);
4977
4978         if (IS_PINEVIEW(dev))
4979                 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
4980
4981         /* IIR "flip pending" means done if this bit is set */
4982         I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
4983 }
4984
4985 static void i85x_init_clock_gating(struct drm_device *dev)
4986 {
4987         struct drm_i915_private *dev_priv = dev->dev_private;
4988
4989         I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
4990 }
4991
4992 static void i830_init_clock_gating(struct drm_device *dev)
4993 {
4994         struct drm_i915_private *dev_priv = dev->dev_private;
4995
4996         I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
4997 }
4998
4999 void intel_init_clock_gating(struct drm_device *dev)
5000 {
5001         struct drm_i915_private *dev_priv = dev->dev_private;
5002
5003         dev_priv->display.init_clock_gating(dev);
5004 }
5005
5006 void intel_suspend_hw(struct drm_device *dev)
5007 {
5008         if (HAS_PCH_LPT(dev))
5009                 lpt_suspend_hw(dev);
5010 }
5011
/**
 * intel_display_power_enabled - check the state of a display power domain
 *
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
5017 bool intel_display_power_enabled(struct drm_device *dev,
5018                                  enum intel_display_power_domain domain)
5019 {
5020         struct drm_i915_private *dev_priv = dev->dev_private;
5021
5022         if (!HAS_POWER_WELL(dev))
5023                 return true;
5024
5025         switch (domain) {
5026         case POWER_DOMAIN_PIPE_A:
5027         case POWER_DOMAIN_TRANSCODER_EDP:
5028                 return true;
5029         case POWER_DOMAIN_PIPE_B:
5030         case POWER_DOMAIN_PIPE_C:
5031         case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
5032         case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
5033         case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
5034         case POWER_DOMAIN_TRANSCODER_A:
5035         case POWER_DOMAIN_TRANSCODER_B:
5036         case POWER_DOMAIN_TRANSCODER_C:
5037                 return I915_READ(HSW_PWR_WELL_DRIVER) ==
5038                        (HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE);
5039         default:
5040                 BUG();
5041         }
5042 }
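/*
 * Illustrative caller pattern (a sketch; the register and pipe below are
 * placeholders): code touching resources behind the power well should
 * check the domain first, since those registers read as zero while the
 * well is down (see the vblank counter fixup in __intel_set_power_well()):
 *
 *	if (intel_display_power_enabled(dev, POWER_DOMAIN_PIPE_B))
 *		tmp = I915_READ(PIPECONF(PIPE_B));
 */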
5043
5044 static void __intel_set_power_well(struct drm_device *dev, bool enable)
5045 {
5046         struct drm_i915_private *dev_priv = dev->dev_private;
5047         bool is_enabled, enable_requested;
5048         uint32_t tmp;
5049
5050         tmp = I915_READ(HSW_PWR_WELL_DRIVER);
5051         is_enabled = tmp & HSW_PWR_WELL_STATE;
5052         enable_requested = tmp & HSW_PWR_WELL_ENABLE;
5053
5054         if (enable) {
5055                 if (!enable_requested)
5056                         I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE);
5057
5058                 if (!is_enabled) {
5059                         DRM_DEBUG_KMS("Enabling power well\n");
5060                         if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
5061                                       HSW_PWR_WELL_STATE), 20))
5062                                 DRM_ERROR("Timeout enabling power well\n");
5063                 }
5064         } else {
5065                 if (enable_requested) {
5066                         unsigned long irqflags;
5067                         enum pipe p;
5068
5069                         I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
5070                         POSTING_READ(HSW_PWR_WELL_DRIVER);
5071                         DRM_DEBUG_KMS("Requesting to disable the power well\n");
5072
5073                         /*
5074                          * After this, the registers on the pipes that are part
5075                          * of the power well will become zero, so we have to
5076                          * adjust our counters according to that.
5077                          *
5078                          * FIXME: Should we do this in general in
5079                          * drm_vblank_post_modeset?
5080                          */
5081                         spin_lock_irqsave(&dev->vbl_lock, irqflags);
5082                         for_each_pipe(p)
5083                                 if (p != PIPE_A)
5084                                         dev->last_vblank[p] = 0;
5085                         spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
5086                 }
5087         }
5088 }
5089
5090 static struct i915_power_well *hsw_pwr;
5091
5092 /* Display audio driver power well request */
5093 void i915_request_power_well(void)
5094 {
5095         if (WARN_ON(!hsw_pwr))
5096                 return;
5097
5098         spin_lock_irq(&hsw_pwr->lock);
5099         if (!hsw_pwr->count++ &&
5100                         !hsw_pwr->i915_request)
5101                 __intel_set_power_well(hsw_pwr->device, true);
5102         spin_unlock_irq(&hsw_pwr->lock);
5103 }
5104 EXPORT_SYMBOL_GPL(i915_request_power_well);
5105
5106 /* Display audio driver power well release */
5107 void i915_release_power_well(void)
5108 {
5109         if (WARN_ON(!hsw_pwr))
5110                 return;
5111
5112         spin_lock_irq(&hsw_pwr->lock);
5113         WARN_ON(!hsw_pwr->count);
5114         if (!--hsw_pwr->count &&
5115                        !hsw_pwr->i915_request)
5116                 __intel_set_power_well(hsw_pwr->device, false);
5117         spin_unlock_irq(&hsw_pwr->lock);
5118 }
5119 EXPORT_SYMBOL_GPL(i915_release_power_well);
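/*
 * Sketch of the intended consumer, the display audio driver: it would
 * bracket its register access with a request/release pair, typically
 * resolved via symbol_get() so neither module hard-depends on the other.
 * Hypothetical shape only:
 *
 *	i915_request_power_well();
 *	... program the HDMI/DP audio registers ...
 *	i915_release_power_well();
 */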
5120
5121 int i915_init_power_well(struct drm_device *dev)
5122 {
5123         struct drm_i915_private *dev_priv = dev->dev_private;
5124
5125         hsw_pwr = &dev_priv->power_well;
5126
5127         hsw_pwr->device = dev;
5128         spin_lock_init(&hsw_pwr->lock);
5129         hsw_pwr->count = 0;
5130
5131         return 0;
5132 }
5133
5134 void i915_remove_power_well(struct drm_device *dev)
5135 {
5136         hsw_pwr = NULL;
5137 }
5138
5139 void intel_set_power_well(struct drm_device *dev, bool enable)
5140 {
5141         struct drm_i915_private *dev_priv = dev->dev_private;
5142         struct i915_power_well *power_well = &dev_priv->power_well;
5143
5144         if (!HAS_POWER_WELL(dev))
5145                 return;
5146
5147         if (!i915_disable_power_well && !enable)
5148                 return;
5149
5150         spin_lock_irq(&power_well->lock);
5151         power_well->i915_request = enable;
5152
	/* Only reject a "disable" request: while the audio driver still
	 * holds a reference the well must stay enabled. */
5154         if (power_well->count && !enable) {
5155                 spin_unlock_irq(&power_well->lock);
5156                 return;
5157         }
5158
5159         __intel_set_power_well(dev, enable);
5160         spin_unlock_irq(&power_well->lock);
5161 }
5162
5163 /*
5164  * Starting with Haswell, we have a "Power Down Well" that can be turned off
5165  * when not needed anymore. We have 4 registers that can request the power well
5166  * to be enabled, and it will only be disabled if none of the registers is
5167  * requesting it to be enabled.
5168  */
5169 void intel_init_power_well(struct drm_device *dev)
5170 {
5171         struct drm_i915_private *dev_priv = dev->dev_private;
5172
5173         if (!HAS_POWER_WELL(dev))
5174                 return;
5175
5176         /* For now, we need the power well to be always enabled. */
5177         intel_set_power_well(dev, true);
5178
5179         /* We're taking over the BIOS, so clear any requests made by it since
5180          * the driver is in charge now. */
5181         if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE)
5182                 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
5183 }
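/*
 * For reference, the four requesters mentioned above are, assuming the
 * usual Haswell register layout: HSW_PWR_WELL_BIOS, HSW_PWR_WELL_DRIVER
 * (the one this file drives), HSW_PWR_WELL_KVMR and HSW_PWR_WELL_DEBUG.
 * The well stays powered while any of them has its enable bit set.
 */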
5184
5185 /* Set up chip specific power management-related functions */
5186 void intel_init_pm(struct drm_device *dev)
5187 {
5188         struct drm_i915_private *dev_priv = dev->dev_private;
5189
5190         if (I915_HAS_FBC(dev)) {
5191                 if (HAS_PCH_SPLIT(dev)) {
5192                         dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
5193                         if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
5194                                 dev_priv->display.enable_fbc =
5195                                         gen7_enable_fbc;
5196                         else
5197                                 dev_priv->display.enable_fbc =
5198                                         ironlake_enable_fbc;
5199                         dev_priv->display.disable_fbc = ironlake_disable_fbc;
5200                 } else if (IS_GM45(dev)) {
5201                         dev_priv->display.fbc_enabled = g4x_fbc_enabled;
5202                         dev_priv->display.enable_fbc = g4x_enable_fbc;
5203                         dev_priv->display.disable_fbc = g4x_disable_fbc;
5204                 } else if (IS_CRESTLINE(dev)) {
5205                         dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
5206                         dev_priv->display.enable_fbc = i8xx_enable_fbc;
5207                         dev_priv->display.disable_fbc = i8xx_disable_fbc;
5208                 }
5209                 /* 855GM needs testing */
5210         }
5211
5212         /* For cxsr */
5213         if (IS_PINEVIEW(dev))
5214                 i915_pineview_get_mem_freq(dev);
5215         else if (IS_GEN5(dev))
5216                 i915_ironlake_get_mem_freq(dev);
5217
5218         /* For FIFO watermark updates */
5219         if (HAS_PCH_SPLIT(dev)) {
5220                 if (IS_GEN5(dev)) {
5221                         if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
5222                                 dev_priv->display.update_wm = ironlake_update_wm;
5223                         else {
5224                                 DRM_DEBUG_KMS("Failed to get proper latency. "
5225                                               "Disable CxSR\n");
5226                                 dev_priv->display.update_wm = NULL;
5227                         }
5228                         dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
5229                 } else if (IS_GEN6(dev)) {
5230                         if (SNB_READ_WM0_LATENCY()) {
5231                                 dev_priv->display.update_wm = sandybridge_update_wm;
5232                                 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
5233                         } else {
5234                                 DRM_DEBUG_KMS("Failed to read display plane latency. "
5235                                               "Disable CxSR\n");
5236                                 dev_priv->display.update_wm = NULL;
5237                         }
5238                         dev_priv->display.init_clock_gating = gen6_init_clock_gating;
5239                 } else if (IS_IVYBRIDGE(dev)) {
5240                         if (SNB_READ_WM0_LATENCY()) {
5241                                 dev_priv->display.update_wm = ivybridge_update_wm;
5242                                 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
5243                         } else {
5244                                 DRM_DEBUG_KMS("Failed to read display plane latency. "
5245                                               "Disable CxSR\n");
5246                                 dev_priv->display.update_wm = NULL;
5247                         }
5248                         dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
5249                 } else if (IS_HASWELL(dev)) {
5250                         if (I915_READ64(MCH_SSKPD)) {
5251                                 dev_priv->display.update_wm = haswell_update_wm;
5252                                 dev_priv->display.update_sprite_wm =
5253                                         haswell_update_sprite_wm;
5254                         } else {
5255                                 DRM_DEBUG_KMS("Failed to read display plane latency. "
5256                                               "Disable CxSR\n");
5257                                 dev_priv->display.update_wm = NULL;
5258                         }
5259                         dev_priv->display.init_clock_gating = haswell_init_clock_gating;
5260                 } else
5261                         dev_priv->display.update_wm = NULL;
5262         } else if (IS_VALLEYVIEW(dev)) {
5263                 dev_priv->display.update_wm = valleyview_update_wm;
5264                 dev_priv->display.init_clock_gating =
5265                         valleyview_init_clock_gating;
5266         } else if (IS_PINEVIEW(dev)) {
5267                 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
5268                                             dev_priv->is_ddr3,
5269                                             dev_priv->fsb_freq,
5270                                             dev_priv->mem_freq)) {
5271                         DRM_INFO("failed to find known CxSR latency "
5272                                  "(found ddr%s fsb freq %d, mem freq %d), "
5273                                  "disabling CxSR\n",
5274                                  (dev_priv->is_ddr3 == 1) ? "3" : "2",
5275                                  dev_priv->fsb_freq, dev_priv->mem_freq);
5276                         /* Disable CxSR and never update its watermark again */
5277                         pineview_disable_cxsr(dev);
5278                         dev_priv->display.update_wm = NULL;
5279                 } else
5280                         dev_priv->display.update_wm = pineview_update_wm;
5281                 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
5282         } else if (IS_G4X(dev)) {
5283                 dev_priv->display.update_wm = g4x_update_wm;
5284                 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
5285         } else if (IS_GEN4(dev)) {
5286                 dev_priv->display.update_wm = i965_update_wm;
5287                 if (IS_CRESTLINE(dev))
5288                         dev_priv->display.init_clock_gating = crestline_init_clock_gating;
5289                 else if (IS_BROADWATER(dev))
5290                         dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
5291         } else if (IS_GEN3(dev)) {
5292                 dev_priv->display.update_wm = i9xx_update_wm;
5293                 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
5294                 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
5295         } else if (IS_I865G(dev)) {
5296                 dev_priv->display.update_wm = i830_update_wm;
5297                 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
5298                 dev_priv->display.get_fifo_size = i830_get_fifo_size;
5299         } else if (IS_I85X(dev)) {
5300                 dev_priv->display.update_wm = i9xx_update_wm;
5301                 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
5302                 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
5303         } else {
5304                 dev_priv->display.update_wm = i830_update_wm;
5305                 dev_priv->display.init_clock_gating = i830_init_clock_gating;
5306                 if (IS_845G(dev))
5307                         dev_priv->display.get_fifo_size = i845_get_fifo_size;
5308                 else
5309                         dev_priv->display.get_fifo_size = i830_get_fifo_size;
5310         }
5311 }
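/*
 * The hooks installed above are invoked indirectly from the modeset code,
 * and the NULL assignments matter because callers are expected to check
 * the pointer first. A sketch of the calling convention (the actual
 * wrapper lives in intel_display.c):
 *
 *	if (dev_priv->display.update_wm)
 *		dev_priv->display.update_wm(dev);
 */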
5312
5313 static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
5314 {
5315         u32 gt_thread_status_mask;
5316
5317         if (IS_HASWELL(dev_priv->dev))
5318                 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
5319         else
5320                 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
5321
5322         /* w/a for a sporadic read returning 0 by waiting for the GT
5323          * thread to wake up.
5324          */
5325         if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
5326                 DRM_ERROR("GT thread status wait timed out\n");
5327 }
5328
5329 static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
5330 {
5331         I915_WRITE_NOTRACE(FORCEWAKE, 0);
5332         POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
5333 }
5334
5335 static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
5336 {
5337         if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0,
5338                             FORCEWAKE_ACK_TIMEOUT_MS))
5339                 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
5340
5341         I915_WRITE_NOTRACE(FORCEWAKE, 1);
5342         POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
5343
5344         if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1),
5345                             FORCEWAKE_ACK_TIMEOUT_MS))
5346                 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
5347
5348         /* WaRsForcewakeWaitTC0:snb */
5349         __gen6_gt_wait_for_thread_c0(dev_priv);
5350 }
5351
5352 static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
5353 {
5354         I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
5355         /* something from same cacheline, but !FORCEWAKE_MT */
5356         POSTING_READ(ECOBUS);
5357 }
5358
5359 static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
5360 {
5361         u32 forcewake_ack;
5362
5363         if (IS_HASWELL(dev_priv->dev))
5364                 forcewake_ack = FORCEWAKE_ACK_HSW;
5365         else
5366                 forcewake_ack = FORCEWAKE_MT_ACK;
5367
5368         if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0,
5369                             FORCEWAKE_ACK_TIMEOUT_MS))
5370                 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
5371
5372         I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
5373         /* something from same cacheline, but !FORCEWAKE_MT */
5374         POSTING_READ(ECOBUS);
5375
5376         if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL),
5377                             FORCEWAKE_ACK_TIMEOUT_MS))
5378                 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
5379
5380         /* WaRsForcewakeWaitTC0:ivb,hsw */
5381         __gen6_gt_wait_for_thread_c0(dev_priv);
5382 }
5383
5384 /*
5385  * Generally this is called implicitly by the register read function. However,
5386  * if some sequence requires the GT to not power down then this function should
5387  * be called at the beginning of the sequence followed by a call to
5388  * gen6_gt_force_wake_put() at the end of the sequence.
5389  */
5390 void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
5391 {
5392         unsigned long irqflags;
5393
5394         spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
5395         if (dev_priv->forcewake_count++ == 0)
5396                 dev_priv->gt.force_wake_get(dev_priv);
5397         spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
5398 }
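/*
 * Usage sketch for the get/put pair (reg_a, reg_b and SOME_BIT are
 * placeholders, not real registers): a multi-access sequence that must not
 * race with RC6 power-down is bracketed like so:
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	val = I915_READ(reg_a);
 *	I915_WRITE(reg_b, val | SOME_BIT);
 *	gen6_gt_force_wake_put(dev_priv);
 */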
5399
5400 void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
5401 {
	u32 gtfifodbg;

	gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
5404         if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
5405              "MMIO read or write has been dropped %x\n", gtfifodbg))
5406                 I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
5407 }
5408
5409 static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
5410 {
5411         I915_WRITE_NOTRACE(FORCEWAKE, 0);
5412         /* something from same cacheline, but !FORCEWAKE */
5413         POSTING_READ(ECOBUS);
5414         gen6_gt_check_fifodbg(dev_priv);
5415 }
5416
5417 static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
5418 {
5419         I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
5420         /* something from same cacheline, but !FORCEWAKE_MT */
5421         POSTING_READ(ECOBUS);
5422         gen6_gt_check_fifodbg(dev_priv);
5423 }
5424
5425 /*
5426  * see gen6_gt_force_wake_get()
5427  */
5428 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
5429 {
5430         unsigned long irqflags;
5431
5432         spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
5433         if (--dev_priv->forcewake_count == 0)
5434                 dev_priv->gt.force_wake_put(dev_priv);
5435         spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
5436 }
5437
5438 int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
5439 {
5440         int ret = 0;
5441
5442         if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
5443                 int loop = 500;
5444                 u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
5445                 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
5446                         udelay(10);
5447                         fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
5448                 }
5449                 if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
5450                         ++ret;
5451                 dev_priv->gt_fifo_count = fifo;
5452         }
5453         dev_priv->gt_fifo_count--;
5454
5455         return ret;
5456 }
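/*
 * Back-of-the-envelope bound on the wait above: 500 iterations of
 * udelay(10) cap the busy-wait at roughly 5 ms before the WARN_ON fires,
 * after which the access proceeds anyway and the cached FIFO count is
 * simply resynchronized from the hardware.
 */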
5457
5458 static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
5459 {
5460         I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
5461         /* something from same cacheline, but !FORCEWAKE_VLV */
5462         POSTING_READ(FORCEWAKE_ACK_VLV);
5463 }
5464
5465 static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
5466 {
5467         if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
5468                             FORCEWAKE_ACK_TIMEOUT_MS))
5469                 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
5470
5471         I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
5472         I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
5473                            _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
5474
5475         if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
5476                             FORCEWAKE_ACK_TIMEOUT_MS))
5477                 DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
5478
5479         if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) &
5480                              FORCEWAKE_KERNEL),
5481                             FORCEWAKE_ACK_TIMEOUT_MS))
5482                 DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
5483
5484         /* WaRsForcewakeWaitTC0:vlv */
5485         __gen6_gt_wait_for_thread_c0(dev_priv);
5486 }
5487
5488 static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
5489 {
5490         I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
5491         I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
5492                            _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
5493         /* The below doubles as a POSTING_READ */
5494         gen6_gt_check_fifodbg(dev_priv);
5495 }
5496
5497 void intel_gt_sanitize(struct drm_device *dev)
5498 {
5499         struct drm_i915_private *dev_priv = dev->dev_private;
5500
5501         if (IS_VALLEYVIEW(dev)) {
5502                 vlv_force_wake_reset(dev_priv);
5503         } else if (INTEL_INFO(dev)->gen >= 6) {
5504                 __gen6_gt_force_wake_reset(dev_priv);
5505                 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
5506                         __gen6_gt_force_wake_mt_reset(dev_priv);
5507         }
5508
5509         /* BIOS often leaves RC6 enabled, but disable it for hw init */
5510         if (INTEL_INFO(dev)->gen >= 6)
5511                 intel_disable_gt_powersave(dev);
5512 }
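
/*
 * Note (call sites assumed, they are not in this file): sanitize is
 * meant for paths where the hardware state cannot be trusted, such as
 * driver load and resume, so it unconditionally clears any forcewake
 * requests left behind by the BIOS or a previous kernel before hw init
 * runs, then disables RC6 as the comment above explains.
 */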
5513
5514 void intel_gt_init(struct drm_device *dev)
5515 {
5516         struct drm_i915_private *dev_priv = dev->dev_private;
5517
5518         if (IS_VALLEYVIEW(dev)) {
5519                 dev_priv->gt.force_wake_get = vlv_force_wake_get;
5520                 dev_priv->gt.force_wake_put = vlv_force_wake_put;
5521         } else if (IS_HASWELL(dev)) {
5522                 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
5523                 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
5524         } else if (IS_IVYBRIDGE(dev)) {
5525                 u32 ecobus;
5526
5527                 /* IVB configs may use multi-threaded forcewake */
5528
5529                 /* A small trick here - if the BIOS hasn't configured
5530                  * MT forcewake, and if the device is in RC6, then
5531                  * force_wake_mt_get will not wake the device and the
5532                  * ECOBUS read will return zero, which the test below
5533                  * (correctly) interprets as MT forcewake being
5534                  * disabled.
5535                  */
5536                 mutex_lock(&dev->struct_mutex);
5537                 __gen6_gt_force_wake_mt_get(dev_priv);
5538                 ecobus = I915_READ_NOTRACE(ECOBUS);
5539                 __gen6_gt_force_wake_mt_put(dev_priv);
5540                 mutex_unlock(&dev->struct_mutex);
5541
5542                 if (ecobus & FORCEWAKE_MT_ENABLE) {
5543                         dev_priv->gt.force_wake_get =
5544                                                 __gen6_gt_force_wake_mt_get;
5545                         dev_priv->gt.force_wake_put =
5546                                                 __gen6_gt_force_wake_mt_put;
5547                 } else {
5548                         DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues "
5549                                  "when using vblank-synced partial screen updates.\n");
5550                         dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
5551                         dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
5552                 }
5553         } else if (IS_GEN6(dev)) {
5554                 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
5555                 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
5556         }
5557 }
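
/*
 * Sketch of how the vtable chosen above is consumed (simplified from
 * gen6_gt_force_wake_get() earlier in this file):
 *
 *        spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
 *        if (dev_priv->forcewake_count++ == 0)
 *                dev_priv->gt.force_wake_get(dev_priv);
 *        spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
 *
 * The platform differences (VLV dual-well, IVB/HSW multi-threaded,
 * plain gen6) all hide behind the same two function pointers.
 */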
5558
5559 void intel_pm_init(struct drm_device *dev)
5560 {
5561         struct drm_i915_private *dev_priv = dev->dev_private;
5562
5563         INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
5564                           intel_gen6_powersave_work);
5565 }
5566
5567 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
5568 {
5569         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5570
5571         if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
5572                 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
5573                 return -EAGAIN;
5574         }
5575
5576         I915_WRITE(GEN6_PCODE_DATA, *val);
5577         I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
5578
5579         if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
5580                      500)) {
5581                 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
5582                 return -ETIMEDOUT;
5583         }
5584
5585         *val = I915_READ(GEN6_PCODE_DATA);
5586         I915_WRITE(GEN6_PCODE_DATA, 0);
5587
5588         return 0;
5589 }
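
/*
 * Mailbox protocol as implemented above: with rps.hw_lock held, check
 * that no command is pending (GEN6_PCODE_READY clear), write the
 * payload to GEN6_PCODE_DATA, then write READY | command to
 * GEN6_PCODE_MAILBOX.  The PCODE firmware clears READY when it is done,
 * at which point GEN6_PCODE_DATA holds the reply.  Note that the read
 * variant also sends *val as an input payload, so callers must
 * initialize it.
 */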
5590
5591 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
5592 {
5593         WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5594
5595         if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
5596                 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
5597                 return -EAGAIN;
5598         }
5599
5600         I915_WRITE(GEN6_PCODE_DATA, val);
5601         I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
5602
5603         if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
5604                      500)) {
5605                 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
5606                 return -ETIMEDOUT;
5607         }
5608
5609         I915_WRITE(GEN6_PCODE_DATA, 0);
5610
5611         return 0;
5612 }
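
/*
 * Illustrative caller (command and payload encoding are examples only):
 *
 *        mutex_lock(&dev_priv->rps.hw_lock);
 *        ret = sandybridge_pcode_write(dev_priv,
 *                                      GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
 *                                      ia_freq | gpu_freq);
 *        mutex_unlock(&dev_priv->rps.hw_lock);
 *
 * Every mailbox command follows this pattern; only the command id and
 * the encoding of the data word differ.
 */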
5613
5614 int vlv_gpu_freq(int ddr_freq, int val)
5615 {
5616         int mult, base;
5617
5618         switch (ddr_freq) {
5619         case 800:
5620                 mult = 20;
5621                 base = 120;
5622                 break;
5623         case 1066:
5624                 mult = 22;
5625                 base = 133;
5626                 break;
5627         case 1333:
5628                 mult = 21;
5629                 base = 125;
5630                 break;
5631         default:
5632                 return -1;
5633         }
5634
5635         return ((val - 0xbd) * mult) + base;
5636 }
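
/*
 * Worked example: with ddr_freq == 800 (mult 20, base 120), PUnit
 * opcode 0xc8 (decimal 200) maps to ((200 - 0xbd) * 20) + 120 =
 * (11 * 20) + 120 = 340 MHz.  At val == 0xbd the function returns
 * exactly base, so 0xbd is the opcode of the base frequency, and -1
 * tells the caller the DDR frequency was not recognized.
 */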
5637
5638 int vlv_freq_opcode(int ddr_freq, int val)
5639 {
5640         int mult, base;
5641
5642         switch (ddr_freq) {
5643         case 800:
5644                 mult = 20;
5645                 base = 120;
5646                 break;
5647         case 1066:
5648                 mult = 22;
5649                 base = 133;
5650                 break;
5651         case 1333:
5652                 mult = 21;
5653                 base = 125;
5654                 break;
5655         default:
5656                 return -1;
5657         }
5658
5659         val /= mult;
5660         val -= base / mult;
5661         val += 0xbd;
5662
5663         if (val > 0xea)
5664                 val = 0xea;
5665
5666         return val;
5667 }
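
/*
 * Integer inverse of vlv_gpu_freq() above, clamped at opcode 0xea.
 * Round trip of the earlier example: 340 / 20 = 17, 17 - 120 / 20 = 11,
 * 11 + 0xbd = 0xc8, the opcode we started from.  For a frequency that
 * falls between two opcodes the divisions round down, so the result is
 * the highest opcode at or below the requested frequency.
 */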
5668