]> rtime.felk.cvut.cz Git - linux-imx.git/blob - drivers/gpu/drm/i915/intel_display.c
19ab75f307e5346b094d60d842424013d8ad1372
[linux-imx.git] / drivers / gpu / drm / i915 / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/dmi.h>
28 #include <linux/module.h>
29 #include <linux/input.h>
30 #include <linux/i2c.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/vgaarb.h>
34 #include <drm/drm_edid.h>
35 #include <drm/drmP.h>
36 #include "intel_drv.h"
37 #include <drm/i915_drm.h>
38 #include "i915_drv.h"
39 #include "i915_trace.h"
40 #include <drm/drm_dp_helper.h>
41 #include <drm/drm_crtc_helper.h>
42 #include <linux/dma_remapping.h>
43
44 bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
45 static void intel_increase_pllclock(struct drm_crtc *crtc);
46 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
47
48 typedef struct {
49         /* given values */
50         int n;
51         int m1, m2;
52         int p1, p2;
53         /* derived values */
54         int     dot;
55         int     vco;
56         int     m;
57         int     p;
58 } intel_clock_t;
59
60 typedef struct {
61         int     min, max;
62 } intel_range_t;
63
64 typedef struct {
65         int     dot_limit;
66         int     p2_slow, p2_fast;
67 } intel_p2_t;
68
69 #define INTEL_P2_NUM                  2
70 typedef struct intel_limit intel_limit_t;
71 struct intel_limit {
72         intel_range_t   dot, vco, n, m, m1, m2, p, p1;
73         intel_p2_t          p2;
74         bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
75                         int, int, intel_clock_t *, intel_clock_t *);
76 };
77
78 /* FDI */
79 #define IRONLAKE_FDI_FREQ               2700000 /* in kHz for mode->clock */
80
81 int
82 intel_pch_rawclk(struct drm_device *dev)
83 {
84         struct drm_i915_private *dev_priv = dev->dev_private;
85
86         WARN_ON(!HAS_PCH_SPLIT(dev));
87
88         return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
89 }
90
91 static bool
92 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
93                     int target, int refclk, intel_clock_t *match_clock,
94                     intel_clock_t *best_clock);
95 static bool
96 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
97                         int target, int refclk, intel_clock_t *match_clock,
98                         intel_clock_t *best_clock);
99
100 static bool
101 intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
102                       int target, int refclk, intel_clock_t *match_clock,
103                       intel_clock_t *best_clock);
104 static bool
105 intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
106                            int target, int refclk, intel_clock_t *match_clock,
107                            intel_clock_t *best_clock);
108
109 static bool
110 intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
111                         int target, int refclk, intel_clock_t *match_clock,
112                         intel_clock_t *best_clock);
113
114 static inline u32 /* units of 100MHz */
115 intel_fdi_link_freq(struct drm_device *dev)
116 {
117         if (IS_GEN5(dev)) {
118                 struct drm_i915_private *dev_priv = dev->dev_private;
119                 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
120         } else
121                 return 27;
122 }
123
124 static const intel_limit_t intel_limits_i8xx_dvo = {
125         .dot = { .min = 25000, .max = 350000 },
126         .vco = { .min = 930000, .max = 1400000 },
127         .n = { .min = 3, .max = 16 },
128         .m = { .min = 96, .max = 140 },
129         .m1 = { .min = 18, .max = 26 },
130         .m2 = { .min = 6, .max = 16 },
131         .p = { .min = 4, .max = 128 },
132         .p1 = { .min = 2, .max = 33 },
133         .p2 = { .dot_limit = 165000,
134                 .p2_slow = 4, .p2_fast = 2 },
135         .find_pll = intel_find_best_PLL,
136 };
137
138 static const intel_limit_t intel_limits_i8xx_lvds = {
139         .dot = { .min = 25000, .max = 350000 },
140         .vco = { .min = 930000, .max = 1400000 },
141         .n = { .min = 3, .max = 16 },
142         .m = { .min = 96, .max = 140 },
143         .m1 = { .min = 18, .max = 26 },
144         .m2 = { .min = 6, .max = 16 },
145         .p = { .min = 4, .max = 128 },
146         .p1 = { .min = 1, .max = 6 },
147         .p2 = { .dot_limit = 165000,
148                 .p2_slow = 14, .p2_fast = 7 },
149         .find_pll = intel_find_best_PLL,
150 };
151
152 static const intel_limit_t intel_limits_i9xx_sdvo = {
153         .dot = { .min = 20000, .max = 400000 },
154         .vco = { .min = 1400000, .max = 2800000 },
155         .n = { .min = 1, .max = 6 },
156         .m = { .min = 70, .max = 120 },
157         .m1 = { .min = 10, .max = 22 },
158         .m2 = { .min = 5, .max = 9 },
159         .p = { .min = 5, .max = 80 },
160         .p1 = { .min = 1, .max = 8 },
161         .p2 = { .dot_limit = 200000,
162                 .p2_slow = 10, .p2_fast = 5 },
163         .find_pll = intel_find_best_PLL,
164 };
165
166 static const intel_limit_t intel_limits_i9xx_lvds = {
167         .dot = { .min = 20000, .max = 400000 },
168         .vco = { .min = 1400000, .max = 2800000 },
169         .n = { .min = 1, .max = 6 },
170         .m = { .min = 70, .max = 120 },
171         .m1 = { .min = 10, .max = 22 },
172         .m2 = { .min = 5, .max = 9 },
173         .p = { .min = 7, .max = 98 },
174         .p1 = { .min = 1, .max = 8 },
175         .p2 = { .dot_limit = 112000,
176                 .p2_slow = 14, .p2_fast = 7 },
177         .find_pll = intel_find_best_PLL,
178 };
179
180
181 static const intel_limit_t intel_limits_g4x_sdvo = {
182         .dot = { .min = 25000, .max = 270000 },
183         .vco = { .min = 1750000, .max = 3500000},
184         .n = { .min = 1, .max = 4 },
185         .m = { .min = 104, .max = 138 },
186         .m1 = { .min = 17, .max = 23 },
187         .m2 = { .min = 5, .max = 11 },
188         .p = { .min = 10, .max = 30 },
189         .p1 = { .min = 1, .max = 3},
190         .p2 = { .dot_limit = 270000,
191                 .p2_slow = 10,
192                 .p2_fast = 10
193         },
194         .find_pll = intel_g4x_find_best_PLL,
195 };
196
197 static const intel_limit_t intel_limits_g4x_hdmi = {
198         .dot = { .min = 22000, .max = 400000 },
199         .vco = { .min = 1750000, .max = 3500000},
200         .n = { .min = 1, .max = 4 },
201         .m = { .min = 104, .max = 138 },
202         .m1 = { .min = 16, .max = 23 },
203         .m2 = { .min = 5, .max = 11 },
204         .p = { .min = 5, .max = 80 },
205         .p1 = { .min = 1, .max = 8},
206         .p2 = { .dot_limit = 165000,
207                 .p2_slow = 10, .p2_fast = 5 },
208         .find_pll = intel_g4x_find_best_PLL,
209 };
210
211 static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
212         .dot = { .min = 20000, .max = 115000 },
213         .vco = { .min = 1750000, .max = 3500000 },
214         .n = { .min = 1, .max = 3 },
215         .m = { .min = 104, .max = 138 },
216         .m1 = { .min = 17, .max = 23 },
217         .m2 = { .min = 5, .max = 11 },
218         .p = { .min = 28, .max = 112 },
219         .p1 = { .min = 2, .max = 8 },
220         .p2 = { .dot_limit = 0,
221                 .p2_slow = 14, .p2_fast = 14
222         },
223         .find_pll = intel_g4x_find_best_PLL,
224 };
225
226 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
227         .dot = { .min = 80000, .max = 224000 },
228         .vco = { .min = 1750000, .max = 3500000 },
229         .n = { .min = 1, .max = 3 },
230         .m = { .min = 104, .max = 138 },
231         .m1 = { .min = 17, .max = 23 },
232         .m2 = { .min = 5, .max = 11 },
233         .p = { .min = 14, .max = 42 },
234         .p1 = { .min = 2, .max = 6 },
235         .p2 = { .dot_limit = 0,
236                 .p2_slow = 7, .p2_fast = 7
237         },
238         .find_pll = intel_g4x_find_best_PLL,
239 };
240
241 static const intel_limit_t intel_limits_g4x_display_port = {
242         .dot = { .min = 161670, .max = 227000 },
243         .vco = { .min = 1750000, .max = 3500000},
244         .n = { .min = 1, .max = 2 },
245         .m = { .min = 97, .max = 108 },
246         .m1 = { .min = 0x10, .max = 0x12 },
247         .m2 = { .min = 0x05, .max = 0x06 },
248         .p = { .min = 10, .max = 20 },
249         .p1 = { .min = 1, .max = 2},
250         .p2 = { .dot_limit = 0,
251                 .p2_slow = 10, .p2_fast = 10 },
252         .find_pll = intel_find_pll_g4x_dp,
253 };
254
255 static const intel_limit_t intel_limits_pineview_sdvo = {
256         .dot = { .min = 20000, .max = 400000},
257         .vco = { .min = 1700000, .max = 3500000 },
258         /* Pineview's Ncounter is a ring counter */
259         .n = { .min = 3, .max = 6 },
260         .m = { .min = 2, .max = 256 },
261         /* Pineview only has one combined m divider, which we treat as m2. */
262         .m1 = { .min = 0, .max = 0 },
263         .m2 = { .min = 0, .max = 254 },
264         .p = { .min = 5, .max = 80 },
265         .p1 = { .min = 1, .max = 8 },
266         .p2 = { .dot_limit = 200000,
267                 .p2_slow = 10, .p2_fast = 5 },
268         .find_pll = intel_find_best_PLL,
269 };
270
271 static const intel_limit_t intel_limits_pineview_lvds = {
272         .dot = { .min = 20000, .max = 400000 },
273         .vco = { .min = 1700000, .max = 3500000 },
274         .n = { .min = 3, .max = 6 },
275         .m = { .min = 2, .max = 256 },
276         .m1 = { .min = 0, .max = 0 },
277         .m2 = { .min = 0, .max = 254 },
278         .p = { .min = 7, .max = 112 },
279         .p1 = { .min = 1, .max = 8 },
280         .p2 = { .dot_limit = 112000,
281                 .p2_slow = 14, .p2_fast = 14 },
282         .find_pll = intel_find_best_PLL,
283 };
284
285 /* Ironlake / Sandybridge
286  *
287  * We calculate clock using (register_value + 2) for N/M1/M2, so here
288  * the range value for them is (actual_value - 2).
289  */
290 static const intel_limit_t intel_limits_ironlake_dac = {
291         .dot = { .min = 25000, .max = 350000 },
292         .vco = { .min = 1760000, .max = 3510000 },
293         .n = { .min = 1, .max = 5 },
294         .m = { .min = 79, .max = 127 },
295         .m1 = { .min = 12, .max = 22 },
296         .m2 = { .min = 5, .max = 9 },
297         .p = { .min = 5, .max = 80 },
298         .p1 = { .min = 1, .max = 8 },
299         .p2 = { .dot_limit = 225000,
300                 .p2_slow = 10, .p2_fast = 5 },
301         .find_pll = intel_g4x_find_best_PLL,
302 };
303
304 static const intel_limit_t intel_limits_ironlake_single_lvds = {
305         .dot = { .min = 25000, .max = 350000 },
306         .vco = { .min = 1760000, .max = 3510000 },
307         .n = { .min = 1, .max = 3 },
308         .m = { .min = 79, .max = 118 },
309         .m1 = { .min = 12, .max = 22 },
310         .m2 = { .min = 5, .max = 9 },
311         .p = { .min = 28, .max = 112 },
312         .p1 = { .min = 2, .max = 8 },
313         .p2 = { .dot_limit = 225000,
314                 .p2_slow = 14, .p2_fast = 14 },
315         .find_pll = intel_g4x_find_best_PLL,
316 };
317
318 static const intel_limit_t intel_limits_ironlake_dual_lvds = {
319         .dot = { .min = 25000, .max = 350000 },
320         .vco = { .min = 1760000, .max = 3510000 },
321         .n = { .min = 1, .max = 3 },
322         .m = { .min = 79, .max = 127 },
323         .m1 = { .min = 12, .max = 22 },
324         .m2 = { .min = 5, .max = 9 },
325         .p = { .min = 14, .max = 56 },
326         .p1 = { .min = 2, .max = 8 },
327         .p2 = { .dot_limit = 225000,
328                 .p2_slow = 7, .p2_fast = 7 },
329         .find_pll = intel_g4x_find_best_PLL,
330 };
331
332 /* LVDS 100mhz refclk limits. */
333 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
334         .dot = { .min = 25000, .max = 350000 },
335         .vco = { .min = 1760000, .max = 3510000 },
336         .n = { .min = 1, .max = 2 },
337         .m = { .min = 79, .max = 126 },
338         .m1 = { .min = 12, .max = 22 },
339         .m2 = { .min = 5, .max = 9 },
340         .p = { .min = 28, .max = 112 },
341         .p1 = { .min = 2, .max = 8 },
342         .p2 = { .dot_limit = 225000,
343                 .p2_slow = 14, .p2_fast = 14 },
344         .find_pll = intel_g4x_find_best_PLL,
345 };
346
347 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
348         .dot = { .min = 25000, .max = 350000 },
349         .vco = { .min = 1760000, .max = 3510000 },
350         .n = { .min = 1, .max = 3 },
351         .m = { .min = 79, .max = 126 },
352         .m1 = { .min = 12, .max = 22 },
353         .m2 = { .min = 5, .max = 9 },
354         .p = { .min = 14, .max = 42 },
355         .p1 = { .min = 2, .max = 6 },
356         .p2 = { .dot_limit = 225000,
357                 .p2_slow = 7, .p2_fast = 7 },
358         .find_pll = intel_g4x_find_best_PLL,
359 };
360
361 static const intel_limit_t intel_limits_ironlake_display_port = {
362         .dot = { .min = 25000, .max = 350000 },
363         .vco = { .min = 1760000, .max = 3510000},
364         .n = { .min = 1, .max = 2 },
365         .m = { .min = 81, .max = 90 },
366         .m1 = { .min = 12, .max = 22 },
367         .m2 = { .min = 5, .max = 9 },
368         .p = { .min = 10, .max = 20 },
369         .p1 = { .min = 1, .max = 2},
370         .p2 = { .dot_limit = 0,
371                 .p2_slow = 10, .p2_fast = 10 },
372         .find_pll = intel_find_pll_ironlake_dp,
373 };
374
375 static const intel_limit_t intel_limits_vlv_dac = {
376         .dot = { .min = 25000, .max = 270000 },
377         .vco = { .min = 4000000, .max = 6000000 },
378         .n = { .min = 1, .max = 7 },
379         .m = { .min = 22, .max = 450 }, /* guess */
380         .m1 = { .min = 2, .max = 3 },
381         .m2 = { .min = 11, .max = 156 },
382         .p = { .min = 10, .max = 30 },
383         .p1 = { .min = 2, .max = 3 },
384         .p2 = { .dot_limit = 270000,
385                 .p2_slow = 2, .p2_fast = 20 },
386         .find_pll = intel_vlv_find_best_pll,
387 };
388
389 static const intel_limit_t intel_limits_vlv_hdmi = {
390         .dot = { .min = 20000, .max = 165000 },
391         .vco = { .min = 4000000, .max = 5994000},
392         .n = { .min = 1, .max = 7 },
393         .m = { .min = 60, .max = 300 }, /* guess */
394         .m1 = { .min = 2, .max = 3 },
395         .m2 = { .min = 11, .max = 156 },
396         .p = { .min = 10, .max = 30 },
397         .p1 = { .min = 2, .max = 3 },
398         .p2 = { .dot_limit = 270000,
399                 .p2_slow = 2, .p2_fast = 20 },
400         .find_pll = intel_vlv_find_best_pll,
401 };
402
403 static const intel_limit_t intel_limits_vlv_dp = {
404         .dot = { .min = 25000, .max = 270000 },
405         .vco = { .min = 4000000, .max = 6000000 },
406         .n = { .min = 1, .max = 7 },
407         .m = { .min = 22, .max = 450 },
408         .m1 = { .min = 2, .max = 3 },
409         .m2 = { .min = 11, .max = 156 },
410         .p = { .min = 10, .max = 30 },
411         .p1 = { .min = 2, .max = 3 },
412         .p2 = { .dot_limit = 270000,
413                 .p2_slow = 2, .p2_fast = 20 },
414         .find_pll = intel_vlv_find_best_pll,
415 };
416
417 u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
418 {
419         WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
420
421         if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
422                 DRM_ERROR("DPIO idle wait timed out\n");
423                 return 0;
424         }
425
426         I915_WRITE(DPIO_REG, reg);
427         I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
428                    DPIO_BYTE);
429         if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
430                 DRM_ERROR("DPIO read wait timed out\n");
431                 return 0;
432         }
433
434         return I915_READ(DPIO_DATA);
435 }
436
437 static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
438                              u32 val)
439 {
440         WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
441
442         if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
443                 DRM_ERROR("DPIO idle wait timed out\n");
444                 return;
445         }
446
447         I915_WRITE(DPIO_DATA, val);
448         I915_WRITE(DPIO_REG, reg);
449         I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
450                    DPIO_BYTE);
451         if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
452                 DRM_ERROR("DPIO write wait timed out\n");
453 }
454
455 static void vlv_init_dpio(struct drm_device *dev)
456 {
457         struct drm_i915_private *dev_priv = dev->dev_private;
458
459         /* Reset the DPIO config */
460         I915_WRITE(DPIO_CTL, 0);
461         POSTING_READ(DPIO_CTL);
462         I915_WRITE(DPIO_CTL, 1);
463         POSTING_READ(DPIO_CTL);
464 }
465
466 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
467                                                 int refclk)
468 {
469         struct drm_device *dev = crtc->dev;
470         const intel_limit_t *limit;
471
472         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
473                 if (intel_is_dual_link_lvds(dev)) {
474                         /* LVDS dual channel */
475                         if (refclk == 100000)
476                                 limit = &intel_limits_ironlake_dual_lvds_100m;
477                         else
478                                 limit = &intel_limits_ironlake_dual_lvds;
479                 } else {
480                         if (refclk == 100000)
481                                 limit = &intel_limits_ironlake_single_lvds_100m;
482                         else
483                                 limit = &intel_limits_ironlake_single_lvds;
484                 }
485         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
486                    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
487                 limit = &intel_limits_ironlake_display_port;
488         else
489                 limit = &intel_limits_ironlake_dac;
490
491         return limit;
492 }
493
494 static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
495 {
496         struct drm_device *dev = crtc->dev;
497         const intel_limit_t *limit;
498
499         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
500                 if (intel_is_dual_link_lvds(dev))
501                         /* LVDS with dual channel */
502                         limit = &intel_limits_g4x_dual_channel_lvds;
503                 else
504                         /* LVDS with dual channel */
505                         limit = &intel_limits_g4x_single_channel_lvds;
506         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
507                    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
508                 limit = &intel_limits_g4x_hdmi;
509         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
510                 limit = &intel_limits_g4x_sdvo;
511         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
512                 limit = &intel_limits_g4x_display_port;
513         } else /* The option is for other outputs */
514                 limit = &intel_limits_i9xx_sdvo;
515
516         return limit;
517 }
518
519 static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
520 {
521         struct drm_device *dev = crtc->dev;
522         const intel_limit_t *limit;
523
524         if (HAS_PCH_SPLIT(dev))
525                 limit = intel_ironlake_limit(crtc, refclk);
526         else if (IS_G4X(dev)) {
527                 limit = intel_g4x_limit(crtc);
528         } else if (IS_PINEVIEW(dev)) {
529                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
530                         limit = &intel_limits_pineview_lvds;
531                 else
532                         limit = &intel_limits_pineview_sdvo;
533         } else if (IS_VALLEYVIEW(dev)) {
534                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
535                         limit = &intel_limits_vlv_dac;
536                 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
537                         limit = &intel_limits_vlv_hdmi;
538                 else
539                         limit = &intel_limits_vlv_dp;
540         } else if (!IS_GEN2(dev)) {
541                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
542                         limit = &intel_limits_i9xx_lvds;
543                 else
544                         limit = &intel_limits_i9xx_sdvo;
545         } else {
546                 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
547                         limit = &intel_limits_i8xx_lvds;
548                 else
549                         limit = &intel_limits_i8xx_dvo;
550         }
551         return limit;
552 }
553
554 /* m1 is reserved as 0 in Pineview, n is a ring counter */
555 static void pineview_clock(int refclk, intel_clock_t *clock)
556 {
557         clock->m = clock->m2 + 2;
558         clock->p = clock->p1 * clock->p2;
559         clock->vco = refclk * clock->m / clock->n;
560         clock->dot = clock->vco / clock->p;
561 }
562
563 static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
564 {
565         if (IS_PINEVIEW(dev)) {
566                 pineview_clock(refclk, clock);
567                 return;
568         }
569         clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
570         clock->p = clock->p1 * clock->p2;
571         clock->vco = refclk * clock->m / (clock->n + 2);
572         clock->dot = clock->vco / clock->p;
573 }
574
575 /**
576  * Returns whether any output on the specified pipe is of the specified type
577  */
578 bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
579 {
580         struct drm_device *dev = crtc->dev;
581         struct intel_encoder *encoder;
582
583         for_each_encoder_on_crtc(dev, crtc, encoder)
584                 if (encoder->type == type)
585                         return true;
586
587         return false;
588 }
589
590 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
591 /**
592  * Returns whether the given set of divisors are valid for a given refclk with
593  * the given connectors.
594  */
595
596 static bool intel_PLL_is_valid(struct drm_device *dev,
597                                const intel_limit_t *limit,
598                                const intel_clock_t *clock)
599 {
600         if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
601                 INTELPllInvalid("p1 out of range\n");
602         if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
603                 INTELPllInvalid("p out of range\n");
604         if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
605                 INTELPllInvalid("m2 out of range\n");
606         if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
607                 INTELPllInvalid("m1 out of range\n");
608         if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
609                 INTELPllInvalid("m1 <= m2\n");
610         if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
611                 INTELPllInvalid("m out of range\n");
612         if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
613                 INTELPllInvalid("n out of range\n");
614         if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
615                 INTELPllInvalid("vco out of range\n");
616         /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
617          * connector, etc., rather than just a single range.
618          */
619         if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
620                 INTELPllInvalid("dot out of range\n");
621
622         return true;
623 }
624
625 static bool
626 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
627                     int target, int refclk, intel_clock_t *match_clock,
628                     intel_clock_t *best_clock)
629
630 {
631         struct drm_device *dev = crtc->dev;
632         intel_clock_t clock;
633         int err = target;
634
635         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
636                 /*
637                  * For LVDS just rely on its current settings for dual-channel.
638                  * We haven't figured out how to reliably set up different
639                  * single/dual channel state, if we even can.
640                  */
641                 if (intel_is_dual_link_lvds(dev))
642                         clock.p2 = limit->p2.p2_fast;
643                 else
644                         clock.p2 = limit->p2.p2_slow;
645         } else {
646                 if (target < limit->p2.dot_limit)
647                         clock.p2 = limit->p2.p2_slow;
648                 else
649                         clock.p2 = limit->p2.p2_fast;
650         }
651
652         memset(best_clock, 0, sizeof(*best_clock));
653
654         for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
655              clock.m1++) {
656                 for (clock.m2 = limit->m2.min;
657                      clock.m2 <= limit->m2.max; clock.m2++) {
658                         /* m1 is always 0 in Pineview */
659                         if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
660                                 break;
661                         for (clock.n = limit->n.min;
662                              clock.n <= limit->n.max; clock.n++) {
663                                 for (clock.p1 = limit->p1.min;
664                                         clock.p1 <= limit->p1.max; clock.p1++) {
665                                         int this_err;
666
667                                         intel_clock(dev, refclk, &clock);
668                                         if (!intel_PLL_is_valid(dev, limit,
669                                                                 &clock))
670                                                 continue;
671                                         if (match_clock &&
672                                             clock.p != match_clock->p)
673                                                 continue;
674
675                                         this_err = abs(clock.dot - target);
676                                         if (this_err < err) {
677                                                 *best_clock = clock;
678                                                 err = this_err;
679                                         }
680                                 }
681                         }
682                 }
683         }
684
685         return (err != target);
686 }
687
688 static bool
689 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
690                         int target, int refclk, intel_clock_t *match_clock,
691                         intel_clock_t *best_clock)
692 {
693         struct drm_device *dev = crtc->dev;
694         intel_clock_t clock;
695         int max_n;
696         bool found;
697         /* approximately equals target * 0.00585 */
698         int err_most = (target >> 8) + (target >> 9);
699         found = false;
700
701         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
702                 int lvds_reg;
703
704                 if (HAS_PCH_SPLIT(dev))
705                         lvds_reg = PCH_LVDS;
706                 else
707                         lvds_reg = LVDS;
708                 if (intel_is_dual_link_lvds(dev))
709                         clock.p2 = limit->p2.p2_fast;
710                 else
711                         clock.p2 = limit->p2.p2_slow;
712         } else {
713                 if (target < limit->p2.dot_limit)
714                         clock.p2 = limit->p2.p2_slow;
715                 else
716                         clock.p2 = limit->p2.p2_fast;
717         }
718
719         memset(best_clock, 0, sizeof(*best_clock));
720         max_n = limit->n.max;
721         /* based on hardware requirement, prefer smaller n to precision */
722         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
723                 /* based on hardware requirement, prefere larger m1,m2 */
724                 for (clock.m1 = limit->m1.max;
725                      clock.m1 >= limit->m1.min; clock.m1--) {
726                         for (clock.m2 = limit->m2.max;
727                              clock.m2 >= limit->m2.min; clock.m2--) {
728                                 for (clock.p1 = limit->p1.max;
729                                      clock.p1 >= limit->p1.min; clock.p1--) {
730                                         int this_err;
731
732                                         intel_clock(dev, refclk, &clock);
733                                         if (!intel_PLL_is_valid(dev, limit,
734                                                                 &clock))
735                                                 continue;
736                                         if (match_clock &&
737                                             clock.p != match_clock->p)
738                                                 continue;
739
740                                         this_err = abs(clock.dot - target);
741                                         if (this_err < err_most) {
742                                                 *best_clock = clock;
743                                                 err_most = this_err;
744                                                 max_n = clock.n;
745                                                 found = true;
746                                         }
747                                 }
748                         }
749                 }
750         }
751         return found;
752 }
753
754 static bool
755 intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
756                            int target, int refclk, intel_clock_t *match_clock,
757                            intel_clock_t *best_clock)
758 {
759         struct drm_device *dev = crtc->dev;
760         intel_clock_t clock;
761
762         if (target < 200000) {
763                 clock.n = 1;
764                 clock.p1 = 2;
765                 clock.p2 = 10;
766                 clock.m1 = 12;
767                 clock.m2 = 9;
768         } else {
769                 clock.n = 2;
770                 clock.p1 = 1;
771                 clock.p2 = 10;
772                 clock.m1 = 14;
773                 clock.m2 = 8;
774         }
775         intel_clock(dev, refclk, &clock);
776         memcpy(best_clock, &clock, sizeof(intel_clock_t));
777         return true;
778 }
779
780 /* DisplayPort has only two frequencies, 162MHz and 270MHz */
781 static bool
782 intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
783                       int target, int refclk, intel_clock_t *match_clock,
784                       intel_clock_t *best_clock)
785 {
786         intel_clock_t clock;
787         if (target < 200000) {
788                 clock.p1 = 2;
789                 clock.p2 = 10;
790                 clock.n = 2;
791                 clock.m1 = 23;
792                 clock.m2 = 8;
793         } else {
794                 clock.p1 = 1;
795                 clock.p2 = 10;
796                 clock.n = 1;
797                 clock.m1 = 14;
798                 clock.m2 = 2;
799         }
800         clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
801         clock.p = (clock.p1 * clock.p2);
802         clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
803         clock.vco = 0;
804         memcpy(best_clock, &clock, sizeof(intel_clock_t));
805         return true;
806 }
807 static bool
808 intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
809                         int target, int refclk, intel_clock_t *match_clock,
810                         intel_clock_t *best_clock)
811 {
812         u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
813         u32 m, n, fastclk;
814         u32 updrate, minupdate, fracbits, p;
815         unsigned long bestppm, ppm, absppm;
816         int dotclk, flag;
817
818         flag = 0;
819         dotclk = target * 1000;
820         bestppm = 1000000;
821         ppm = absppm = 0;
822         fastclk = dotclk / (2*100);
823         updrate = 0;
824         minupdate = 19200;
825         fracbits = 1;
826         n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
827         bestm1 = bestm2 = bestp1 = bestp2 = 0;
828
829         /* based on hardware requirement, prefer smaller n to precision */
830         for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
831                 updrate = refclk / n;
832                 for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
833                         for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
834                                 if (p2 > 10)
835                                         p2 = p2 - 1;
836                                 p = p1 * p2;
837                                 /* based on hardware requirement, prefer bigger m1,m2 values */
838                                 for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
839                                         m2 = (((2*(fastclk * p * n / m1 )) +
840                                                refclk) / (2*refclk));
841                                         m = m1 * m2;
842                                         vco = updrate * m;
843                                         if (vco >= limit->vco.min && vco < limit->vco.max) {
844                                                 ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
845                                                 absppm = (ppm > 0) ? ppm : (-ppm);
846                                                 if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
847                                                         bestppm = 0;
848                                                         flag = 1;
849                                                 }
850                                                 if (absppm < bestppm - 10) {
851                                                         bestppm = absppm;
852                                                         flag = 1;
853                                                 }
854                                                 if (flag) {
855                                                         bestn = n;
856                                                         bestm1 = m1;
857                                                         bestm2 = m2;
858                                                         bestp1 = p1;
859                                                         bestp2 = p2;
860                                                         flag = 0;
861                                                 }
862                                         }
863                                 }
864                         }
865                 }
866         }
867         best_clock->n = bestn;
868         best_clock->m1 = bestm1;
869         best_clock->m2 = bestm2;
870         best_clock->p1 = bestp1;
871         best_clock->p2 = bestp2;
872
873         return true;
874 }
875
876 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
877                                              enum pipe pipe)
878 {
879         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
880         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
881
882         return intel_crtc->cpu_transcoder;
883 }
884
885 static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
886 {
887         struct drm_i915_private *dev_priv = dev->dev_private;
888         u32 frame, frame_reg = PIPEFRAME(pipe);
889
890         frame = I915_READ(frame_reg);
891
892         if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
893                 DRM_DEBUG_KMS("vblank wait timed out\n");
894 }
895
896 /**
897  * intel_wait_for_vblank - wait for vblank on a given pipe
898  * @dev: drm device
899  * @pipe: pipe to wait for
900  *
901  * Wait for vblank to occur on a given pipe.  Needed for various bits of
902  * mode setting code.
903  */
904 void intel_wait_for_vblank(struct drm_device *dev, int pipe)
905 {
906         struct drm_i915_private *dev_priv = dev->dev_private;
907         int pipestat_reg = PIPESTAT(pipe);
908
909         if (INTEL_INFO(dev)->gen >= 5) {
910                 ironlake_wait_for_vblank(dev, pipe);
911                 return;
912         }
913
914         /* Clear existing vblank status. Note this will clear any other
915          * sticky status fields as well.
916          *
917          * This races with i915_driver_irq_handler() with the result
918          * that either function could miss a vblank event.  Here it is not
919          * fatal, as we will either wait upon the next vblank interrupt or
920          * timeout.  Generally speaking intel_wait_for_vblank() is only
921          * called during modeset at which time the GPU should be idle and
922          * should *not* be performing page flips and thus not waiting on
923          * vblanks...
924          * Currently, the result of us stealing a vblank from the irq
925          * handler is that a single frame will be skipped during swapbuffers.
926          */
927         I915_WRITE(pipestat_reg,
928                    I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
929
930         /* Wait for vblank interrupt bit to set */
931         if (wait_for(I915_READ(pipestat_reg) &
932                      PIPE_VBLANK_INTERRUPT_STATUS,
933                      50))
934                 DRM_DEBUG_KMS("vblank wait timed out\n");
935 }
936
937 /*
938  * intel_wait_for_pipe_off - wait for pipe to turn off
939  * @dev: drm device
940  * @pipe: pipe to wait for
941  *
942  * After disabling a pipe, we can't wait for vblank in the usual way,
943  * spinning on the vblank interrupt status bit, since we won't actually
944  * see an interrupt when the pipe is disabled.
945  *
946  * On Gen4 and above:
947  *   wait for the pipe register state bit to turn off
948  *
949  * Otherwise:
950  *   wait for the display line value to settle (it usually
951  *   ends up stopping at the start of the next frame).
952  *
953  */
954 void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
955 {
956         struct drm_i915_private *dev_priv = dev->dev_private;
957         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
958                                                                       pipe);
959
960         if (INTEL_INFO(dev)->gen >= 4) {
961                 int reg = PIPECONF(cpu_transcoder);
962
963                 /* Wait for the Pipe State to go off */
964                 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
965                              100))
966                         WARN(1, "pipe_off wait timed out\n");
967         } else {
968                 u32 last_line, line_mask;
969                 int reg = PIPEDSL(pipe);
970                 unsigned long timeout = jiffies + msecs_to_jiffies(100);
971
972                 if (IS_GEN2(dev))
973                         line_mask = DSL_LINEMASK_GEN2;
974                 else
975                         line_mask = DSL_LINEMASK_GEN3;
976
977                 /* Wait for the display line to settle */
978                 do {
979                         last_line = I915_READ(reg) & line_mask;
980                         mdelay(5);
981                 } while (((I915_READ(reg) & line_mask) != last_line) &&
982                          time_after(timeout, jiffies));
983                 if (time_after(jiffies, timeout))
984                         WARN(1, "pipe_off wait timed out\n");
985         }
986 }
987
988 /*
989  * ibx_digital_port_connected - is the specified port connected?
990  * @dev_priv: i915 private structure
991  * @port: the port to test
992  *
993  * Returns true if @port is connected, false otherwise.
994  */
995 bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
996                                 struct intel_digital_port *port)
997 {
998         u32 bit;
999
1000         if (HAS_PCH_IBX(dev_priv->dev)) {
1001                 switch(port->port) {
1002                 case PORT_B:
1003                         bit = SDE_PORTB_HOTPLUG;
1004                         break;
1005                 case PORT_C:
1006                         bit = SDE_PORTC_HOTPLUG;
1007                         break;
1008                 case PORT_D:
1009                         bit = SDE_PORTD_HOTPLUG;
1010                         break;
1011                 default:
1012                         return true;
1013                 }
1014         } else {
1015                 switch(port->port) {
1016                 case PORT_B:
1017                         bit = SDE_PORTB_HOTPLUG_CPT;
1018                         break;
1019                 case PORT_C:
1020                         bit = SDE_PORTC_HOTPLUG_CPT;
1021                         break;
1022                 case PORT_D:
1023                         bit = SDE_PORTD_HOTPLUG_CPT;
1024                         break;
1025                 default:
1026                         return true;
1027                 }
1028         }
1029
1030         return I915_READ(SDEISR) & bit;
1031 }
1032
1033 static const char *state_string(bool enabled)
1034 {
1035         return enabled ? "on" : "off";
1036 }
1037
1038 /* Only for pre-ILK configs */
1039 static void assert_pll(struct drm_i915_private *dev_priv,
1040                        enum pipe pipe, bool state)
1041 {
1042         int reg;
1043         u32 val;
1044         bool cur_state;
1045
1046         reg = DPLL(pipe);
1047         val = I915_READ(reg);
1048         cur_state = !!(val & DPLL_VCO_ENABLE);
1049         WARN(cur_state != state,
1050              "PLL state assertion failure (expected %s, current %s)\n",
1051              state_string(state), state_string(cur_state));
1052 }
1053 #define assert_pll_enabled(d, p) assert_pll(d, p, true)
1054 #define assert_pll_disabled(d, p) assert_pll(d, p, false)
1055
1056 /* For ILK+ */
1057 static void assert_pch_pll(struct drm_i915_private *dev_priv,
1058                            struct intel_pch_pll *pll,
1059                            struct intel_crtc *crtc,
1060                            bool state)
1061 {
1062         u32 val;
1063         bool cur_state;
1064
1065         if (HAS_PCH_LPT(dev_priv->dev)) {
1066                 DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
1067                 return;
1068         }
1069
1070         if (WARN (!pll,
1071                   "asserting PCH PLL %s with no PLL\n", state_string(state)))
1072                 return;
1073
1074         val = I915_READ(pll->pll_reg);
1075         cur_state = !!(val & DPLL_VCO_ENABLE);
1076         WARN(cur_state != state,
1077              "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
1078              pll->pll_reg, state_string(state), state_string(cur_state), val);
1079
1080         /* Make sure the selected PLL is correctly attached to the transcoder */
1081         if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
1082                 u32 pch_dpll;
1083
1084                 pch_dpll = I915_READ(PCH_DPLL_SEL);
1085                 cur_state = pll->pll_reg == _PCH_DPLL_B;
1086                 if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
1087                           "PLL[%d] not attached to this transcoder %d: %08x\n",
1088                           cur_state, crtc->pipe, pch_dpll)) {
1089                         cur_state = !!(val >> (4*crtc->pipe + 3));
1090                         WARN(cur_state != state,
1091                              "PLL[%d] not %s on this transcoder %d: %08x\n",
1092                              pll->pll_reg == _PCH_DPLL_B,
1093                              state_string(state),
1094                              crtc->pipe,
1095                              val);
1096                 }
1097         }
1098 }
1099 #define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
1100 #define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
1101
1102 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1103                           enum pipe pipe, bool state)
1104 {
1105         int reg;
1106         u32 val;
1107         bool cur_state;
1108         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1109                                                                       pipe);
1110
1111         if (HAS_DDI(dev_priv->dev)) {
1112                 /* DDI does not have a specific FDI_TX register */
1113                 reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
1114                 val = I915_READ(reg);
1115                 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1116         } else {
1117                 reg = FDI_TX_CTL(pipe);
1118                 val = I915_READ(reg);
1119                 cur_state = !!(val & FDI_TX_ENABLE);
1120         }
1121         WARN(cur_state != state,
1122              "FDI TX state assertion failure (expected %s, current %s)\n",
1123              state_string(state), state_string(cur_state));
1124 }
1125 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1126 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1127
1128 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1129                           enum pipe pipe, bool state)
1130 {
1131         int reg;
1132         u32 val;
1133         bool cur_state;
1134
1135         reg = FDI_RX_CTL(pipe);
1136         val = I915_READ(reg);
1137         cur_state = !!(val & FDI_RX_ENABLE);
1138         WARN(cur_state != state,
1139              "FDI RX state assertion failure (expected %s, current %s)\n",
1140              state_string(state), state_string(cur_state));
1141 }
1142 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1143 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1144
1145 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1146                                       enum pipe pipe)
1147 {
1148         int reg;
1149         u32 val;
1150
1151         /* ILK FDI PLL is always enabled */
1152         if (dev_priv->info->gen == 5)
1153                 return;
1154
1155         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1156         if (HAS_DDI(dev_priv->dev))
1157                 return;
1158
1159         reg = FDI_TX_CTL(pipe);
1160         val = I915_READ(reg);
1161         WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1162 }
1163
1164 static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
1165                                       enum pipe pipe)
1166 {
1167         int reg;
1168         u32 val;
1169
1170         reg = FDI_RX_CTL(pipe);
1171         val = I915_READ(reg);
1172         WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
1173 }
1174
1175 static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1176                                   enum pipe pipe)
1177 {
1178         int pp_reg, lvds_reg;
1179         u32 val;
1180         enum pipe panel_pipe = PIPE_A;
1181         bool locked = true;
1182
1183         if (HAS_PCH_SPLIT(dev_priv->dev)) {
1184                 pp_reg = PCH_PP_CONTROL;
1185                 lvds_reg = PCH_LVDS;
1186         } else {
1187                 pp_reg = PP_CONTROL;
1188                 lvds_reg = LVDS;
1189         }
1190
1191         val = I915_READ(pp_reg);
1192         if (!(val & PANEL_POWER_ON) ||
1193             ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
1194                 locked = false;
1195
1196         if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
1197                 panel_pipe = PIPE_B;
1198
1199         WARN(panel_pipe == pipe && locked,
1200              "panel assertion failure, pipe %c regs locked\n",
1201              pipe_name(pipe));
1202 }
1203
1204 void assert_pipe(struct drm_i915_private *dev_priv,
1205                  enum pipe pipe, bool state)
1206 {
1207         int reg;
1208         u32 val;
1209         bool cur_state;
1210         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1211                                                                       pipe);
1212
1213         /* if we need the pipe A quirk it must be always on */
1214         if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
1215                 state = true;
1216
1217         reg = PIPECONF(cpu_transcoder);
1218         val = I915_READ(reg);
1219         cur_state = !!(val & PIPECONF_ENABLE);
1220         WARN(cur_state != state,
1221              "pipe %c assertion failure (expected %s, current %s)\n",
1222              pipe_name(pipe), state_string(state), state_string(cur_state));
1223 }
1224
1225 static void assert_plane(struct drm_i915_private *dev_priv,
1226                          enum plane plane, bool state)
1227 {
1228         int reg;
1229         u32 val;
1230         bool cur_state;
1231
1232         reg = DSPCNTR(plane);
1233         val = I915_READ(reg);
1234         cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1235         WARN(cur_state != state,
1236              "plane %c assertion failure (expected %s, current %s)\n",
1237              plane_name(plane), state_string(state), state_string(cur_state));
1238 }
1239
1240 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
1241 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
1242
1243 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1244                                    enum pipe pipe)
1245 {
1246         int reg, i;
1247         u32 val;
1248         int cur_pipe;
1249
1250         /* Planes are fixed to pipes on ILK+ */
1251         if (HAS_PCH_SPLIT(dev_priv->dev)) {
1252                 reg = DSPCNTR(pipe);
1253                 val = I915_READ(reg);
1254                 WARN((val & DISPLAY_PLANE_ENABLE),
1255                      "plane %c assertion failure, should be disabled but not\n",
1256                      plane_name(pipe));
1257                 return;
1258         }
1259
1260         /* Need to check both planes against the pipe */
1261         for (i = 0; i < 2; i++) {
1262                 reg = DSPCNTR(i);
1263                 val = I915_READ(reg);
1264                 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1265                         DISPPLANE_SEL_PIPE_SHIFT;
1266                 WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1267                      "plane %c assertion failure, should be off on pipe %c but is still active\n",
1268                      plane_name(i), pipe_name(pipe));
1269         }
1270 }
1271
1272 static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1273 {
1274         u32 val;
1275         bool enabled;
1276
1277         if (HAS_PCH_LPT(dev_priv->dev)) {
1278                 DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
1279                 return;
1280         }
1281
1282         val = I915_READ(PCH_DREF_CONTROL);
1283         enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1284                             DREF_SUPERSPREAD_SOURCE_MASK));
1285         WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1286 }
1287
1288 static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
1289                                        enum pipe pipe)
1290 {
1291         int reg;
1292         u32 val;
1293         bool enabled;
1294
1295         reg = TRANSCONF(pipe);
1296         val = I915_READ(reg);
1297         enabled = !!(val & TRANS_ENABLE);
1298         WARN(enabled,
1299              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1300              pipe_name(pipe));
1301 }
1302
1303 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1304                             enum pipe pipe, u32 port_sel, u32 val)
1305 {
1306         if ((val & DP_PORT_EN) == 0)
1307                 return false;
1308
1309         if (HAS_PCH_CPT(dev_priv->dev)) {
1310                 u32     trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1311                 u32     trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1312                 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1313                         return false;
1314         } else {
1315                 if ((val & DP_PIPE_MASK) != (pipe << 30))
1316                         return false;
1317         }
1318         return true;
1319 }
1320
1321 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1322                               enum pipe pipe, u32 val)
1323 {
1324         if ((val & PORT_ENABLE) == 0)
1325                 return false;
1326
1327         if (HAS_PCH_CPT(dev_priv->dev)) {
1328                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1329                         return false;
1330         } else {
1331                 if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
1332                         return false;
1333         }
1334         return true;
1335 }
1336
1337 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1338                               enum pipe pipe, u32 val)
1339 {
1340         if ((val & LVDS_PORT_EN) == 0)
1341                 return false;
1342
1343         if (HAS_PCH_CPT(dev_priv->dev)) {
1344                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1345                         return false;
1346         } else {
1347                 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1348                         return false;
1349         }
1350         return true;
1351 }
1352
1353 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1354                               enum pipe pipe, u32 val)
1355 {
1356         if ((val & ADPA_DAC_ENABLE) == 0)
1357                 return false;
1358         if (HAS_PCH_CPT(dev_priv->dev)) {
1359                 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1360                         return false;
1361         } else {
1362                 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1363                         return false;
1364         }
1365         return true;
1366 }
1367
1368 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1369                                    enum pipe pipe, int reg, u32 port_sel)
1370 {
1371         u32 val = I915_READ(reg);
1372         WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1373              "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1374              reg, pipe_name(pipe));
1375
1376         WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
1377              && (val & DP_PIPEB_SELECT),
1378              "IBX PCH dp port still using transcoder B\n");
1379 }
1380
1381 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1382                                      enum pipe pipe, int reg)
1383 {
1384         u32 val = I915_READ(reg);
1385         WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1386              "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1387              reg, pipe_name(pipe));
1388
1389         WARN(HAS_PCH_IBX(dev_priv->dev) && (val & PORT_ENABLE) == 0
1390              && (val & SDVO_PIPE_B_SELECT),
1391              "IBX PCH hdmi port still using transcoder B\n");
1392 }
1393
1394 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1395                                       enum pipe pipe)
1396 {
1397         int reg;
1398         u32 val;
1399
1400         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1401         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1402         assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1403
1404         reg = PCH_ADPA;
1405         val = I915_READ(reg);
1406         WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1407              "PCH VGA enabled on transcoder %c, should be disabled\n",
1408              pipe_name(pipe));
1409
1410         reg = PCH_LVDS;
1411         val = I915_READ(reg);
1412         WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1413              "PCH LVDS enabled on transcoder %c, should be disabled\n",
1414              pipe_name(pipe));
1415
1416         assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1417         assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1418         assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1419 }
1420
1421 /**
1422  * intel_enable_pll - enable a PLL
1423  * @dev_priv: i915 private structure
1424  * @pipe: pipe PLL to enable
1425  *
1426  * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
1427  * make sure the PLL reg is writable first though, since the panel write
1428  * protect mechanism may be enabled.
1429  *
1430  * Note!  This is for pre-ILK only.
1431  *
1432  * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
1433  */
1434 static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1435 {
1436         int reg;
1437         u32 val;
1438
1439         /* No really, not for ILK+ */
1440         BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5);
1441
1442         /* PLL is protected by panel, make sure we can write it */
1443         if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1444                 assert_panel_unlocked(dev_priv, pipe);
1445
1446         reg = DPLL(pipe);
1447         val = I915_READ(reg);
1448         val |= DPLL_VCO_ENABLE;
1449
1450         /* We do this three times for luck */
1451         I915_WRITE(reg, val);
1452         POSTING_READ(reg);
1453         udelay(150); /* wait for warmup */
1454         I915_WRITE(reg, val);
1455         POSTING_READ(reg);
1456         udelay(150); /* wait for warmup */
1457         I915_WRITE(reg, val);
1458         POSTING_READ(reg);
1459         udelay(150); /* wait for warmup */
1460 }
1461
1462 /**
1463  * intel_disable_pll - disable a PLL
1464  * @dev_priv: i915 private structure
1465  * @pipe: pipe PLL to disable
1466  *
1467  * Disable the PLL for @pipe, making sure the pipe is off first.
1468  *
1469  * Note!  This is for pre-ILK only.
1470  */
1471 static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1472 {
1473         int reg;
1474         u32 val;
1475
1476         /* Don't disable pipe A or pipe A PLLs if needed */
1477         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1478                 return;
1479
1480         /* Make sure the pipe isn't still relying on us */
1481         assert_pipe_disabled(dev_priv, pipe);
1482
1483         reg = DPLL(pipe);
1484         val = I915_READ(reg);
1485         val &= ~DPLL_VCO_ENABLE;
1486         I915_WRITE(reg, val);
1487         POSTING_READ(reg);
1488 }
1489
1490 /* SBI access */
1491 static void
1492 intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
1493                 enum intel_sbi_destination destination)
1494 {
1495         u32 tmp;
1496
1497         WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
1498
1499         if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
1500                                 100)) {
1501                 DRM_ERROR("timeout waiting for SBI to become ready\n");
1502                 return;
1503         }
1504
1505         I915_WRITE(SBI_ADDR, (reg << 16));
1506         I915_WRITE(SBI_DATA, value);
1507
1508         if (destination == SBI_ICLK)
1509                 tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
1510         else
1511                 tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
1512         I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
1513
1514         if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
1515                                 100)) {
1516                 DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
1517                 return;
1518         }
1519 }
1520
1521 static u32
1522 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
1523                enum intel_sbi_destination destination)
1524 {
1525         u32 value = 0;
1526         WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
1527
1528         if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
1529                                 100)) {
1530                 DRM_ERROR("timeout waiting for SBI to become ready\n");
1531                 return 0;
1532         }
1533
1534         I915_WRITE(SBI_ADDR, (reg << 16));
1535
1536         if (destination == SBI_ICLK)
1537                 value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
1538         else
1539                 value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
1540         I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
1541
1542         if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
1543                                 100)) {
1544                 DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
1545                 return 0;
1546         }
1547
1548         return I915_READ(SBI_DATA);
1549 }
1550
1551 /**
1552  * ironlake_enable_pch_pll - enable PCH PLL
1553  * @dev_priv: i915 private structure
1554  * @pipe: pipe PLL to enable
1555  *
1556  * The PCH PLL needs to be enabled before the PCH transcoder, since it
1557  * drives the transcoder clock.
1558  */
1559 static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc)
1560 {
1561         struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1562         struct intel_pch_pll *pll;
1563         int reg;
1564         u32 val;
1565
1566         /* PCH PLLs only available on ILK, SNB and IVB */
1567         BUG_ON(dev_priv->info->gen < 5);
1568         pll = intel_crtc->pch_pll;
1569         if (pll == NULL)
1570                 return;
1571
1572         if (WARN_ON(pll->refcount == 0))
1573                 return;
1574
1575         DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n",
1576                       pll->pll_reg, pll->active, pll->on,
1577                       intel_crtc->base.base.id);
1578
1579         /* PCH refclock must be enabled first */
1580         assert_pch_refclk_enabled(dev_priv);
1581
1582         if (pll->active++ && pll->on) {
1583                 assert_pch_pll_enabled(dev_priv, pll, NULL);
1584                 return;
1585         }
1586
1587         DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);
1588
1589         reg = pll->pll_reg;
1590         val = I915_READ(reg);
1591         val |= DPLL_VCO_ENABLE;
1592         I915_WRITE(reg, val);
1593         POSTING_READ(reg);
1594         udelay(200);
1595
1596         pll->on = true;
1597 }
1598
1599 static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
1600 {
1601         struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1602         struct intel_pch_pll *pll = intel_crtc->pch_pll;
1603         int reg;
1604         u32 val;
1605
1606         /* PCH only available on ILK+ */
1607         BUG_ON(dev_priv->info->gen < 5);
1608         if (pll == NULL)
1609                return;
1610
1611         if (WARN_ON(pll->refcount == 0))
1612                 return;
1613
1614         DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
1615                       pll->pll_reg, pll->active, pll->on,
1616                       intel_crtc->base.base.id);
1617
1618         if (WARN_ON(pll->active == 0)) {
1619                 assert_pch_pll_disabled(dev_priv, pll, NULL);
1620                 return;
1621         }
1622
1623         if (--pll->active) {
1624                 assert_pch_pll_enabled(dev_priv, pll, NULL);
1625                 return;
1626         }
1627
1628         DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);
1629
1630         /* Make sure transcoder isn't still depending on us */
1631         assert_transcoder_disabled(dev_priv, intel_crtc->pipe);
1632
1633         reg = pll->pll_reg;
1634         val = I915_READ(reg);
1635         val &= ~DPLL_VCO_ENABLE;
1636         I915_WRITE(reg, val);
1637         POSTING_READ(reg);
1638         udelay(200);
1639
1640         pll->on = false;
1641 }
1642
1643 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1644                                            enum pipe pipe)
1645 {
1646         struct drm_device *dev = dev_priv->dev;
1647         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1648         uint32_t reg, val, pipeconf_val;
1649
1650         /* PCH only available on ILK+ */
1651         BUG_ON(dev_priv->info->gen < 5);
1652
1653         /* Make sure PCH DPLL is enabled */
1654         assert_pch_pll_enabled(dev_priv,
1655                                to_intel_crtc(crtc)->pch_pll,
1656                                to_intel_crtc(crtc));
1657
1658         /* FDI must be feeding us bits for PCH ports */
1659         assert_fdi_tx_enabled(dev_priv, pipe);
1660         assert_fdi_rx_enabled(dev_priv, pipe);
1661
1662         if (HAS_PCH_CPT(dev)) {
1663                 /* Workaround: Set the timing override bit before enabling the
1664                  * pch transcoder. */
1665                 reg = TRANS_CHICKEN2(pipe);
1666                 val = I915_READ(reg);
1667                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1668                 I915_WRITE(reg, val);
1669         }
1670
1671         reg = TRANSCONF(pipe);
1672         val = I915_READ(reg);
1673         pipeconf_val = I915_READ(PIPECONF(pipe));
1674
1675         if (HAS_PCH_IBX(dev_priv->dev)) {
1676                 /*
1677                  * make the BPC in transcoder be consistent with
1678                  * that in pipeconf reg.
1679                  */
1680                 val &= ~PIPECONF_BPC_MASK;
1681                 val |= pipeconf_val & PIPECONF_BPC_MASK;
1682         }
1683
1684         val &= ~TRANS_INTERLACE_MASK;
1685         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1686                 if (HAS_PCH_IBX(dev_priv->dev) &&
1687                     intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1688                         val |= TRANS_LEGACY_INTERLACED_ILK;
1689                 else
1690                         val |= TRANS_INTERLACED;
1691         else
1692                 val |= TRANS_PROGRESSIVE;
1693
1694         I915_WRITE(reg, val | TRANS_ENABLE);
1695         if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1696                 DRM_ERROR("failed to enable transcoder %d\n", pipe);
1697 }
1698
1699 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1700                                       enum transcoder cpu_transcoder)
1701 {
1702         u32 val, pipeconf_val;
1703
1704         /* PCH only available on ILK+ */
1705         BUG_ON(dev_priv->info->gen < 5);
1706
1707         /* FDI must be feeding us bits for PCH ports */
1708         assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1709         assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1710
1711         /* Workaround: set timing override bit. */
1712         val = I915_READ(_TRANSA_CHICKEN2);
1713         val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1714         I915_WRITE(_TRANSA_CHICKEN2, val);
1715
1716         val = TRANS_ENABLE;
1717         pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1718
1719         if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1720             PIPECONF_INTERLACED_ILK)
1721                 val |= TRANS_INTERLACED;
1722         else
1723                 val |= TRANS_PROGRESSIVE;
1724
1725         I915_WRITE(TRANSCONF(TRANSCODER_A), val);
1726         if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100))
1727                 DRM_ERROR("Failed to enable PCH transcoder\n");
1728 }
1729
1730 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1731                                             enum pipe pipe)
1732 {
1733         struct drm_device *dev = dev_priv->dev;
1734         uint32_t reg, val;
1735
1736         /* FDI relies on the transcoder */
1737         assert_fdi_tx_disabled(dev_priv, pipe);
1738         assert_fdi_rx_disabled(dev_priv, pipe);
1739
1740         /* Ports must be off as well */
1741         assert_pch_ports_disabled(dev_priv, pipe);
1742
1743         reg = TRANSCONF(pipe);
1744         val = I915_READ(reg);
1745         val &= ~TRANS_ENABLE;
1746         I915_WRITE(reg, val);
1747         /* wait for PCH transcoder off, transcoder state */
1748         if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1749                 DRM_ERROR("failed to disable transcoder %d\n", pipe);
1750
1751         if (!HAS_PCH_IBX(dev)) {
1752                 /* Workaround: Clear the timing override chicken bit again. */
1753                 reg = TRANS_CHICKEN2(pipe);
1754                 val = I915_READ(reg);
1755                 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1756                 I915_WRITE(reg, val);
1757         }
1758 }
1759
1760 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1761 {
1762         u32 val;
1763
1764         val = I915_READ(_TRANSACONF);
1765         val &= ~TRANS_ENABLE;
1766         I915_WRITE(_TRANSACONF, val);
1767         /* wait for PCH transcoder off, transcoder state */
1768         if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50))
1769                 DRM_ERROR("Failed to disable PCH transcoder\n");
1770
1771         /* Workaround: clear timing override bit. */
1772         val = I915_READ(_TRANSA_CHICKEN2);
1773         val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1774         I915_WRITE(_TRANSA_CHICKEN2, val);
1775 }
1776
1777 /**
1778  * intel_enable_pipe - enable a pipe, asserting requirements
1779  * @dev_priv: i915 private structure
1780  * @pipe: pipe to enable
1781  * @pch_port: on ILK+, is this pipe driving a PCH port or not
1782  *
1783  * Enable @pipe, making sure that various hardware specific requirements
1784  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1785  *
1786  * @pipe should be %PIPE_A or %PIPE_B.
1787  *
1788  * Will wait until the pipe is actually running (i.e. first vblank) before
1789  * returning.
1790  */
1791 static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1792                               bool pch_port)
1793 {
1794         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1795                                                                       pipe);
1796         enum pipe pch_transcoder;
1797         int reg;
1798         u32 val;
1799
1800         if (HAS_PCH_LPT(dev_priv->dev))
1801                 pch_transcoder = TRANSCODER_A;
1802         else
1803                 pch_transcoder = pipe;
1804
1805         /*
1806          * A pipe without a PLL won't actually be able to drive bits from
1807          * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1808          * need the check.
1809          */
1810         if (!HAS_PCH_SPLIT(dev_priv->dev))
1811                 assert_pll_enabled(dev_priv, pipe);
1812         else {
1813                 if (pch_port) {
1814                         /* if driving the PCH, we need FDI enabled */
1815                         assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
1816                         assert_fdi_tx_pll_enabled(dev_priv,
1817                                                   (enum pipe) cpu_transcoder);
1818                 }
1819                 /* FIXME: assert CPU port conditions for SNB+ */
1820         }
1821
1822         reg = PIPECONF(cpu_transcoder);
1823         val = I915_READ(reg);
1824         if (val & PIPECONF_ENABLE)
1825                 return;
1826
1827         I915_WRITE(reg, val | PIPECONF_ENABLE);
1828         intel_wait_for_vblank(dev_priv->dev, pipe);
1829 }
1830
1831 /**
1832  * intel_disable_pipe - disable a pipe, asserting requirements
1833  * @dev_priv: i915 private structure
1834  * @pipe: pipe to disable
1835  *
1836  * Disable @pipe, making sure that various hardware specific requirements
1837  * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1838  *
1839  * @pipe should be %PIPE_A or %PIPE_B.
1840  *
1841  * Will wait until the pipe has shut down before returning.
1842  */
1843 static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1844                                enum pipe pipe)
1845 {
1846         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1847                                                                       pipe);
1848         int reg;
1849         u32 val;
1850
1851         /*
1852          * Make sure planes won't keep trying to pump pixels to us,
1853          * or we might hang the display.
1854          */
1855         assert_planes_disabled(dev_priv, pipe);
1856
1857         /* Don't disable pipe A or pipe A PLLs if needed */
1858         if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1859                 return;
1860
1861         reg = PIPECONF(cpu_transcoder);
1862         val = I915_READ(reg);
1863         if ((val & PIPECONF_ENABLE) == 0)
1864                 return;
1865
1866         I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1867         intel_wait_for_pipe_off(dev_priv->dev, pipe);
1868 }
1869
1870 /*
1871  * Plane regs are double buffered, going from enabled->disabled needs a
1872  * trigger in order to latch.  The display address reg provides this.
1873  */
1874 void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1875                                       enum plane plane)
1876 {
1877         if (dev_priv->info->gen >= 4)
1878                 I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1879         else
1880                 I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1881 }
1882
1883 /**
1884  * intel_enable_plane - enable a display plane on a given pipe
1885  * @dev_priv: i915 private structure
1886  * @plane: plane to enable
1887  * @pipe: pipe being fed
1888  *
1889  * Enable @plane on @pipe, making sure that @pipe is running first.
1890  */
1891 static void intel_enable_plane(struct drm_i915_private *dev_priv,
1892                                enum plane plane, enum pipe pipe)
1893 {
1894         int reg;
1895         u32 val;
1896
1897         /* If the pipe isn't enabled, we can't pump pixels and may hang */
1898         assert_pipe_enabled(dev_priv, pipe);
1899
1900         reg = DSPCNTR(plane);
1901         val = I915_READ(reg);
1902         if (val & DISPLAY_PLANE_ENABLE)
1903                 return;
1904
1905         I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1906         intel_flush_display_plane(dev_priv, plane);
1907         intel_wait_for_vblank(dev_priv->dev, pipe);
1908 }
1909
1910 /**
1911  * intel_disable_plane - disable a display plane
1912  * @dev_priv: i915 private structure
1913  * @plane: plane to disable
1914  * @pipe: pipe consuming the data
1915  *
1916  * Disable @plane; should be an independent operation.
1917  */
1918 static void intel_disable_plane(struct drm_i915_private *dev_priv,
1919                                 enum plane plane, enum pipe pipe)
1920 {
1921         int reg;
1922         u32 val;
1923
1924         reg = DSPCNTR(plane);
1925         val = I915_READ(reg);
1926         if ((val & DISPLAY_PLANE_ENABLE) == 0)
1927                 return;
1928
1929         I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1930         intel_flush_display_plane(dev_priv, plane);
1931         intel_wait_for_vblank(dev_priv->dev, pipe);
1932 }
1933
1934 int
1935 intel_pin_and_fence_fb_obj(struct drm_device *dev,
1936                            struct drm_i915_gem_object *obj,
1937                            struct intel_ring_buffer *pipelined)
1938 {
1939         struct drm_i915_private *dev_priv = dev->dev_private;
1940         u32 alignment;
1941         int ret;
1942
1943         switch (obj->tiling_mode) {
1944         case I915_TILING_NONE:
1945                 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1946                         alignment = 128 * 1024;
1947                 else if (INTEL_INFO(dev)->gen >= 4)
1948                         alignment = 4 * 1024;
1949                 else
1950                         alignment = 64 * 1024;
1951                 break;
1952         case I915_TILING_X:
1953                 /* pin() will align the object as required by fence */
1954                 alignment = 0;
1955                 break;
1956         case I915_TILING_Y:
1957                 /* FIXME: Is this true? */
1958                 DRM_ERROR("Y tiled not allowed for scan out buffers\n");
1959                 return -EINVAL;
1960         default:
1961                 BUG();
1962         }
1963
1964         dev_priv->mm.interruptible = false;
1965         ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
1966         if (ret)
1967                 goto err_interruptible;
1968
1969         /* Install a fence for tiled scan-out. Pre-i965 always needs a
1970          * fence, whereas 965+ only requires a fence if using
1971          * framebuffer compression.  For simplicity, we always install
1972          * a fence as the cost is not that onerous.
1973          */
1974         ret = i915_gem_object_get_fence(obj);
1975         if (ret)
1976                 goto err_unpin;
1977
1978         i915_gem_object_pin_fence(obj);
1979
1980         dev_priv->mm.interruptible = true;
1981         return 0;
1982
1983 err_unpin:
1984         i915_gem_object_unpin(obj);
1985 err_interruptible:
1986         dev_priv->mm.interruptible = true;
1987         return ret;
1988 }
1989
1990 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
1991 {
1992         i915_gem_object_unpin_fence(obj);
1993         i915_gem_object_unpin(obj);
1994 }
1995
1996 /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
1997  * is assumed to be a power-of-two. */
1998 unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
1999                                                unsigned int bpp,
2000                                                unsigned int pitch)
2001 {
2002         int tile_rows, tiles;
2003
2004         tile_rows = *y / 8;
2005         *y %= 8;
2006         tiles = *x / (512/bpp);
2007         *x %= 512/bpp;
2008
2009         return tile_rows * pitch * 8 + tiles * 4096;
2010 }
2011
2012 static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2013                              int x, int y)
2014 {
2015         struct drm_device *dev = crtc->dev;
2016         struct drm_i915_private *dev_priv = dev->dev_private;
2017         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2018         struct intel_framebuffer *intel_fb;
2019         struct drm_i915_gem_object *obj;
2020         int plane = intel_crtc->plane;
2021         unsigned long linear_offset;
2022         u32 dspcntr;
2023         u32 reg;
2024
2025         switch (plane) {
2026         case 0:
2027         case 1:
2028                 break;
2029         default:
2030                 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2031                 return -EINVAL;
2032         }
2033
2034         intel_fb = to_intel_framebuffer(fb);
2035         obj = intel_fb->obj;
2036
2037         reg = DSPCNTR(plane);
2038         dspcntr = I915_READ(reg);
2039         /* Mask out pixel format bits in case we change it */
2040         dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2041         switch (fb->pixel_format) {
2042         case DRM_FORMAT_C8:
2043                 dspcntr |= DISPPLANE_8BPP;
2044                 break;
2045         case DRM_FORMAT_XRGB1555:
2046         case DRM_FORMAT_ARGB1555:
2047                 dspcntr |= DISPPLANE_BGRX555;
2048                 break;
2049         case DRM_FORMAT_RGB565:
2050                 dspcntr |= DISPPLANE_BGRX565;
2051                 break;
2052         case DRM_FORMAT_XRGB8888:
2053         case DRM_FORMAT_ARGB8888:
2054                 dspcntr |= DISPPLANE_BGRX888;
2055                 break;
2056         case DRM_FORMAT_XBGR8888:
2057         case DRM_FORMAT_ABGR8888:
2058                 dspcntr |= DISPPLANE_RGBX888;
2059                 break;
2060         case DRM_FORMAT_XRGB2101010:
2061         case DRM_FORMAT_ARGB2101010:
2062                 dspcntr |= DISPPLANE_BGRX101010;
2063                 break;
2064         case DRM_FORMAT_XBGR2101010:
2065         case DRM_FORMAT_ABGR2101010:
2066                 dspcntr |= DISPPLANE_RGBX101010;
2067                 break;
2068         default:
2069                 DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
2070                 return -EINVAL;
2071         }
2072
2073         if (INTEL_INFO(dev)->gen >= 4) {
2074                 if (obj->tiling_mode != I915_TILING_NONE)
2075                         dspcntr |= DISPPLANE_TILED;
2076                 else
2077                         dspcntr &= ~DISPPLANE_TILED;
2078         }
2079
2080         I915_WRITE(reg, dspcntr);
2081
2082         linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2083
2084         if (INTEL_INFO(dev)->gen >= 4) {
2085                 intel_crtc->dspaddr_offset =
2086                         intel_gen4_compute_offset_xtiled(&x, &y,
2087                                                          fb->bits_per_pixel / 8,
2088                                                          fb->pitches[0]);
2089                 linear_offset -= intel_crtc->dspaddr_offset;
2090         } else {
2091                 intel_crtc->dspaddr_offset = linear_offset;
2092         }
2093
2094         DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
2095                       obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
2096         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2097         if (INTEL_INFO(dev)->gen >= 4) {
2098                 I915_MODIFY_DISPBASE(DSPSURF(plane),
2099                                      obj->gtt_offset + intel_crtc->dspaddr_offset);
2100                 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2101                 I915_WRITE(DSPLINOFF(plane), linear_offset);
2102         } else
2103                 I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
2104         POSTING_READ(reg);
2105
2106         return 0;
2107 }
2108
2109 static int ironlake_update_plane(struct drm_crtc *crtc,
2110                                  struct drm_framebuffer *fb, int x, int y)
2111 {
2112         struct drm_device *dev = crtc->dev;
2113         struct drm_i915_private *dev_priv = dev->dev_private;
2114         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2115         struct intel_framebuffer *intel_fb;
2116         struct drm_i915_gem_object *obj;
2117         int plane = intel_crtc->plane;
2118         unsigned long linear_offset;
2119         u32 dspcntr;
2120         u32 reg;
2121
2122         switch (plane) {
2123         case 0:
2124         case 1:
2125         case 2:
2126                 break;
2127         default:
2128                 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2129                 return -EINVAL;
2130         }
2131
2132         intel_fb = to_intel_framebuffer(fb);
2133         obj = intel_fb->obj;
2134
2135         reg = DSPCNTR(plane);
2136         dspcntr = I915_READ(reg);
2137         /* Mask out pixel format bits in case we change it */
2138         dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2139         switch (fb->pixel_format) {
2140         case DRM_FORMAT_C8:
2141                 dspcntr |= DISPPLANE_8BPP;
2142                 break;
2143         case DRM_FORMAT_RGB565:
2144                 dspcntr |= DISPPLANE_BGRX565;
2145                 break;
2146         case DRM_FORMAT_XRGB8888:
2147         case DRM_FORMAT_ARGB8888:
2148                 dspcntr |= DISPPLANE_BGRX888;
2149                 break;
2150         case DRM_FORMAT_XBGR8888:
2151         case DRM_FORMAT_ABGR8888:
2152                 dspcntr |= DISPPLANE_RGBX888;
2153                 break;
2154         case DRM_FORMAT_XRGB2101010:
2155         case DRM_FORMAT_ARGB2101010:
2156                 dspcntr |= DISPPLANE_BGRX101010;
2157                 break;
2158         case DRM_FORMAT_XBGR2101010:
2159         case DRM_FORMAT_ABGR2101010:
2160                 dspcntr |= DISPPLANE_RGBX101010;
2161                 break;
2162         default:
2163                 DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
2164                 return -EINVAL;
2165         }
2166
2167         if (obj->tiling_mode != I915_TILING_NONE)
2168                 dspcntr |= DISPPLANE_TILED;
2169         else
2170                 dspcntr &= ~DISPPLANE_TILED;
2171
2172         /* must disable */
2173         dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2174
2175         I915_WRITE(reg, dspcntr);
2176
2177         linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2178         intel_crtc->dspaddr_offset =
2179                 intel_gen4_compute_offset_xtiled(&x, &y,
2180                                                  fb->bits_per_pixel / 8,
2181                                                  fb->pitches[0]);
2182         linear_offset -= intel_crtc->dspaddr_offset;
2183
2184         DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
2185                       obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
2186         I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2187         I915_MODIFY_DISPBASE(DSPSURF(plane),
2188                              obj->gtt_offset + intel_crtc->dspaddr_offset);
2189         if (IS_HASWELL(dev)) {
2190                 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2191         } else {
2192                 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2193                 I915_WRITE(DSPLINOFF(plane), linear_offset);
2194         }
2195         POSTING_READ(reg);
2196
2197         return 0;
2198 }
2199
2200 /* Assume fb object is pinned & idle & fenced and just update base pointers */
2201 static int
2202 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2203                            int x, int y, enum mode_set_atomic state)
2204 {
2205         struct drm_device *dev = crtc->dev;
2206         struct drm_i915_private *dev_priv = dev->dev_private;
2207
2208         if (dev_priv->display.disable_fbc)
2209                 dev_priv->display.disable_fbc(dev);
2210         intel_increase_pllclock(crtc);
2211
2212         return dev_priv->display.update_plane(crtc, fb, x, y);
2213 }
2214
2215 static int
2216 intel_finish_fb(struct drm_framebuffer *old_fb)
2217 {
2218         struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2219         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2220         bool was_interruptible = dev_priv->mm.interruptible;
2221         int ret;
2222
2223         WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
2224
2225         wait_event(dev_priv->pending_flip_queue,
2226                    i915_reset_in_progress(&dev_priv->gpu_error) ||
2227                    atomic_read(&obj->pending_flip) == 0);
2228
2229         /* Big Hammer, we also need to ensure that any pending
2230          * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2231          * current scanout is retired before unpinning the old
2232          * framebuffer.
2233          *
2234          * This should only fail upon a hung GPU, in which case we
2235          * can safely continue.
2236          */
2237         dev_priv->mm.interruptible = false;
2238         ret = i915_gem_object_finish_gpu(obj);
2239         dev_priv->mm.interruptible = was_interruptible;
2240
2241         return ret;
2242 }
2243
2244 static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
2245 {
2246         struct drm_device *dev = crtc->dev;
2247         struct drm_i915_master_private *master_priv;
2248         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2249
2250         if (!dev->primary->master)
2251                 return;
2252
2253         master_priv = dev->primary->master->driver_priv;
2254         if (!master_priv->sarea_priv)
2255                 return;
2256
2257         switch (intel_crtc->pipe) {
2258         case 0:
2259                 master_priv->sarea_priv->pipeA_x = x;
2260                 master_priv->sarea_priv->pipeA_y = y;
2261                 break;
2262         case 1:
2263                 master_priv->sarea_priv->pipeB_x = x;
2264                 master_priv->sarea_priv->pipeB_y = y;
2265                 break;
2266         default:
2267                 break;
2268         }
2269 }
2270
2271 static int
2272 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2273                     struct drm_framebuffer *fb)
2274 {
2275         struct drm_device *dev = crtc->dev;
2276         struct drm_i915_private *dev_priv = dev->dev_private;
2277         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2278         struct drm_framebuffer *old_fb;
2279         int ret;
2280
2281         /* no fb bound */
2282         if (!fb) {
2283                 DRM_ERROR("No FB bound\n");
2284                 return 0;
2285         }
2286
2287         if(intel_crtc->plane > dev_priv->num_pipe) {
2288                 DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
2289                                 intel_crtc->plane,
2290                                 dev_priv->num_pipe);
2291                 return -EINVAL;
2292         }
2293
2294         mutex_lock(&dev->struct_mutex);
2295         ret = intel_pin_and_fence_fb_obj(dev,
2296                                          to_intel_framebuffer(fb)->obj,
2297                                          NULL);
2298         if (ret != 0) {
2299                 mutex_unlock(&dev->struct_mutex);
2300                 DRM_ERROR("pin & fence failed\n");
2301                 return ret;
2302         }
2303
2304         if (crtc->fb)
2305                 intel_finish_fb(crtc->fb);
2306
2307         ret = dev_priv->display.update_plane(crtc, fb, x, y);
2308         if (ret) {
2309                 intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
2310                 mutex_unlock(&dev->struct_mutex);
2311                 DRM_ERROR("failed to update base address\n");
2312                 return ret;
2313         }
2314
2315         old_fb = crtc->fb;
2316         crtc->fb = fb;
2317         crtc->x = x;
2318         crtc->y = y;
2319
2320         if (old_fb) {
2321                 intel_wait_for_vblank(dev, intel_crtc->pipe);
2322                 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2323         }
2324
2325         intel_update_fbc(dev);
2326         mutex_unlock(&dev->struct_mutex);
2327
2328         intel_crtc_update_sarea_pos(crtc, x, y);
2329
2330         return 0;
2331 }
2332
2333 static void intel_fdi_normal_train(struct drm_crtc *crtc)
2334 {
2335         struct drm_device *dev = crtc->dev;
2336         struct drm_i915_private *dev_priv = dev->dev_private;
2337         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2338         int pipe = intel_crtc->pipe;
2339         u32 reg, temp;
2340
2341         /* enable normal train */
2342         reg = FDI_TX_CTL(pipe);
2343         temp = I915_READ(reg);
2344         if (IS_IVYBRIDGE(dev)) {
2345                 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2346                 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2347         } else {
2348                 temp &= ~FDI_LINK_TRAIN_NONE;
2349                 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2350         }
2351         I915_WRITE(reg, temp);
2352
2353         reg = FDI_RX_CTL(pipe);
2354         temp = I915_READ(reg);
2355         if (HAS_PCH_CPT(dev)) {
2356                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2357                 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2358         } else {
2359                 temp &= ~FDI_LINK_TRAIN_NONE;
2360                 temp |= FDI_LINK_TRAIN_NONE;
2361         }
2362         I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2363
2364         /* wait one idle pattern time */
2365         POSTING_READ(reg);
2366         udelay(1000);
2367
2368         /* IVB wants error correction enabled */
2369         if (IS_IVYBRIDGE(dev))
2370                 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2371                            FDI_FE_ERRC_ENABLE);
2372 }
2373
2374 static void ivb_modeset_global_resources(struct drm_device *dev)
2375 {
2376         struct drm_i915_private *dev_priv = dev->dev_private;
2377         struct intel_crtc *pipe_B_crtc =
2378                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
2379         struct intel_crtc *pipe_C_crtc =
2380                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
2381         uint32_t temp;
2382
2383         /* When everything is off disable fdi C so that we could enable fdi B
2384          * with all lanes. XXX: This misses the case where a pipe is not using
2385          * any pch resources and so doesn't need any fdi lanes. */
2386         if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) {
2387                 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
2388                 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
2389
2390                 temp = I915_READ(SOUTH_CHICKEN1);
2391                 temp &= ~FDI_BC_BIFURCATION_SELECT;
2392                 DRM_DEBUG_KMS("disabling fdi C rx\n");
2393                 I915_WRITE(SOUTH_CHICKEN1, temp);
2394         }
2395 }
2396
2397 /* The FDI link training functions for ILK/Ibexpeak. */
2398 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2399 {
2400         struct drm_device *dev = crtc->dev;
2401         struct drm_i915_private *dev_priv = dev->dev_private;
2402         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2403         int pipe = intel_crtc->pipe;
2404         int plane = intel_crtc->plane;
2405         u32 reg, temp, tries;
2406
2407         /* FDI needs bits from pipe & plane first */
2408         assert_pipe_enabled(dev_priv, pipe);
2409         assert_plane_enabled(dev_priv, plane);
2410
2411         /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2412            for train result */
2413         reg = FDI_RX_IMR(pipe);
2414         temp = I915_READ(reg);
2415         temp &= ~FDI_RX_SYMBOL_LOCK;
2416         temp &= ~FDI_RX_BIT_LOCK;
2417         I915_WRITE(reg, temp);
2418         I915_READ(reg);
2419         udelay(150);
2420
2421         /* enable CPU FDI TX and PCH FDI RX */
2422         reg = FDI_TX_CTL(pipe);
2423         temp = I915_READ(reg);
2424         temp &= ~(7 << 19);
2425         temp |= (intel_crtc->fdi_lanes - 1) << 19;
2426         temp &= ~FDI_LINK_TRAIN_NONE;
2427         temp |= FDI_LINK_TRAIN_PATTERN_1;
2428         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2429
2430         reg = FDI_RX_CTL(pipe);
2431         temp = I915_READ(reg);
2432         temp &= ~FDI_LINK_TRAIN_NONE;
2433         temp |= FDI_LINK_TRAIN_PATTERN_1;
2434         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2435
2436         POSTING_READ(reg);
2437         udelay(150);
2438
2439         /* Ironlake workaround, enable clock pointer after FDI enable*/
2440         I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2441         I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2442                    FDI_RX_PHASE_SYNC_POINTER_EN);
2443
2444         reg = FDI_RX_IIR(pipe);
2445         for (tries = 0; tries < 5; tries++) {
2446                 temp = I915_READ(reg);
2447                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2448
2449                 if ((temp & FDI_RX_BIT_LOCK)) {
2450                         DRM_DEBUG_KMS("FDI train 1 done.\n");
2451                         I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2452                         break;
2453                 }
2454         }
2455         if (tries == 5)
2456                 DRM_ERROR("FDI train 1 fail!\n");
2457
2458         /* Train 2 */
2459         reg = FDI_TX_CTL(pipe);
2460         temp = I915_READ(reg);
2461         temp &= ~FDI_LINK_TRAIN_NONE;
2462         temp |= FDI_LINK_TRAIN_PATTERN_2;
2463         I915_WRITE(reg, temp);
2464
2465         reg = FDI_RX_CTL(pipe);
2466         temp = I915_READ(reg);
2467         temp &= ~FDI_LINK_TRAIN_NONE;
2468         temp |= FDI_LINK_TRAIN_PATTERN_2;
2469         I915_WRITE(reg, temp);
2470
2471         POSTING_READ(reg);
2472         udelay(150);
2473
2474         reg = FDI_RX_IIR(pipe);
2475         for (tries = 0; tries < 5; tries++) {
2476                 temp = I915_READ(reg);
2477                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2478
2479                 if (temp & FDI_RX_SYMBOL_LOCK) {
2480                         I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2481                         DRM_DEBUG_KMS("FDI train 2 done.\n");
2482                         break;
2483                 }
2484         }
2485         if (tries == 5)
2486                 DRM_ERROR("FDI train 2 fail!\n");
2487
2488         DRM_DEBUG_KMS("FDI train done\n");
2489
2490 }
2491
2492 static const int snb_b_fdi_train_param[] = {
2493         FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2494         FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2495         FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2496         FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2497 };
2498
2499 /* The FDI link training functions for SNB/Cougarpoint. */
2500 static void gen6_fdi_link_train(struct drm_crtc *crtc)
2501 {
2502         struct drm_device *dev = crtc->dev;
2503         struct drm_i915_private *dev_priv = dev->dev_private;
2504         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2505         int pipe = intel_crtc->pipe;
2506         u32 reg, temp, i, retry;
2507
2508         /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2509            for train result */
2510         reg = FDI_RX_IMR(pipe);
2511         temp = I915_READ(reg);
2512         temp &= ~FDI_RX_SYMBOL_LOCK;
2513         temp &= ~FDI_RX_BIT_LOCK;
2514         I915_WRITE(reg, temp);
2515
2516         POSTING_READ(reg);
2517         udelay(150);
2518
2519         /* enable CPU FDI TX and PCH FDI RX */
2520         reg = FDI_TX_CTL(pipe);
2521         temp = I915_READ(reg);
2522         temp &= ~(7 << 19);
2523         temp |= (intel_crtc->fdi_lanes - 1) << 19;
2524         temp &= ~FDI_LINK_TRAIN_NONE;
2525         temp |= FDI_LINK_TRAIN_PATTERN_1;
2526         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2527         /* SNB-B */
2528         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2529         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2530
2531         I915_WRITE(FDI_RX_MISC(pipe),
2532                    FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2533
2534         reg = FDI_RX_CTL(pipe);
2535         temp = I915_READ(reg);
2536         if (HAS_PCH_CPT(dev)) {
2537                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2538                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2539         } else {
2540                 temp &= ~FDI_LINK_TRAIN_NONE;
2541                 temp |= FDI_LINK_TRAIN_PATTERN_1;
2542         }
2543         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2544
2545         POSTING_READ(reg);
2546         udelay(150);
2547
2548         for (i = 0; i < 4; i++) {
2549                 reg = FDI_TX_CTL(pipe);
2550                 temp = I915_READ(reg);
2551                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2552                 temp |= snb_b_fdi_train_param[i];
2553                 I915_WRITE(reg, temp);
2554
2555                 POSTING_READ(reg);
2556                 udelay(500);
2557
2558                 for (retry = 0; retry < 5; retry++) {
2559                         reg = FDI_RX_IIR(pipe);
2560                         temp = I915_READ(reg);
2561                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2562                         if (temp & FDI_RX_BIT_LOCK) {
2563                                 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2564                                 DRM_DEBUG_KMS("FDI train 1 done.\n");
2565                                 break;
2566                         }
2567                         udelay(50);
2568                 }
2569                 if (retry < 5)
2570                         break;
2571         }
2572         if (i == 4)
2573                 DRM_ERROR("FDI train 1 fail!\n");
2574
2575         /* Train 2 */
2576         reg = FDI_TX_CTL(pipe);
2577         temp = I915_READ(reg);
2578         temp &= ~FDI_LINK_TRAIN_NONE;
2579         temp |= FDI_LINK_TRAIN_PATTERN_2;
2580         if (IS_GEN6(dev)) {
2581                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2582                 /* SNB-B */
2583                 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2584         }
2585         I915_WRITE(reg, temp);
2586
2587         reg = FDI_RX_CTL(pipe);
2588         temp = I915_READ(reg);
2589         if (HAS_PCH_CPT(dev)) {
2590                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2591                 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2592         } else {
2593                 temp &= ~FDI_LINK_TRAIN_NONE;
2594                 temp |= FDI_LINK_TRAIN_PATTERN_2;
2595         }
2596         I915_WRITE(reg, temp);
2597
2598         POSTING_READ(reg);
2599         udelay(150);
2600
2601         for (i = 0; i < 4; i++) {
2602                 reg = FDI_TX_CTL(pipe);
2603                 temp = I915_READ(reg);
2604                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2605                 temp |= snb_b_fdi_train_param[i];
2606                 I915_WRITE(reg, temp);
2607
2608                 POSTING_READ(reg);
2609                 udelay(500);
2610
2611                 for (retry = 0; retry < 5; retry++) {
2612                         reg = FDI_RX_IIR(pipe);
2613                         temp = I915_READ(reg);
2614                         DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2615                         if (temp & FDI_RX_SYMBOL_LOCK) {
2616                                 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2617                                 DRM_DEBUG_KMS("FDI train 2 done.\n");
2618                                 break;
2619                         }
2620                         udelay(50);
2621                 }
2622                 if (retry < 5)
2623                         break;
2624         }
2625         if (i == 4)
2626                 DRM_ERROR("FDI train 2 fail!\n");
2627
2628         DRM_DEBUG_KMS("FDI train done.\n");
2629 }
2630
2631 /* Manual link training for Ivy Bridge A0 parts */
2632 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2633 {
2634         struct drm_device *dev = crtc->dev;
2635         struct drm_i915_private *dev_priv = dev->dev_private;
2636         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2637         int pipe = intel_crtc->pipe;
2638         u32 reg, temp, i;
2639
2640         /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
2641            for train result */
2642         reg = FDI_RX_IMR(pipe);
2643         temp = I915_READ(reg);
2644         temp &= ~FDI_RX_SYMBOL_LOCK;
2645         temp &= ~FDI_RX_BIT_LOCK;
2646         I915_WRITE(reg, temp);
2647
2648         POSTING_READ(reg);
2649         udelay(150);
2650
2651         DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
2652                       I915_READ(FDI_RX_IIR(pipe)));
2653
2654         /* enable CPU FDI TX and PCH FDI RX */
2655         reg = FDI_TX_CTL(pipe);
2656         temp = I915_READ(reg);
2657         temp &= ~(7 << 19);
2658         temp |= (intel_crtc->fdi_lanes - 1) << 19;
2659         temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2660         temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2661         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2662         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2663         temp |= FDI_COMPOSITE_SYNC;
2664         I915_WRITE(reg, temp | FDI_TX_ENABLE);
2665
2666         I915_WRITE(FDI_RX_MISC(pipe),
2667                    FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2668
2669         reg = FDI_RX_CTL(pipe);
2670         temp = I915_READ(reg);
2671         temp &= ~FDI_LINK_TRAIN_AUTO;
2672         temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2673         temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2674         temp |= FDI_COMPOSITE_SYNC;
2675         I915_WRITE(reg, temp | FDI_RX_ENABLE);
2676
2677         POSTING_READ(reg);
2678         udelay(150);
2679
2680         for (i = 0; i < 4; i++) {
2681                 reg = FDI_TX_CTL(pipe);
2682                 temp = I915_READ(reg);
2683                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2684                 temp |= snb_b_fdi_train_param[i];
2685                 I915_WRITE(reg, temp);
2686
2687                 POSTING_READ(reg);
2688                 udelay(500);
2689
2690                 reg = FDI_RX_IIR(pipe);
2691                 temp = I915_READ(reg);
2692                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2693
2694                 if (temp & FDI_RX_BIT_LOCK ||
2695                     (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2696                         I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2697                         DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
2698                         break;
2699                 }
2700         }
2701         if (i == 4)
2702                 DRM_ERROR("FDI train 1 fail!\n");
2703
2704         /* Train 2 */
2705         reg = FDI_TX_CTL(pipe);
2706         temp = I915_READ(reg);
2707         temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2708         temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2709         temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2710         temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2711         I915_WRITE(reg, temp);
2712
2713         reg = FDI_RX_CTL(pipe);
2714         temp = I915_READ(reg);
2715         temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2716         temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2717         I915_WRITE(reg, temp);
2718
2719         POSTING_READ(reg);
2720         udelay(150);
2721
2722         for (i = 0; i < 4; i++) {
2723                 reg = FDI_TX_CTL(pipe);
2724                 temp = I915_READ(reg);
2725                 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2726                 temp |= snb_b_fdi_train_param[i];
2727                 I915_WRITE(reg, temp);
2728
2729                 POSTING_READ(reg);
2730                 udelay(500);
2731
2732                 reg = FDI_RX_IIR(pipe);
2733                 temp = I915_READ(reg);
2734                 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2735
2736                 if (temp & FDI_RX_SYMBOL_LOCK) {
2737                         I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2738                         DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
2739                         break;
2740                 }
2741         }
2742         if (i == 4)
2743                 DRM_ERROR("FDI train 2 fail!\n");
2744
2745         DRM_DEBUG_KMS("FDI train done.\n");
2746 }
2747
2748 static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
2749 {
2750         struct drm_device *dev = intel_crtc->base.dev;
2751         struct drm_i915_private *dev_priv = dev->dev_private;
2752         int pipe = intel_crtc->pipe;
2753         u32 reg, temp;
2754
2755
2756         /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
2757         reg = FDI_RX_CTL(pipe);
2758         temp = I915_READ(reg);
2759         temp &= ~((0x7 << 19) | (0x7 << 16));
2760         temp |= (intel_crtc->fdi_lanes - 1) << 19;
2761         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2762         I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2763
2764         POSTING_READ(reg);
2765         udelay(200);
2766
2767         /* Switch from Rawclk to PCDclk */
2768         temp = I915_READ(reg);
2769         I915_WRITE(reg, temp | FDI_PCDCLK);
2770
2771         POSTING_READ(reg);
2772         udelay(200);
2773
2774         /* Enable CPU FDI TX PLL, always on for Ironlake */
2775         reg = FDI_TX_CTL(pipe);
2776         temp = I915_READ(reg);
2777         if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2778                 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2779
2780                 POSTING_READ(reg);
2781                 udelay(100);
2782         }
2783 }
2784
2785 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
2786 {
2787         struct drm_device *dev = intel_crtc->base.dev;
2788         struct drm_i915_private *dev_priv = dev->dev_private;
2789         int pipe = intel_crtc->pipe;
2790         u32 reg, temp;
2791
2792         /* Switch from PCDclk to Rawclk */
2793         reg = FDI_RX_CTL(pipe);
2794         temp = I915_READ(reg);
2795         I915_WRITE(reg, temp & ~FDI_PCDCLK);
2796
2797         /* Disable CPU FDI TX PLL */
2798         reg = FDI_TX_CTL(pipe);
2799         temp = I915_READ(reg);
2800         I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
2801
2802         POSTING_READ(reg);
2803         udelay(100);
2804
2805         reg = FDI_RX_CTL(pipe);
2806         temp = I915_READ(reg);
2807         I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
2808
2809         /* Wait for the clocks to turn off. */
2810         POSTING_READ(reg);
2811         udelay(100);
2812 }
2813
2814 static void ironlake_fdi_disable(struct drm_crtc *crtc)
2815 {
2816         struct drm_device *dev = crtc->dev;
2817         struct drm_i915_private *dev_priv = dev->dev_private;
2818         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2819         int pipe = intel_crtc->pipe;
2820         u32 reg, temp;
2821
2822         /* disable CPU FDI tx and PCH FDI rx */
2823         reg = FDI_TX_CTL(pipe);
2824         temp = I915_READ(reg);
2825         I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2826         POSTING_READ(reg);
2827
2828         reg = FDI_RX_CTL(pipe);
2829         temp = I915_READ(reg);
2830         temp &= ~(0x7 << 16);
2831         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2832         I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2833
2834         POSTING_READ(reg);
2835         udelay(100);
2836
2837         /* Ironlake workaround, disable clock pointer after downing FDI */
2838         if (HAS_PCH_IBX(dev)) {
2839                 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2840         }
2841
2842         /* still set train pattern 1 */
2843         reg = FDI_TX_CTL(pipe);
2844         temp = I915_READ(reg);
2845         temp &= ~FDI_LINK_TRAIN_NONE;
2846         temp |= FDI_LINK_TRAIN_PATTERN_1;
2847         I915_WRITE(reg, temp);
2848
2849         reg = FDI_RX_CTL(pipe);
2850         temp = I915_READ(reg);
2851         if (HAS_PCH_CPT(dev)) {
2852                 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2853                 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2854         } else {
2855                 temp &= ~FDI_LINK_TRAIN_NONE;
2856                 temp |= FDI_LINK_TRAIN_PATTERN_1;
2857         }
2858         /* BPC in FDI rx is consistent with that in PIPECONF */
2859         temp &= ~(0x07 << 16);
2860         temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2861         I915_WRITE(reg, temp);
2862
2863         POSTING_READ(reg);
2864         udelay(100);
2865 }
2866
2867 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2868 {
2869         struct drm_device *dev = crtc->dev;
2870         struct drm_i915_private *dev_priv = dev->dev_private;
2871         unsigned long flags;
2872         bool pending;
2873
2874         if (i915_reset_in_progress(&dev_priv->gpu_error))
2875                 return false;
2876
2877         spin_lock_irqsave(&dev->event_lock, flags);
2878         pending = to_intel_crtc(crtc)->unpin_work != NULL;
2879         spin_unlock_irqrestore(&dev->event_lock, flags);
2880
2881         return pending;
2882 }
2883
2884 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2885 {
2886         struct drm_device *dev = crtc->dev;
2887         struct drm_i915_private *dev_priv = dev->dev_private;
2888
2889         if (crtc->fb == NULL)
2890                 return;
2891
2892         WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
2893
2894         wait_event(dev_priv->pending_flip_queue,
2895                    !intel_crtc_has_pending_flip(crtc));
2896
2897         mutex_lock(&dev->struct_mutex);
2898         intel_finish_fb(crtc->fb);
2899         mutex_unlock(&dev->struct_mutex);
2900 }
2901
2902 static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc)
2903 {
2904         struct drm_device *dev = crtc->dev;
2905         struct intel_encoder *intel_encoder;
2906
2907         /*
2908          * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2909          * must be driven by its own crtc; no sharing is possible.
2910          */
2911         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
2912                 switch (intel_encoder->type) {
2913                 case INTEL_OUTPUT_EDP:
2914                         if (!intel_encoder_is_pch_edp(&intel_encoder->base))
2915                                 return false;
2916                         continue;
2917                 }
2918         }
2919
2920         return true;
2921 }
2922
2923 static bool haswell_crtc_driving_pch(struct drm_crtc *crtc)
2924 {
2925         return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG);
2926 }
2927
2928 /* Program iCLKIP clock to the desired frequency */
2929 static void lpt_program_iclkip(struct drm_crtc *crtc)
2930 {
2931         struct drm_device *dev = crtc->dev;
2932         struct drm_i915_private *dev_priv = dev->dev_private;
2933         u32 divsel, phaseinc, auxdiv, phasedir = 0;
2934         u32 temp;
2935
2936         mutex_lock(&dev_priv->dpio_lock);
2937
2938         /* It is necessary to ungate the pixclk gate prior to programming
2939          * the divisors, and gate it back when it is done.
2940          */
2941         I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
2942
2943         /* Disable SSCCTL */
2944         intel_sbi_write(dev_priv, SBI_SSCCTL6,
2945                         intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
2946                                 SBI_SSCCTL_DISABLE,
2947                         SBI_ICLK);
2948
2949         /* 20MHz is a corner case which is out of range for the 7-bit divisor */
2950         if (crtc->mode.clock == 20000) {
2951                 auxdiv = 1;
2952                 divsel = 0x41;
2953                 phaseinc = 0x20;
2954         } else {
2955                 /* The iCLK virtual clock root frequency is in MHz,
2956                  * but the crtc->mode.clock in in KHz. To get the divisors,
2957                  * it is necessary to divide one by another, so we
2958                  * convert the virtual clock precision to KHz here for higher
2959                  * precision.
2960                  */
2961                 u32 iclk_virtual_root_freq = 172800 * 1000;
2962                 u32 iclk_pi_range = 64;
2963                 u32 desired_divisor, msb_divisor_value, pi_value;
2964
2965                 desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
2966                 msb_divisor_value = desired_divisor / iclk_pi_range;
2967                 pi_value = desired_divisor % iclk_pi_range;
2968
2969                 auxdiv = 0;
2970                 divsel = msb_divisor_value - 2;
2971                 phaseinc = pi_value;
2972         }
2973
2974         /* This should not happen with any sane values */
2975         WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
2976                 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
2977         WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
2978                 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
2979
2980         DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
2981                         crtc->mode.clock,
2982                         auxdiv,
2983                         divsel,
2984                         phasedir,
2985                         phaseinc);
2986
2987         /* Program SSCDIVINTPHASE6 */
2988         temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
2989         temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
2990         temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
2991         temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
2992         temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
2993         temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
2994         temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
2995         intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
2996
2997         /* Program SSCAUXDIV */
2998         temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
2999         temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3000         temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3001         intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3002
3003         /* Enable modulator and associated divider */
3004         temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3005         temp &= ~SBI_SSCCTL_DISABLE;
3006         intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3007
3008         /* Wait for initialization time */
3009         udelay(24);
3010
3011         I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3012
3013         mutex_unlock(&dev_priv->dpio_lock);
3014 }
3015
3016 /*
3017  * Enable PCH resources required for PCH ports:
3018  *   - PCH PLLs
3019  *   - FDI training & RX/TX
3020  *   - update transcoder timings
3021  *   - DP transcoding bits
3022  *   - transcoder
3023  */
3024 static void ironlake_pch_enable(struct drm_crtc *crtc)
3025 {
3026         struct drm_device *dev = crtc->dev;
3027         struct drm_i915_private *dev_priv = dev->dev_private;
3028         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3029         int pipe = intel_crtc->pipe;
3030         u32 reg, temp;
3031
3032         assert_transcoder_disabled(dev_priv, pipe);
3033
3034         /* Write the TU size bits before fdi link training, so that error
3035          * detection works. */
3036         I915_WRITE(FDI_RX_TUSIZE1(pipe),
3037                    I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
3038
3039         /* For PCH output, training FDI link */
3040         dev_priv->display.fdi_link_train(crtc);
3041
3042         /* XXX: pch pll's can be enabled any time before we enable the PCH
3043          * transcoder, and we actually should do this to not upset any PCH
3044          * transcoder that already use the clock when we share it.
3045          *
3046          * Note that enable_pch_pll tries to do the right thing, but get_pch_pll
3047          * unconditionally resets the pll - we need that to have the right LVDS
3048          * enable sequence. */
3049         ironlake_enable_pch_pll(intel_crtc);
3050
3051         if (HAS_PCH_CPT(dev)) {
3052                 u32 sel;
3053
3054                 temp = I915_READ(PCH_DPLL_SEL);
3055                 switch (pipe) {
3056                 default:
3057                 case 0:
3058                         temp |= TRANSA_DPLL_ENABLE;
3059                         sel = TRANSA_DPLLB_SEL;
3060                         break;
3061                 case 1:
3062                         temp |= TRANSB_DPLL_ENABLE;
3063                         sel = TRANSB_DPLLB_SEL;
3064                         break;
3065                 case 2:
3066                         temp |= TRANSC_DPLL_ENABLE;
3067                         sel = TRANSC_DPLLB_SEL;
3068                         break;
3069                 }
3070                 if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
3071                         temp |= sel;
3072                 else
3073                         temp &= ~sel;
3074                 I915_WRITE(PCH_DPLL_SEL, temp);
3075         }
3076
3077         /* set transcoder timing, panel must allow it */
3078         assert_panel_unlocked(dev_priv, pipe);
3079         I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
3080         I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
3081         I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));
3082
3083         I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
3084         I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
3085         I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
3086         I915_WRITE(TRANS_VSYNCSHIFT(pipe),  I915_READ(VSYNCSHIFT(pipe)));
3087
3088         intel_fdi_normal_train(crtc);
3089
3090         /* For PCH DP, enable TRANS_DP_CTL */
3091         if (HAS_PCH_CPT(dev) &&
3092             (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3093              intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3094                 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
3095                 reg = TRANS_DP_CTL(pipe);
3096                 temp = I915_READ(reg);
3097                 temp &= ~(TRANS_DP_PORT_SEL_MASK |
3098                           TRANS_DP_SYNC_MASK |
3099                           TRANS_DP_BPC_MASK);
3100                 temp |= (TRANS_DP_OUTPUT_ENABLE |
3101                          TRANS_DP_ENH_FRAMING);
3102                 temp |= bpc << 9; /* same format but at 11:9 */
3103
3104                 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3105                         temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3106                 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
3107                         temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3108
3109                 switch (intel_trans_dp_port_sel(crtc)) {
3110                 case PCH_DP_B:
3111                         temp |= TRANS_DP_PORT_SEL_B;
3112                         break;
3113                 case PCH_DP_C:
3114                         temp |= TRANS_DP_PORT_SEL_C;
3115                         break;
3116                 case PCH_DP_D:
3117                         temp |= TRANS_DP_PORT_SEL_D;
3118                         break;
3119                 default:
3120                         BUG();
3121                 }
3122
3123                 I915_WRITE(reg, temp);
3124         }
3125
3126         ironlake_enable_pch_transcoder(dev_priv, pipe);
3127 }
3128
3129 static void lpt_pch_enable(struct drm_crtc *crtc)
3130 {
3131         struct drm_device *dev = crtc->dev;
3132         struct drm_i915_private *dev_priv = dev->dev_private;
3133         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3134         enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
3135
3136         assert_transcoder_disabled(dev_priv, TRANSCODER_A);
3137
3138         lpt_program_iclkip(crtc);
3139
3140         /* Set transcoder timing. */
3141         I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder)));
3142         I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder)));
3143         I915_WRITE(_TRANS_HSYNC_A,  I915_READ(HSYNC(cpu_transcoder)));
3144
3145         I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder)));
3146         I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder)));
3147         I915_WRITE(_TRANS_VSYNC_A,  I915_READ(VSYNC(cpu_transcoder)));
3148         I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder)));
3149
3150         lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
3151 }
3152
3153 static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
3154 {
3155         struct intel_pch_pll *pll = intel_crtc->pch_pll;
3156
3157         if (pll == NULL)
3158                 return;
3159
3160         if (pll->refcount == 0) {
3161                 WARN(1, "bad PCH PLL refcount\n");
3162                 return;
3163         }
3164
3165         --pll->refcount;
3166         intel_crtc->pch_pll = NULL;
3167 }
3168
3169 static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
3170 {
3171         struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
3172         struct intel_pch_pll *pll;
3173         int i;
3174
3175         pll = intel_crtc->pch_pll;
3176         if (pll) {
3177                 DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
3178                               intel_crtc->base.base.id, pll->pll_reg);
3179                 goto prepare;
3180         }
3181
3182         if (HAS_PCH_IBX(dev_priv->dev)) {
3183                 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
3184                 i = intel_crtc->pipe;
3185                 pll = &dev_priv->pch_plls[i];
3186
3187                 DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
3188                               intel_crtc->base.base.id, pll->pll_reg);
3189
3190                 goto found;
3191         }
3192
3193         for (i = 0; i < dev_priv->num_pch_pll; i++) {
3194                 pll = &dev_priv->pch_plls[i];
3195
3196                 /* Only want to check enabled timings first */
3197                 if (pll->refcount == 0)
3198                         continue;
3199
3200                 if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
3201                     fp == I915_READ(pll->fp0_reg)) {
3202                         DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n",
3203                                       intel_crtc->base.base.id,
3204                                       pll->pll_reg, pll->refcount, pll->active);
3205
3206                         goto found;
3207                 }
3208         }
3209
3210         /* Ok no matching timings, maybe there's a free one? */
3211         for (i = 0; i < dev_priv->num_pch_pll; i++) {
3212                 pll = &dev_priv->pch_plls[i];
3213                 if (pll->refcount == 0) {
3214                         DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
3215                                       intel_crtc->base.base.id, pll->pll_reg);
3216                         goto found;
3217                 }
3218         }
3219
3220         return NULL;
3221
3222 found:
3223         intel_crtc->pch_pll = pll;
3224         pll->refcount++;
3225         DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
3226 prepare: /* separate function? */
3227         DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
3228
3229         /* Wait for the clocks to stabilize before rewriting the regs */
3230         I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
3231         POSTING_READ(pll->pll_reg);
3232         udelay(150);
3233
3234         I915_WRITE(pll->fp0_reg, fp);
3235         I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
3236         pll->on = false;
3237         return pll;
3238 }
3239
3240 void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
3241 {
3242         struct drm_i915_private *dev_priv = dev->dev_private;
3243         int dslreg = PIPEDSL(pipe);
3244         u32 temp;
3245
3246         temp = I915_READ(dslreg);
3247         udelay(500);
3248         if (wait_for(I915_READ(dslreg) != temp, 5)) {
3249                 if (wait_for(I915_READ(dslreg) != temp, 5))
3250                         DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
3251         }
3252 }
3253
3254 static void ironlake_crtc_enable(struct drm_crtc *crtc)
3255 {
3256         struct drm_device *dev = crtc->dev;
3257         struct drm_i915_private *dev_priv = dev->dev_private;
3258         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3259         struct intel_encoder *encoder;
3260         int pipe = intel_crtc->pipe;
3261         int plane = intel_crtc->plane;
3262         u32 temp;
3263         bool is_pch_port;
3264
3265         WARN_ON(!crtc->enabled);
3266
3267         if (intel_crtc->active)
3268                 return;
3269
3270         intel_crtc->active = true;
3271         intel_update_watermarks(dev);
3272
3273         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3274                 temp = I915_READ(PCH_LVDS);
3275                 if ((temp & LVDS_PORT_EN) == 0)
3276                         I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
3277         }
3278
3279         is_pch_port = ironlake_crtc_driving_pch(crtc);
3280
3281         if (is_pch_port) {
3282                 /* Note: FDI PLL enabling _must_ be done before we enable the
3283                  * cpu pipes, hence this is separate from all the other fdi/pch
3284                  * enabling. */
3285                 ironlake_fdi_pll_enable(intel_crtc);
3286         } else {
3287                 assert_fdi_tx_disabled(dev_priv, pipe);
3288                 assert_fdi_rx_disabled(dev_priv, pipe);
3289         }
3290
3291         for_each_encoder_on_crtc(dev, crtc, encoder)
3292                 if (encoder->pre_enable)
3293                         encoder->pre_enable(encoder);
3294
3295         /* Enable panel fitting for LVDS */
3296         if (dev_priv->pch_pf_size &&
3297             (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
3298              intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3299                 /* Force use of hard-coded filter coefficients
3300                  * as some pre-programmed values are broken,
3301                  * e.g. x201.
3302                  */
3303                 if (IS_IVYBRIDGE(dev))
3304                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3305                                                  PF_PIPE_SEL_IVB(pipe));
3306                 else
3307                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3308                 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3309                 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3310         }
3311
3312         /*
3313          * On ILK+ LUT must be loaded before the pipe is running but with
3314          * clocks enabled
3315          */
3316         intel_crtc_load_lut(crtc);
3317
3318         intel_enable_pipe(dev_priv, pipe, is_pch_port);
3319         intel_enable_plane(dev_priv, plane, pipe);
3320
3321         if (is_pch_port)
3322                 ironlake_pch_enable(crtc);
3323
3324         mutex_lock(&dev->struct_mutex);
3325         intel_update_fbc(dev);
3326         mutex_unlock(&dev->struct_mutex);
3327
3328         intel_crtc_update_cursor(crtc, true);
3329
3330         for_each_encoder_on_crtc(dev, crtc, encoder)
3331                 encoder->enable(encoder);
3332
3333         if (HAS_PCH_CPT(dev))
3334                 intel_cpt_verify_modeset(dev, intel_crtc->pipe);
3335
3336         /*
3337          * There seems to be a race in PCH platform hw (at least on some
3338          * outputs) where an enabled pipe still completes any pageflip right
3339          * away (as if the pipe is off) instead of waiting for vblank. As soon
3340          * as the first vblank happend, everything works as expected. Hence just
3341          * wait for one vblank before returning to avoid strange things
3342          * happening.
3343          */
3344         intel_wait_for_vblank(dev, intel_crtc->pipe);
3345 }
3346
3347 static void haswell_crtc_enable(struct drm_crtc *crtc)
3348 {
3349         struct drm_device *dev = crtc->dev;
3350         struct drm_i915_private *dev_priv = dev->dev_private;
3351         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3352         struct intel_encoder *encoder;
3353         int pipe = intel_crtc->pipe;
3354         int plane = intel_crtc->plane;
3355         bool is_pch_port;
3356
3357         WARN_ON(!crtc->enabled);
3358
3359         if (intel_crtc->active)
3360                 return;
3361
3362         intel_crtc->active = true;
3363         intel_update_watermarks(dev);
3364
3365         is_pch_port = haswell_crtc_driving_pch(crtc);
3366
3367         if (is_pch_port)
3368                 dev_priv->display.fdi_link_train(crtc);
3369
3370         for_each_encoder_on_crtc(dev, crtc, encoder)
3371                 if (encoder->pre_enable)
3372                         encoder->pre_enable(encoder);
3373
3374         intel_ddi_enable_pipe_clock(intel_crtc);
3375
3376         /* Enable panel fitting for eDP */
3377         if (dev_priv->pch_pf_size &&
3378             intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
3379                 /* Force use of hard-coded filter coefficients
3380                  * as some pre-programmed values are broken,
3381                  * e.g. x201.
3382                  */
3383                 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3384                                          PF_PIPE_SEL_IVB(pipe));
3385                 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3386                 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3387         }
3388
3389         /*
3390          * On ILK+ LUT must be loaded before the pipe is running but with
3391          * clocks enabled
3392          */
3393         intel_crtc_load_lut(crtc);
3394
3395         intel_ddi_set_pipe_settings(crtc);
3396         intel_ddi_enable_pipe_func(crtc);
3397
3398         intel_enable_pipe(dev_priv, pipe, is_pch_port);
3399         intel_enable_plane(dev_priv, plane, pipe);
3400
3401         if (is_pch_port)
3402                 lpt_pch_enable(crtc);
3403
3404         mutex_lock(&dev->struct_mutex);
3405         intel_update_fbc(dev);
3406         mutex_unlock(&dev->struct_mutex);
3407
3408         intel_crtc_update_cursor(crtc, true);
3409
3410         for_each_encoder_on_crtc(dev, crtc, encoder)
3411                 encoder->enable(encoder);
3412
3413         /*
3414          * There seems to be a race in PCH platform hw (at least on some
3415          * outputs) where an enabled pipe still completes any pageflip right
3416          * away (as if the pipe is off) instead of waiting for vblank. As soon
3417          * as the first vblank happend, everything works as expected. Hence just
3418          * wait for one vblank before returning to avoid strange things
3419          * happening.
3420          */
3421         intel_wait_for_vblank(dev, intel_crtc->pipe);
3422 }
3423
3424 static void ironlake_crtc_disable(struct drm_crtc *crtc)
3425 {
3426         struct drm_device *dev = crtc->dev;
3427         struct drm_i915_private *dev_priv = dev->dev_private;
3428         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3429         struct intel_encoder *encoder;
3430         int pipe = intel_crtc->pipe;
3431         int plane = intel_crtc->plane;
3432         u32 reg, temp;
3433
3434
3435         if (!intel_crtc->active)
3436                 return;
3437
3438         for_each_encoder_on_crtc(dev, crtc, encoder)
3439                 encoder->disable(encoder);
3440
3441         intel_crtc_wait_for_pending_flips(crtc);
3442         drm_vblank_off(dev, pipe);
3443         intel_crtc_update_cursor(crtc, false);
3444
3445         intel_disable_plane(dev_priv, plane, pipe);
3446
3447         if (dev_priv->cfb_plane == plane)
3448                 intel_disable_fbc(dev);
3449
3450         intel_disable_pipe(dev_priv, pipe);
3451
3452         /* Disable PF */
3453         I915_WRITE(PF_CTL(pipe), 0);
3454         I915_WRITE(PF_WIN_SZ(pipe), 0);
3455
3456         for_each_encoder_on_crtc(dev, crtc, encoder)
3457                 if (encoder->post_disable)
3458                         encoder->post_disable(encoder);
3459
3460         ironlake_fdi_disable(crtc);
3461
3462         ironlake_disable_pch_transcoder(dev_priv, pipe);
3463
3464         if (HAS_PCH_CPT(dev)) {
3465                 /* disable TRANS_DP_CTL */
3466                 reg = TRANS_DP_CTL(pipe);
3467                 temp = I915_READ(reg);
3468                 temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
3469                 temp |= TRANS_DP_PORT_SEL_NONE;
3470                 I915_WRITE(reg, temp);
3471
3472                 /* disable DPLL_SEL */
3473                 temp = I915_READ(PCH_DPLL_SEL);
3474                 switch (pipe) {
3475                 case 0:
3476                         temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
3477                         break;
3478                 case 1:
3479                         temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
3480                         break;
3481                 case 2:
3482                         /* C shares PLL A or B */
3483                         temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
3484                         break;
3485                 default:
3486                         BUG(); /* wtf */
3487                 }
3488                 I915_WRITE(PCH_DPLL_SEL, temp);
3489         }
3490
3491         /* disable PCH DPLL */
3492         intel_disable_pch_pll(intel_crtc);
3493
3494         ironlake_fdi_pll_disable(intel_crtc);
3495
3496         intel_crtc->active = false;
3497         intel_update_watermarks(dev);
3498
3499         mutex_lock(&dev->struct_mutex);
3500         intel_update_fbc(dev);
3501         mutex_unlock(&dev->struct_mutex);
3502 }
3503
3504 static void haswell_crtc_disable(struct drm_crtc *crtc)
3505 {
3506         struct drm_device *dev = crtc->dev;
3507         struct drm_i915_private *dev_priv = dev->dev_private;
3508         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3509         struct intel_encoder *encoder;
3510         int pipe = intel_crtc->pipe;
3511         int plane = intel_crtc->plane;
3512         enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
3513         bool is_pch_port;
3514
3515         if (!intel_crtc->active)
3516                 return;
3517
3518         is_pch_port = haswell_crtc_driving_pch(crtc);
3519
3520         for_each_encoder_on_crtc(dev, crtc, encoder)
3521                 encoder->disable(encoder);
3522
3523         intel_crtc_wait_for_pending_flips(crtc);
3524         drm_vblank_off(dev, pipe);
3525         intel_crtc_update_cursor(crtc, false);
3526
3527         intel_disable_plane(dev_priv, plane, pipe);
3528
3529         if (dev_priv->cfb_plane == plane)
3530                 intel_disable_fbc(dev);
3531
3532         intel_disable_pipe(dev_priv, pipe);
3533
3534         intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
3535
3536         /* Disable PF */
3537         I915_WRITE(PF_CTL(pipe), 0);
3538         I915_WRITE(PF_WIN_SZ(pipe), 0);
3539
3540         intel_ddi_disable_pipe_clock(intel_crtc);
3541
3542         for_each_encoder_on_crtc(dev, crtc, encoder)
3543                 if (encoder->post_disable)
3544                         encoder->post_disable(encoder);
3545
3546         if (is_pch_port) {
3547                 lpt_disable_pch_transcoder(dev_priv);
3548                 intel_ddi_fdi_disable(crtc);
3549         }
3550
3551         intel_crtc->active = false;
3552         intel_update_watermarks(dev);
3553
3554         mutex_lock(&dev->struct_mutex);
3555         intel_update_fbc(dev);
3556         mutex_unlock(&dev->struct_mutex);
3557 }
3558
3559 static void ironlake_crtc_off(struct drm_crtc *crtc)
3560 {
3561         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3562         intel_put_pch_pll(intel_crtc);
3563 }
3564
3565 static void haswell_crtc_off(struct drm_crtc *crtc)
3566 {
3567         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3568
3569         /* Stop saying we're using TRANSCODER_EDP because some other CRTC might
3570          * start using it. */
3571         intel_crtc->cpu_transcoder = (enum transcoder) intel_crtc->pipe;
3572
3573         intel_ddi_put_crtc_pll(crtc);
3574 }
3575
3576 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3577 {
3578         if (!enable && intel_crtc->overlay) {
3579                 struct drm_device *dev = intel_crtc->base.dev;
3580                 struct drm_i915_private *dev_priv = dev->dev_private;
3581
3582                 mutex_lock(&dev->struct_mutex);
3583                 dev_priv->mm.interruptible = false;
3584                 (void) intel_overlay_switch_off(intel_crtc->overlay);
3585                 dev_priv->mm.interruptible = true;
3586                 mutex_unlock(&dev->struct_mutex);
3587         }
3588
3589         /* Let userspace switch the overlay on again. In most cases userspace
3590          * has to recompute where to put it anyway.
3591          */
3592 }
3593
3594 static void i9xx_crtc_enable(struct drm_crtc *crtc)
3595 {
3596         struct drm_device *dev = crtc->dev;
3597         struct drm_i915_private *dev_priv = dev->dev_private;
3598         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3599         struct intel_encoder *encoder;
3600         int pipe = intel_crtc->pipe;
3601         int plane = intel_crtc->plane;
3602
3603         WARN_ON(!crtc->enabled);
3604
3605         if (intel_crtc->active)
3606                 return;
3607
3608         intel_crtc->active = true;
3609         intel_update_watermarks(dev);
3610
3611         intel_enable_pll(dev_priv, pipe);
3612         intel_enable_pipe(dev_priv, pipe, false);
3613         intel_enable_plane(dev_priv, plane, pipe);
3614
3615         intel_crtc_load_lut(crtc);
3616         intel_update_fbc(dev);
3617
3618         /* Give the overlay scaler a chance to enable if it's on this pipe */
3619         intel_crtc_dpms_overlay(intel_crtc, true);
3620         intel_crtc_update_cursor(crtc, true);
3621
3622         for_each_encoder_on_crtc(dev, crtc, encoder)
3623                 encoder->enable(encoder);
3624 }
3625
3626 static void i9xx_crtc_disable(struct drm_crtc *crtc)
3627 {
3628         struct drm_device *dev = crtc->dev;
3629         struct drm_i915_private *dev_priv = dev->dev_private;
3630         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3631         struct intel_encoder *encoder;
3632         int pipe = intel_crtc->pipe;
3633         int plane = intel_crtc->plane;
3634
3635
3636         if (!intel_crtc->active)
3637                 return;
3638
3639         for_each_encoder_on_crtc(dev, crtc, encoder)
3640                 encoder->disable(encoder);
3641
3642         /* Give the overlay scaler a chance to disable if it's on this pipe */
3643         intel_crtc_wait_for_pending_flips(crtc);
3644         drm_vblank_off(dev, pipe);
3645         intel_crtc_dpms_overlay(intel_crtc, false);
3646         intel_crtc_update_cursor(crtc, false);
3647
3648         if (dev_priv->cfb_plane == plane)
3649                 intel_disable_fbc(dev);
3650
3651         intel_disable_plane(dev_priv, plane, pipe);
3652         intel_disable_pipe(dev_priv, pipe);
3653         intel_disable_pll(dev_priv, pipe);
3654
3655         intel_crtc->active = false;
3656         intel_update_fbc(dev);
3657         intel_update_watermarks(dev);
3658 }
3659
3660 static void i9xx_crtc_off(struct drm_crtc *crtc)
3661 {
3662 }
3663
3664 static void intel_crtc_update_sarea(struct drm_crtc *crtc,
3665                                     bool enabled)
3666 {
3667         struct drm_device *dev = crtc->dev;
3668         struct drm_i915_master_private *master_priv;
3669         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3670         int pipe = intel_crtc->pipe;
3671
3672         if (!dev->primary->master)
3673                 return;
3674
3675         master_priv = dev->primary->master->driver_priv;
3676         if (!master_priv->sarea_priv)
3677                 return;
3678
3679         switch (pipe) {
3680         case 0:
3681                 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
3682                 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
3683                 break;
3684         case 1:
3685                 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
3686                 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
3687                 break;
3688         default:
3689                 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
3690                 break;
3691         }
3692 }
3693
3694 /**
3695  * Sets the power management mode of the pipe and plane.
3696  */
3697 void intel_crtc_update_dpms(struct drm_crtc *crtc)
3698 {
3699         struct drm_device *dev = crtc->dev;
3700         struct drm_i915_private *dev_priv = dev->dev_private;
3701         struct intel_encoder *intel_encoder;
3702         bool enable = false;
3703
3704         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
3705                 enable |= intel_encoder->connectors_active;
3706
3707         if (enable)
3708                 dev_priv->display.crtc_enable(crtc);
3709         else
3710                 dev_priv->display.crtc_disable(crtc);
3711
3712         intel_crtc_update_sarea(crtc, enable);
3713 }
3714
3715 static void intel_crtc_noop(struct drm_crtc *crtc)
3716 {
3717 }
3718
3719 static void intel_crtc_disable(struct drm_crtc *crtc)
3720 {
3721         struct drm_device *dev = crtc->dev;
3722         struct drm_connector *connector;
3723         struct drm_i915_private *dev_priv = dev->dev_private;
3724         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3725
3726         /* crtc should still be enabled when we disable it. */
3727         WARN_ON(!crtc->enabled);
3728
3729         intel_crtc->eld_vld = false;
3730         dev_priv->display.crtc_disable(crtc);
3731         intel_crtc_update_sarea(crtc, false);
3732         dev_priv->display.off(crtc);
3733
3734         assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
3735         assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
3736
3737         if (crtc->fb) {
3738                 mutex_lock(&dev->struct_mutex);
3739                 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
3740                 mutex_unlock(&dev->struct_mutex);
3741                 crtc->fb = NULL;
3742         }
3743
3744         /* Update computed state. */
3745         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3746                 if (!connector->encoder || !connector->encoder->crtc)
3747                         continue;
3748
3749                 if (connector->encoder->crtc != crtc)
3750                         continue;
3751
3752                 connector->dpms = DRM_MODE_DPMS_OFF;
3753                 to_intel_encoder(connector->encoder)->connectors_active = false;
3754         }
3755 }
3756
3757 void intel_modeset_disable(struct drm_device *dev)
3758 {
3759         struct drm_crtc *crtc;
3760
3761         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3762                 if (crtc->enabled)
3763                         intel_crtc_disable(crtc);
3764         }
3765 }
3766
3767 void intel_encoder_noop(struct drm_encoder *encoder)
3768 {
3769 }
3770
3771 void intel_encoder_destroy(struct drm_encoder *encoder)
3772 {
3773         struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3774
3775         drm_encoder_cleanup(encoder);
3776         kfree(intel_encoder);
3777 }
3778
3779 /* Simple dpms helper for encodres with just one connector, no cloning and only
3780  * one kind of off state. It clamps all !ON modes to fully OFF and changes the
3781  * state of the entire output pipe. */
3782 void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
3783 {
3784         if (mode == DRM_MODE_DPMS_ON) {
3785                 encoder->connectors_active = true;
3786
3787                 intel_crtc_update_dpms(encoder->base.crtc);
3788         } else {
3789                 encoder->connectors_active = false;
3790
3791                 intel_crtc_update_dpms(encoder->base.crtc);
3792         }
3793 }
3794
3795 /* Cross check the actual hw state with our own modeset state tracking (and it's
3796  * internal consistency). */
3797 static void intel_connector_check_state(struct intel_connector *connector)
3798 {
3799         if (connector->get_hw_state(connector)) {
3800                 struct intel_encoder *encoder = connector->encoder;
3801                 struct drm_crtc *crtc;
3802                 bool encoder_enabled;
3803                 enum pipe pipe;
3804
3805                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
3806                               connector->base.base.id,
3807                               drm_get_connector_name(&connector->base));
3808
3809                 WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
3810                      "wrong connector dpms state\n");
3811                 WARN(connector->base.encoder != &encoder->base,
3812                      "active connector not linked to encoder\n");
3813                 WARN(!encoder->connectors_active,
3814                      "encoder->connectors_active not set\n");
3815
3816                 encoder_enabled = encoder->get_hw_state(encoder, &pipe);
3817                 WARN(!encoder_enabled, "encoder not enabled\n");
3818                 if (WARN_ON(!encoder->base.crtc))
3819                         return;
3820
3821                 crtc = encoder->base.crtc;
3822
3823                 WARN(!crtc->enabled, "crtc not enabled\n");
3824                 WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
3825                 WARN(pipe != to_intel_crtc(crtc)->pipe,
3826                      "encoder active on the wrong pipe\n");
3827         }
3828 }
3829
3830 /* Even simpler default implementation, if there's really no special case to
3831  * consider. */
3832 void intel_connector_dpms(struct drm_connector *connector, int mode)
3833 {
3834         struct intel_encoder *encoder = intel_attached_encoder(connector);
3835
3836         /* All the simple cases only support two dpms states. */
3837         if (mode != DRM_MODE_DPMS_ON)
3838                 mode = DRM_MODE_DPMS_OFF;
3839
3840         if (mode == connector->dpms)
3841                 return;
3842
3843         connector->dpms = mode;
3844
3845         /* Only need to change hw state when actually enabled */
3846         if (encoder->base.crtc)
3847                 intel_encoder_dpms(encoder, mode);
3848         else
3849                 WARN_ON(encoder->connectors_active != false);
3850
3851         intel_modeset_check_state(connector->dev);
3852 }
3853
3854 /* Simple connector->get_hw_state implementation for encoders that support only
3855  * one connector and no cloning and hence the encoder state determines the state
3856  * of the connector. */
3857 bool intel_connector_get_hw_state(struct intel_connector *connector)
3858 {
3859         enum pipe pipe = 0;
3860         struct intel_encoder *encoder = connector->encoder;
3861
3862         return encoder->get_hw_state(encoder, &pipe);
3863 }
3864
3865 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3866                                   const struct drm_display_mode *mode,
3867                                   struct drm_display_mode *adjusted_mode)
3868 {
3869         struct drm_device *dev = crtc->dev;
3870
3871         if (HAS_PCH_SPLIT(dev)) {
3872                 /* FDI link clock is fixed at 2.7G */
3873                 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3874                         return false;
3875         }
3876
3877         /* All interlaced capable intel hw wants timings in frames. Note though
3878          * that intel_lvds_mode_fixup does some funny tricks with the crtc
3879          * timings, so we need to be careful not to clobber these.*/
3880         if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
3881                 drm_mode_set_crtcinfo(adjusted_mode, 0);
3882
3883         /* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
3884          * with a hsync front porch of 0.
3885          */
3886         if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
3887                 adjusted_mode->hsync_start == adjusted_mode->hdisplay)
3888                 return false;
3889
3890         return true;
3891 }
3892
3893 static int valleyview_get_display_clock_speed(struct drm_device *dev)
3894 {
3895         return 400000; /* FIXME */
3896 }
3897
3898 static int i945_get_display_clock_speed(struct drm_device *dev)
3899 {
3900         return 400000;
3901 }
3902
3903 static int i915_get_display_clock_speed(struct drm_device *dev)
3904 {
3905         return 333000;
3906 }
3907
3908 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
3909 {
3910         return 200000;
3911 }
3912
3913 static int i915gm_get_display_clock_speed(struct drm_device *dev)
3914 {
3915         u16 gcfgc = 0;
3916
3917         pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3918
3919         if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3920                 return 133000;
3921         else {
3922                 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3923                 case GC_DISPLAY_CLOCK_333_MHZ:
3924                         return 333000;
3925                 default:
3926                 case GC_DISPLAY_CLOCK_190_200_MHZ:
3927                         return 190000;
3928                 }
3929         }
3930 }
3931
3932 static int i865_get_display_clock_speed(struct drm_device *dev)
3933 {
3934         return 266000;
3935 }
3936
3937 static int i855_get_display_clock_speed(struct drm_device *dev)
3938 {
3939         u16 hpllcc = 0;
3940         /* Assume that the hardware is in the high speed state.  This
3941          * should be the default.
3942          */
3943         switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
3944         case GC_CLOCK_133_200:
3945         case GC_CLOCK_100_200:
3946                 return 200000;
3947         case GC_CLOCK_166_250:
3948                 return 250000;
3949         case GC_CLOCK_100_133:
3950                 return 133000;
3951         }
3952
3953         /* Shouldn't happen */
3954         return 0;
3955 }
3956
3957 static int i830_get_display_clock_speed(struct drm_device *dev)
3958 {
3959         return 133000;
3960 }
3961
3962 static void
3963 intel_reduce_ratio(uint32_t *num, uint32_t *den)
3964 {
3965         while (*num > 0xffffff || *den > 0xffffff) {
3966                 *num >>= 1;
3967                 *den >>= 1;
3968         }
3969 }
3970
3971 void
3972 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
3973                        int pixel_clock, int link_clock,
3974                        struct intel_link_m_n *m_n)
3975 {
3976         m_n->tu = 64;
3977         m_n->gmch_m = bits_per_pixel * pixel_clock;
3978         m_n->gmch_n = link_clock * nlanes * 8;
3979         intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
3980         m_n->link_m = pixel_clock;
3981         m_n->link_n = link_clock;
3982         intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
3983 }
3984
3985 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
3986 {
3987         if (i915_panel_use_ssc >= 0)
3988                 return i915_panel_use_ssc != 0;
3989         return dev_priv->lvds_use_ssc
3990                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
3991 }
3992
3993 /**
3994  * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
3995  * @crtc: CRTC structure
3996  * @mode: requested mode
3997  *
3998  * A pipe may be connected to one or more outputs.  Based on the depth of the
3999  * attached framebuffer, choose a good color depth to use on the pipe.
4000  *
4001  * If possible, match the pipe depth to the fb depth.  In some cases, this
4002  * isn't ideal, because the connected output supports a lesser or restricted
4003  * set of depths.  Resolve that here:
4004  *    LVDS typically supports only 6bpc, so clamp down in that case
4005  *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
4006  *    Displays may support a restricted set as well, check EDID and clamp as
4007  *      appropriate.
4008  *    DP may want to dither down to 6bpc to fit larger modes
4009  *
4010  * RETURNS:
4011  * Dithering requirement (i.e. false if display bpc and pipe bpc match,
4012  * true if they don't match).
4013  */
4014 static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4015                                          struct drm_framebuffer *fb,
4016                                          unsigned int *pipe_bpp,
4017                                          struct drm_display_mode *mode)
4018 {
4019         struct drm_device *dev = crtc->dev;
4020         struct drm_i915_private *dev_priv = dev->dev_private;
4021         struct drm_connector *connector;
4022         struct intel_encoder *intel_encoder;
4023         unsigned int display_bpc = UINT_MAX, bpc;
4024
4025         /* Walk the encoders & connectors on this crtc, get min bpc */
4026         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4027
4028                 if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
4029                         unsigned int lvds_bpc;
4030
4031                         if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
4032                             LVDS_A3_POWER_UP)
4033                                 lvds_bpc = 8;
4034                         else
4035                                 lvds_bpc = 6;
4036
4037                         if (lvds_bpc < display_bpc) {
4038                                 DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
4039                                 display_bpc = lvds_bpc;
4040                         }
4041                         continue;
4042                 }
4043
4044                 /* Not one of the known troublemakers, check the EDID */
4045                 list_for_each_entry(connector, &dev->mode_config.connector_list,
4046                                     head) {
4047                         if (connector->encoder != &intel_encoder->base)
4048                                 continue;
4049
4050                         /* Don't use an invalid EDID bpc value */
4051                         if (connector->display_info.bpc &&
4052                             connector->display_info.bpc < display_bpc) {
4053                                 DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
4054                                 display_bpc = connector->display_info.bpc;
4055                         }
4056                 }
4057
4058                 if (intel_encoder->type == INTEL_OUTPUT_EDP) {
4059                         /* Use VBT settings if we have an eDP panel */
4060                         unsigned int edp_bpc = dev_priv->edp.bpp / 3;
4061
4062                         if (edp_bpc && edp_bpc < display_bpc) {
4063                                 DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
4064                                 display_bpc = edp_bpc;
4065                         }
4066                         continue;
4067                 }
4068
4069                 /*
4070                  * HDMI is either 12 or 8, so if the display lets 10bpc sneak
4071                  * through, clamp it down.  (Note: >12bpc will be caught below.)
4072                  */
4073                 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
4074                         if (display_bpc > 8 && display_bpc < 12) {
4075                                 DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
4076                                 display_bpc = 12;
4077                         } else {
4078                                 DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
4079                                 display_bpc = 8;
4080                         }
4081                 }
4082         }
4083
4084         if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
4085                 DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
4086                 display_bpc = 6;
4087         }
4088
4089         /*
4090          * We could just drive the pipe at the highest bpc all the time and
4091          * enable dithering as needed, but that costs bandwidth.  So choose
4092          * the minimum value that expresses the full color range of the fb but
4093          * also stays within the max display bpc discovered above.
4094          */
4095
4096         switch (fb->depth) {
4097         case 8:
4098                 bpc = 8; /* since we go through a colormap */
4099                 break;
4100         case 15:
4101         case 16:
4102                 bpc = 6; /* min is 18bpp */
4103                 break;
4104         case 24:
4105                 bpc = 8;
4106                 break;
4107         case 30:
4108                 bpc = 10;
4109                 break;
4110         case 48:
4111                 bpc = 12;
4112                 break;
4113         default:
4114                 DRM_DEBUG("unsupported depth, assuming 24 bits\n");
4115                 bpc = min((unsigned int)8, display_bpc);
4116                 break;
4117         }
4118
4119         display_bpc = min(display_bpc, bpc);
4120
4121         DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
4122                       bpc, display_bpc);
4123
4124         *pipe_bpp = display_bpc * 3;
4125
4126         return display_bpc != bpc;
4127 }
4128
4129 static int vlv_get_refclk(struct drm_crtc *crtc)
4130 {
4131         struct drm_device *dev = crtc->dev;
4132         struct drm_i915_private *dev_priv = dev->dev_private;
4133         int refclk = 27000; /* for DP & HDMI */
4134
4135         return 100000; /* only one validated so far */
4136
4137         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
4138                 refclk = 96000;
4139         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
4140                 if (intel_panel_use_ssc(dev_priv))
4141                         refclk = 100000;
4142                 else
4143                         refclk = 96000;
4144         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
4145                 refclk = 100000;
4146         }
4147
4148         return refclk;
4149 }
4150
4151 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
4152 {
4153         struct drm_device *dev = crtc->dev;
4154         struct drm_i915_private *dev_priv = dev->dev_private;
4155         int refclk;
4156
4157         if (IS_VALLEYVIEW(dev)) {
4158                 refclk = vlv_get_refclk(crtc);
4159         } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4160             intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4161                 refclk = dev_priv->lvds_ssc_freq * 1000;
4162                 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4163                               refclk / 1000);
4164         } else if (!IS_GEN2(dev)) {
4165                 refclk = 96000;
4166         } else {
4167                 refclk = 48000;
4168         }
4169
4170         return refclk;
4171 }
4172
4173 static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
4174                                       intel_clock_t *clock)
4175 {
4176         /* SDVO TV has fixed PLL values depend on its clock range,
4177            this mirrors vbios setting. */
4178         if (adjusted_mode->clock >= 100000
4179             && adjusted_mode->clock < 140500) {
4180                 clock->p1 = 2;
4181                 clock->p2 = 10;
4182                 clock->n = 3;
4183                 clock->m1 = 16;
4184                 clock->m2 = 8;
4185         } else if (adjusted_mode->clock >= 140500
4186                    && adjusted_mode->clock <= 200000) {
4187                 clock->p1 = 1;
4188                 clock->p2 = 10;
4189                 clock->n = 6;
4190                 clock->m1 = 12;
4191                 clock->m2 = 8;
4192         }
4193 }
4194
4195 static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
4196                                      intel_clock_t *clock,
4197                                      intel_clock_t *reduced_clock)
4198 {
4199         struct drm_device *dev = crtc->dev;
4200         struct drm_i915_private *dev_priv = dev->dev_private;
4201         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4202         int pipe = intel_crtc->pipe;
4203         u32 fp, fp2 = 0;
4204
4205         if (IS_PINEVIEW(dev)) {
4206                 fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
4207                 if (reduced_clock)
4208                         fp2 = (1 << reduced_clock->n) << 16 |
4209                                 reduced_clock->m1 << 8 | reduced_clock->m2;
4210         } else {
4211                 fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
4212                 if (reduced_clock)
4213                         fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
4214                                 reduced_clock->m2;
4215         }
4216
4217         I915_WRITE(FP0(pipe), fp);
4218
4219         intel_crtc->lowfreq_avail = false;
4220         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4221             reduced_clock && i915_powersave) {
4222                 I915_WRITE(FP1(pipe), fp2);
4223                 intel_crtc->lowfreq_avail = true;
4224         } else {
4225                 I915_WRITE(FP1(pipe), fp);
4226         }
4227 }
4228
4229 static void vlv_update_pll(struct drm_crtc *crtc,
4230                            struct drm_display_mode *mode,
4231                            struct drm_display_mode *adjusted_mode,
4232                            intel_clock_t *clock, intel_clock_t *reduced_clock,
4233                            int num_connectors)
4234 {
4235         struct drm_device *dev = crtc->dev;
4236         struct drm_i915_private *dev_priv = dev->dev_private;
4237         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4238         int pipe = intel_crtc->pipe;
4239         u32 dpll, mdiv, pdiv;
4240         u32 bestn, bestm1, bestm2, bestp1, bestp2;
4241         bool is_sdvo;
4242         u32 temp;
4243
4244         mutex_lock(&dev_priv->dpio_lock);
4245
4246         is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
4247                 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
4248
4249         dpll = DPLL_VGA_MODE_DIS;
4250         dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
4251         dpll |= DPLL_REFA_CLK_ENABLE_VLV;
4252         dpll |= DPLL_INTEGRATED_CLOCK_VLV;
4253
4254         I915_WRITE(DPLL(pipe), dpll);
4255         POSTING_READ(DPLL(pipe));
4256
4257         bestn = clock->n;
4258         bestm1 = clock->m1;
4259         bestm2 = clock->m2;
4260         bestp1 = clock->p1;
4261         bestp2 = clock->p2;
4262
4263         /*
4264          * In Valleyview PLL and program lane counter registers are exposed
4265          * through DPIO interface
4266          */
4267         mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
4268         mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
4269         mdiv |= ((bestn << DPIO_N_SHIFT));
4270         mdiv |= (1 << DPIO_POST_DIV_SHIFT);
4271         mdiv |= (1 << DPIO_K_SHIFT);
4272         mdiv |= DPIO_ENABLE_CALIBRATION;
4273         intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
4274
4275         intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
4276
4277         pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) |
4278                 (3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
4279                 (7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) |
4280                 (5 << DPIO_CLK_BIAS_CTL_SHIFT);
4281         intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
4282
4283         intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b);
4284
4285         dpll |= DPLL_VCO_ENABLE;
4286         I915_WRITE(DPLL(pipe), dpll);
4287         POSTING_READ(DPLL(pipe));
4288         if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
4289                 DRM_ERROR("DPLL %d failed to lock\n", pipe);
4290
4291         intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
4292
4293         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
4294                 intel_dp_set_m_n(crtc, mode, adjusted_mode);
4295
4296         I915_WRITE(DPLL(pipe), dpll);
4297
4298         /* Wait for the clocks to stabilize. */
4299         POSTING_READ(DPLL(pipe));
4300         udelay(150);
4301
4302         temp = 0;
4303         if (is_sdvo) {
4304                 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
4305                 if (temp > 1)
4306                         temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
4307                 else
4308                         temp = 0;
4309         }
4310         I915_WRITE(DPLL_MD(pipe), temp);
4311         POSTING_READ(DPLL_MD(pipe));
4312
4313         /* Now program lane control registers */
4314         if(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)
4315                         || intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
4316         {
4317                 temp = 0x1000C4;
4318                 if(pipe == 1)
4319                         temp |= (1 << 21);
4320                 intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
4321         }
4322         if(intel_pipe_has_type(crtc,INTEL_OUTPUT_EDP))
4323         {
4324                 temp = 0x1000C4;
4325                 if(pipe == 1)
4326                         temp |= (1 << 21);
4327                 intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
4328         }
4329
4330         mutex_unlock(&dev_priv->dpio_lock);
4331 }
4332
4333 static void i9xx_update_pll(struct drm_crtc *crtc,
4334                             struct drm_display_mode *mode,
4335                             struct drm_display_mode *adjusted_mode,
4336                             intel_clock_t *clock, intel_clock_t *reduced_clock,
4337                             int num_connectors)
4338 {
4339         struct drm_device *dev = crtc->dev;
4340         struct drm_i915_private *dev_priv = dev->dev_private;
4341         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4342         struct intel_encoder *encoder;
4343         int pipe = intel_crtc->pipe;
4344         u32 dpll;
4345         bool is_sdvo;
4346
4347         i9xx_update_pll_dividers(crtc, clock, reduced_clock);
4348
4349         is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
4350                 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
4351
4352         dpll = DPLL_VGA_MODE_DIS;
4353
4354         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
4355                 dpll |= DPLLB_MODE_LVDS;
4356         else
4357                 dpll |= DPLLB_MODE_DAC_SERIAL;
4358         if (is_sdvo) {
4359                 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4360                 if (pixel_multiplier > 1) {
4361                         if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4362                                 dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
4363                 }
4364                 dpll |= DPLL_DVO_HIGH_SPEED;
4365         }
4366         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
4367                 dpll |= DPLL_DVO_HIGH_SPEED;
4368
4369         /* compute bitmask from p1 value */
4370         if (IS_PINEVIEW(dev))
4371                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
4372         else {
4373                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4374                 if (IS_G4X(dev) && reduced_clock)
4375                         dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4376         }
4377         switch (clock->p2) {
4378         case 5:
4379                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
4380                 break;
4381         case 7:
4382                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
4383                 break;
4384         case 10:
4385                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
4386                 break;
4387         case 14:
4388                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
4389                 break;
4390         }
4391         if (INTEL_INFO(dev)->gen >= 4)
4392                 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
4393
4394         if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
4395                 dpll |= PLL_REF_INPUT_TVCLKINBC;
4396         else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
4397                 /* XXX: just matching BIOS for now */
4398                 /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
4399                 dpll |= 3;
4400         else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4401                  intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4402                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4403         else
4404                 dpll |= PLL_REF_INPUT_DREFCLK;
4405
4406         dpll |= DPLL_VCO_ENABLE;
4407         I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
4408         POSTING_READ(DPLL(pipe));
4409         udelay(150);
4410
4411         for_each_encoder_on_crtc(dev, crtc, encoder)
4412                 if (encoder->pre_pll_enable)
4413                         encoder->pre_pll_enable(encoder);
4414
4415         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
4416                 intel_dp_set_m_n(crtc, mode, adjusted_mode);
4417
4418         I915_WRITE(DPLL(pipe), dpll);
4419
4420         /* Wait for the clocks to stabilize. */
4421         POSTING_READ(DPLL(pipe));
4422         udelay(150);
4423
4424         if (INTEL_INFO(dev)->gen >= 4) {
4425                 u32 temp = 0;
4426                 if (is_sdvo) {
4427                         temp = intel_mode_get_pixel_multiplier(adjusted_mode);
4428                         if (temp > 1)
4429                                 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
4430                         else
4431                                 temp = 0;
4432                 }
4433                 I915_WRITE(DPLL_MD(pipe), temp);
4434         } else {
4435                 /* The pixel multiplier can only be updated once the
4436                  * DPLL is enabled and the clocks are stable.
4437                  *
4438                  * So write it again.
4439                  */
4440                 I915_WRITE(DPLL(pipe), dpll);
4441         }
4442 }
4443
4444 static void i8xx_update_pll(struct drm_crtc *crtc,
4445                             struct drm_display_mode *adjusted_mode,
4446                             intel_clock_t *clock, intel_clock_t *reduced_clock,
4447                             int num_connectors)
4448 {
4449         struct drm_device *dev = crtc->dev;
4450         struct drm_i915_private *dev_priv = dev->dev_private;
4451         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4452         struct intel_encoder *encoder;
4453         int pipe = intel_crtc->pipe;
4454         u32 dpll;
4455
4456         i9xx_update_pll_dividers(crtc, clock, reduced_clock);
4457
4458         dpll = DPLL_VGA_MODE_DIS;
4459
4460         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
4461                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4462         } else {
4463                 if (clock->p1 == 2)
4464                         dpll |= PLL_P1_DIVIDE_BY_TWO;
4465                 else
4466                         dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4467                 if (clock->p2 == 4)
4468                         dpll |= PLL_P2_DIVIDE_BY_4;
4469         }
4470
4471         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
4472                 /* XXX: just matching BIOS for now */
4473                 /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
4474                 dpll |= 3;
4475         else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4476                  intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4477                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4478         else
4479                 dpll |= PLL_REF_INPUT_DREFCLK;
4480
4481         dpll |= DPLL_VCO_ENABLE;
4482         I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
4483         POSTING_READ(DPLL(pipe));
4484         udelay(150);
4485
4486         for_each_encoder_on_crtc(dev, crtc, encoder)
4487                 if (encoder->pre_pll_enable)
4488                         encoder->pre_pll_enable(encoder);
4489
4490         I915_WRITE(DPLL(pipe), dpll);
4491
4492         /* Wait for the clocks to stabilize. */
4493         POSTING_READ(DPLL(pipe));
4494         udelay(150);
4495
4496         /* The pixel multiplier can only be updated once the
4497          * DPLL is enabled and the clocks are stable.
4498          *
4499          * So write it again.
4500          */
4501         I915_WRITE(DPLL(pipe), dpll);
4502 }
4503
4504 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
4505                                    struct drm_display_mode *mode,
4506                                    struct drm_display_mode *adjusted_mode)
4507 {
4508         struct drm_device *dev = intel_crtc->base.dev;
4509         struct drm_i915_private *dev_priv = dev->dev_private;
4510         enum pipe pipe = intel_crtc->pipe;
4511         enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
4512         uint32_t vsyncshift;
4513
4514         if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4515                 /* the chip adds 2 halflines automatically */
4516                 adjusted_mode->crtc_vtotal -= 1;
4517                 adjusted_mode->crtc_vblank_end -= 1;
4518                 vsyncshift = adjusted_mode->crtc_hsync_start
4519                              - adjusted_mode->crtc_htotal / 2;
4520         } else {
4521                 vsyncshift = 0;
4522         }
4523
4524         if (INTEL_INFO(dev)->gen > 3)
4525                 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
4526
4527         I915_WRITE(HTOTAL(cpu_transcoder),
4528                    (adjusted_mode->crtc_hdisplay - 1) |
4529                    ((adjusted_mode->crtc_htotal - 1) << 16));
4530         I915_WRITE(HBLANK(cpu_transcoder),
4531                    (adjusted_mode->crtc_hblank_start - 1) |
4532                    ((adjusted_mode->crtc_hblank_end - 1) << 16));
4533         I915_WRITE(HSYNC(cpu_transcoder),
4534                    (adjusted_mode->crtc_hsync_start - 1) |
4535                    ((adjusted_mode->crtc_hsync_end - 1) << 16));
4536
4537         I915_WRITE(VTOTAL(cpu_transcoder),
4538                    (adjusted_mode->crtc_vdisplay - 1) |
4539                    ((adjusted_mode->crtc_vtotal - 1) << 16));
4540         I915_WRITE(VBLANK(cpu_transcoder),
4541                    (adjusted_mode->crtc_vblank_start - 1) |
4542                    ((adjusted_mode->crtc_vblank_end - 1) << 16));
4543         I915_WRITE(VSYNC(cpu_transcoder),
4544                    (adjusted_mode->crtc_vsync_start - 1) |
4545                    ((adjusted_mode->crtc_vsync_end - 1) << 16));
4546
4547         /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
4548          * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
4549          * documented on the DDI_FUNC_CTL register description, EDP Input Select
4550          * bits. */
4551         if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
4552             (pipe == PIPE_B || pipe == PIPE_C))
4553                 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
4554
4555         /* pipesrc controls the size that is scaled from, which should
4556          * always be the user's requested size.
4557          */
4558         I915_WRITE(PIPESRC(pipe),
4559                    ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
4560 }
4561
4562 static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4563                               struct drm_display_mode *mode,
4564                               struct drm_display_mode *adjusted_mode,
4565                               int x, int y,
4566                               struct drm_framebuffer *fb)
4567 {
4568         struct drm_device *dev = crtc->dev;
4569         struct drm_i915_private *dev_priv = dev->dev_private;
4570         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4571         int pipe = intel_crtc->pipe;
4572         int plane = intel_crtc->plane;
4573         int refclk, num_connectors = 0;
4574         intel_clock_t clock, reduced_clock;
4575         u32 dspcntr, pipeconf;
4576         bool ok, has_reduced_clock = false, is_sdvo = false;
4577         bool is_lvds = false, is_tv = false, is_dp = false;
4578         struct intel_encoder *encoder;
4579         const intel_limit_t *limit;
4580         int ret;
4581
4582         for_each_encoder_on_crtc(dev, crtc, encoder) {
4583                 switch (encoder->type) {
4584                 case INTEL_OUTPUT_LVDS:
4585                         is_lvds = true;
4586                         break;
4587                 case INTEL_OUTPUT_SDVO:
4588                 case INTEL_OUTPUT_HDMI:
4589                         is_sdvo = true;
4590                         if (encoder->needs_tv_clock)
4591                                 is_tv = true;
4592                         break;
4593                 case INTEL_OUTPUT_TVOUT:
4594                         is_tv = true;
4595                         break;
4596                 case INTEL_OUTPUT_DISPLAYPORT:
4597                         is_dp = true;
4598                         break;
4599                 }
4600
4601                 num_connectors++;
4602         }
4603
4604         refclk = i9xx_get_refclk(crtc, num_connectors);
4605
4606         /*
4607          * Returns a set of divisors for the desired target clock with the given
4608          * refclk, or FALSE.  The returned values represent the clock equation:
4609          * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4610          */
4611         limit = intel_limit(crtc, refclk);
4612         ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
4613                              &clock);
4614         if (!ok) {
4615                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
4616                 return -EINVAL;
4617         }
4618
4619         /* Ensure that the cursor is valid for the new mode before changing... */
4620         intel_crtc_update_cursor(crtc, true);
4621
4622         if (is_lvds && dev_priv->lvds_downclock_avail) {
4623                 /*
4624                  * Ensure we match the reduced clock's P to the target clock.
4625                  * If the clocks don't match, we can't switch the display clock
4626                  * by using the FP0/FP1. In such case we will disable the LVDS
4627                  * downclock feature.
4628                 */
4629                 has_reduced_clock = limit->find_pll(limit, crtc,
4630                                                     dev_priv->lvds_downclock,
4631                                                     refclk,
4632                                                     &clock,
4633                                                     &reduced_clock);
4634         }
4635
4636         if (is_sdvo && is_tv)
4637                 i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
4638
4639         if (IS_GEN2(dev))
4640                 i8xx_update_pll(crtc, adjusted_mode, &clock,
4641                                 has_reduced_clock ? &reduced_clock : NULL,
4642                                 num_connectors);
4643         else if (IS_VALLEYVIEW(dev))
4644                 vlv_update_pll(crtc, mode, adjusted_mode, &clock,
4645                                 has_reduced_clock ? &reduced_clock : NULL,
4646                                 num_connectors);
4647         else
4648                 i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
4649                                 has_reduced_clock ? &reduced_clock : NULL,
4650                                 num_connectors);
4651
4652         /* setup pipeconf */
4653         pipeconf = I915_READ(PIPECONF(pipe));
4654
4655         /* Set up the display plane register */
4656         dspcntr = DISPPLANE_GAMMA_ENABLE;
4657
4658         if (pipe == 0)
4659                 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
4660         else
4661                 dspcntr |= DISPPLANE_SEL_PIPE_B;
4662
4663         if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
4664                 /* Enable pixel doubling when the dot clock is > 90% of the (display)
4665                  * core speed.
4666                  *
4667                  * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
4668                  * pipe == 0 check?
4669                  */
4670                 if (mode->clock >
4671                     dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
4672                         pipeconf |= PIPECONF_DOUBLE_WIDE;
4673                 else
4674                         pipeconf &= ~PIPECONF_DOUBLE_WIDE;
4675         }
4676
4677         /* default to 8bpc */
4678         pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN);
4679         if (is_dp) {
4680                 if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
4681                         pipeconf |= PIPECONF_6BPC |
4682                                     PIPECONF_DITHER_EN |
4683                                     PIPECONF_DITHER_TYPE_SP;
4684                 }
4685         }
4686
4687         if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
4688                 if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
4689                         pipeconf |= PIPECONF_6BPC |
4690                                         PIPECONF_ENABLE |
4691                                         I965_PIPECONF_ACTIVE;
4692                 }
4693         }
4694
4695         DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
4696         drm_mode_debug_printmodeline(mode);
4697
4698         if (HAS_PIPE_CXSR(dev)) {
4699                 if (intel_crtc->lowfreq_avail) {
4700                         DRM_DEBUG_KMS("enabling CxSR downclocking\n");
4701                         pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
4702                 } else {
4703                         DRM_DEBUG_KMS("disabling CxSR downclocking\n");
4704                         pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
4705                 }
4706         }
4707
4708         pipeconf &= ~PIPECONF_INTERLACE_MASK;
4709         if (!IS_GEN2(dev) &&
4710             adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
4711                 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
4712         else
4713                 pipeconf |= PIPECONF_PROGRESSIVE;
4714
4715         intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
4716
4717         /* pipesrc and dspsize control the size that is scaled from,
4718          * which should always be the user's requested size.
4719          */
4720         I915_WRITE(DSPSIZE(plane),
4721                    ((mode->vdisplay - 1) << 16) |
4722                    (mode->hdisplay - 1));
4723         I915_WRITE(DSPPOS(plane), 0);
4724
4725         I915_WRITE(PIPECONF(pipe), pipeconf);
4726         POSTING_READ(PIPECONF(pipe));
4727         intel_enable_pipe(dev_priv, pipe, false);
4728
4729         intel_wait_for_vblank(dev, pipe);
4730
4731         I915_WRITE(DSPCNTR(plane), dspcntr);
4732         POSTING_READ(DSPCNTR(plane));
4733
4734         ret = intel_pipe_set_base(crtc, x, y, fb);
4735
4736         intel_update_watermarks(dev);
4737
4738         return ret;
4739 }
4740
4741 static void ironlake_init_pch_refclk(struct drm_device *dev)
4742 {
4743         struct drm_i915_private *dev_priv = dev->dev_private;
4744         struct drm_mode_config *mode_config = &dev->mode_config;
4745         struct intel_encoder *encoder;
4746         u32 temp;
4747         bool has_lvds = false;
4748         bool has_cpu_edp = false;
4749         bool has_pch_edp = false;
4750         bool has_panel = false;
4751         bool has_ck505 = false;
4752         bool can_ssc = false;
4753
4754         /* We need to take the global config into account */
4755         list_for_each_entry(encoder, &mode_config->encoder_list,
4756                             base.head) {
4757                 switch (encoder->type) {
4758                 case INTEL_OUTPUT_LVDS:
4759                         has_panel = true;
4760                         has_lvds = true;
4761                         break;
4762                 case INTEL_OUTPUT_EDP:
4763                         has_panel = true;
4764                         if (intel_encoder_is_pch_edp(&encoder->base))
4765                                 has_pch_edp = true;
4766                         else
4767                                 has_cpu_edp = true;
4768                         break;
4769                 }
4770         }
4771
4772         if (HAS_PCH_IBX(dev)) {
4773                 has_ck505 = dev_priv->display_clock_mode;
4774                 can_ssc = has_ck505;
4775         } else {
4776                 has_ck505 = false;
4777                 can_ssc = true;
4778         }
4779
4780         DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
4781                       has_panel, has_lvds, has_pch_edp, has_cpu_edp,
4782                       has_ck505);
4783
4784         /* Ironlake: try to setup display ref clock before DPLL
4785          * enabling. This is only under driver's control after
4786          * PCH B stepping, previous chipset stepping should be
4787          * ignoring this setting.
4788          */
4789         temp = I915_READ(PCH_DREF_CONTROL);
4790         /* Always enable nonspread source */
4791         temp &= ~DREF_NONSPREAD_SOURCE_MASK;
4792
4793         if (has_ck505)
4794                 temp |= DREF_NONSPREAD_CK505_ENABLE;
4795         else
4796                 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
4797
4798         if (has_panel) {
4799                 temp &= ~DREF_SSC_SOURCE_MASK;
4800                 temp |= DREF_SSC_SOURCE_ENABLE;
4801
4802                 /* SSC must be turned on before enabling the CPU output  */
4803                 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
4804                         DRM_DEBUG_KMS("Using SSC on panel\n");
4805                         temp |= DREF_SSC1_ENABLE;
4806                 } else
4807                         temp &= ~DREF_SSC1_ENABLE;
4808
4809                 /* Get SSC going before enabling the outputs */
4810                 I915_WRITE(PCH_DREF_CONTROL, temp);
4811                 POSTING_READ(PCH_DREF_CONTROL);
4812                 udelay(200);
4813
4814                 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4815
4816                 /* Enable CPU source on CPU attached eDP */
4817                 if (has_cpu_edp) {
4818                         if (intel_panel_use_ssc(dev_priv) && can_ssc) {
4819                                 DRM_DEBUG_KMS("Using SSC on eDP\n");
4820                                 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
4821                         }
4822                         else
4823                                 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
4824                 } else
4825                         temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4826
4827                 I915_WRITE(PCH_DREF_CONTROL, temp);
4828                 POSTING_READ(PCH_DREF_CONTROL);
4829                 udelay(200);
4830         } else {
4831                 DRM_DEBUG_KMS("Disabling SSC entirely\n");
4832
4833                 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4834
4835                 /* Turn off CPU output */
4836                 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4837
4838                 I915_WRITE(PCH_DREF_CONTROL, temp);
4839                 POSTING_READ(PCH_DREF_CONTROL);
4840                 udelay(200);
4841
4842                 /* Turn off the SSC source */
4843                 temp &= ~DREF_SSC_SOURCE_MASK;
4844                 temp |= DREF_SSC_SOURCE_DISABLE;
4845
4846                 /* Turn off SSC1 */
4847                 temp &= ~ DREF_SSC1_ENABLE;
4848
4849                 I915_WRITE(PCH_DREF_CONTROL, temp);
4850                 POSTING_READ(PCH_DREF_CONTROL);
4851                 udelay(200);
4852         }
4853 }
4854
4855 /* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
4856 static void lpt_init_pch_refclk(struct drm_device *dev)
4857 {
4858         struct drm_i915_private *dev_priv = dev->dev_private;
4859         struct drm_mode_config *mode_config = &dev->mode_config;
4860         struct intel_encoder *encoder;
4861         bool has_vga = false;
4862         bool is_sdv = false;
4863         u32 tmp;
4864
4865         list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4866                 switch (encoder->type) {
4867                 case INTEL_OUTPUT_ANALOG:
4868                         has_vga = true;
4869                         break;
4870                 }
4871         }
4872
4873         if (!has_vga)
4874                 return;
4875
4876         mutex_lock(&dev_priv->dpio_lock);
4877
4878         /* XXX: Rip out SDV support once Haswell ships for real. */
4879         if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
4880                 is_sdv = true;
4881
4882         tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
4883         tmp &= ~SBI_SSCCTL_DISABLE;
4884         tmp |= SBI_SSCCTL_PATHALT;
4885         intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
4886
4887         udelay(24);
4888
4889         tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
4890         tmp &= ~SBI_SSCCTL_PATHALT;
4891         intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
4892
4893         if (!is_sdv) {
4894                 tmp = I915_READ(SOUTH_CHICKEN2);
4895                 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
4896                 I915_WRITE(SOUTH_CHICKEN2, tmp);
4897
4898                 if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
4899                                        FDI_MPHY_IOSFSB_RESET_STATUS, 100))
4900                         DRM_ERROR("FDI mPHY reset assert timeout\n");
4901
4902                 tmp = I915_READ(SOUTH_CHICKEN2);
4903                 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
4904                 I915_WRITE(SOUTH_CHICKEN2, tmp);
4905
4906                 if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
4907                                         FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
4908                                        100))
4909                         DRM_ERROR("FDI mPHY reset de-assert timeout\n");
4910         }
4911
4912         tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
4913         tmp &= ~(0xFF << 24);
4914         tmp |= (0x12 << 24);
4915         intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
4916
4917         if (!is_sdv) {
4918                 tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY);
4919                 tmp &= ~(0x3 << 6);
4920                 tmp |= (1 << 6) | (1 << 0);
4921                 intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY);
4922         }
4923
4924         if (is_sdv) {
4925                 tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
4926                 tmp |= 0x7FFF;
4927                 intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
4928         }
4929
4930         tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
4931         tmp |= (1 << 11);
4932         intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
4933
4934         tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
4935         tmp |= (1 << 11);
4936         intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
4937
4938         if (is_sdv) {
4939                 tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
4940                 tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
4941                 intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
4942
4943                 tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
4944                 tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
4945                 intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
4946
4947                 tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
4948                 tmp |= (0x3F << 8);
4949                 intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
4950
4951                 tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
4952                 tmp |= (0x3F << 8);
4953                 intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
4954         }
4955
4956         tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
4957         tmp |= (1 << 24) | (1 << 21) | (1 << 18);
4958         intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
4959
4960         tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
4961         tmp |= (1 << 24) | (1 << 21) | (1 << 18);
4962         intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
4963
4964         if (!is_sdv) {
4965                 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
4966                 tmp &= ~(7 << 13);
4967                 tmp |= (5 << 13);
4968                 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
4969
4970                 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
4971                 tmp &= ~(7 << 13);
4972                 tmp |= (5 << 13);
4973                 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
4974         }
4975
4976         tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
4977         tmp &= ~0xFF;
4978         tmp |= 0x1C;
4979         intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
4980
4981         tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
4982         tmp &= ~0xFF;
4983         tmp |= 0x1C;
4984         intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
4985
4986         tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
4987         tmp &= ~(0xFF << 16);
4988         tmp |= (0x1C << 16);
4989         intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
4990
4991         tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
4992         tmp &= ~(0xFF << 16);
4993         tmp |= (0x1C << 16);
4994         intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
4995
4996         if (!is_sdv) {
4997                 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
4998                 tmp |= (1 << 27);
4999                 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
5000
5001                 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
5002                 tmp |= (1 << 27);
5003                 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
5004
5005                 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
5006                 tmp &= ~(0xF << 28);
5007                 tmp |= (4 << 28);
5008                 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
5009
5010                 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
5011                 tmp &= ~(0xF << 28);
5012                 tmp |= (4 << 28);
5013                 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
5014         }
5015
5016         /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
5017         tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
5018         tmp |= SBI_DBUFF0_ENABLE;
5019         intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
5020
5021         mutex_unlock(&dev_priv->dpio_lock);
5022 }
5023
5024 /*
5025  * Initialize reference clocks when the driver loads
5026  */
5027 void intel_init_pch_refclk(struct drm_device *dev)
5028 {
5029         if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
5030                 ironlake_init_pch_refclk(dev);
5031         else if (HAS_PCH_LPT(dev))
5032                 lpt_init_pch_refclk(dev);
5033 }
5034
5035 static int ironlake_get_refclk(struct drm_crtc *crtc)
5036 {
5037         struct drm_device *dev = crtc->dev;
5038         struct drm_i915_private *dev_priv = dev->dev_private;
5039         struct intel_encoder *encoder;
5040         struct intel_encoder *edp_encoder = NULL;
5041         int num_connectors = 0;
5042         bool is_lvds = false;
5043
5044         for_each_encoder_on_crtc(dev, crtc, encoder) {
5045                 switch (encoder->type) {
5046                 case INTEL_OUTPUT_LVDS:
5047                         is_lvds = true;
5048                         break;
5049                 case INTEL_OUTPUT_EDP:
5050                         edp_encoder = encoder;
5051                         break;
5052                 }
5053                 num_connectors++;
5054         }
5055
5056         if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5057                 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5058                               dev_priv->lvds_ssc_freq);
5059                 return dev_priv->lvds_ssc_freq * 1000;
5060         }
5061
5062         return 120000;
5063 }
5064
5065 static void ironlake_set_pipeconf(struct drm_crtc *crtc,
5066                                   struct drm_display_mode *adjusted_mode,
5067                                   bool dither)
5068 {
5069         struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5070         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5071         int pipe = intel_crtc->pipe;
5072         uint32_t val;
5073
5074         val = I915_READ(PIPECONF(pipe));
5075
5076         val &= ~PIPECONF_BPC_MASK;
5077         switch (intel_crtc->bpp) {
5078         case 18:
5079                 val |= PIPECONF_6BPC;
5080                 break;
5081         case 24:
5082                 val |= PIPECONF_8BPC;
5083                 break;
5084         case 30:
5085                 val |= PIPECONF_10BPC;
5086                 break;
5087         case 36:
5088                 val |= PIPECONF_12BPC;
5089                 break;
5090         default:
5091                 /* Case prevented by intel_choose_pipe_bpp_dither. */
5092                 BUG();
5093         }
5094
5095         val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
5096         if (dither)
5097                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5098
5099         val &= ~PIPECONF_INTERLACE_MASK;
5100         if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
5101                 val |= PIPECONF_INTERLACED_ILK;
5102         else
5103                 val |= PIPECONF_PROGRESSIVE;
5104
5105         if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
5106                 val |= PIPECONF_COLOR_RANGE_SELECT;
5107         else
5108                 val &= ~PIPECONF_COLOR_RANGE_SELECT;
5109
5110         I915_WRITE(PIPECONF(pipe), val);
5111         POSTING_READ(PIPECONF(pipe));
5112 }
5113
5114 static void haswell_set_pipeconf(struct drm_crtc *crtc,
5115                                  struct drm_display_mode *adjusted_mode,
5116                                  bool dither)
5117 {
5118         struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5119         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5120         enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
5121         uint32_t val;
5122
5123         val = I915_READ(PIPECONF(cpu_transcoder));
5124
5125         val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
5126         if (dither)
5127                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5128
5129         val &= ~PIPECONF_INTERLACE_MASK_HSW;
5130         if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
5131                 val |= PIPECONF_INTERLACED_ILK;
5132         else
5133                 val |= PIPECONF_PROGRESSIVE;
5134
5135         I915_WRITE(PIPECONF(cpu_transcoder), val);
5136         POSTING_READ(PIPECONF(cpu_transcoder));
5137 }
5138
5139 static bool ironlake_compute_clocks(struct drm_crtc *crtc,
5140                                     struct drm_display_mode *adjusted_mode,
5141                                     intel_clock_t *clock,
5142                                     bool *has_reduced_clock,
5143                                     intel_clock_t *reduced_clock)
5144 {
5145         struct drm_device *dev = crtc->dev;
5146         struct drm_i915_private *dev_priv = dev->dev_private;
5147         struct intel_encoder *intel_encoder;
5148         int refclk;
5149         const intel_limit_t *limit;
5150         bool ret, is_sdvo = false, is_tv = false, is_lvds = false;
5151
5152         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5153                 switch (intel_encoder->type) {
5154                 case INTEL_OUTPUT_LVDS:
5155                         is_lvds = true;
5156                         break;
5157                 case INTEL_OUTPUT_SDVO:
5158                 case INTEL_OUTPUT_HDMI:
5159                         is_sdvo = true;
5160                         if (intel_encoder->needs_tv_clock)
5161                                 is_tv = true;
5162                         break;
5163                 case INTEL_OUTPUT_TVOUT:
5164                         is_tv = true;
5165                         break;
5166                 }
5167         }
5168
5169         refclk = ironlake_get_refclk(crtc);
5170
5171         /*
5172          * Returns a set of divisors for the desired target clock with the given
5173          * refclk, or FALSE.  The returned values represent the clock equation:
5174          * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
5175          */
5176         limit = intel_limit(crtc, refclk);
5177         ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
5178                               clock);
5179         if (!ret)
5180                 return false;
5181
5182         if (is_lvds && dev_priv->lvds_downclock_avail) {
5183                 /*
5184                  * Ensure we match the reduced clock's P to the target clock.
5185                  * If the clocks don't match, we can't switch the display clock
5186                  * by using the FP0/FP1. In such case we will disable the LVDS
5187                  * downclock feature.
5188                 */
5189                 *has_reduced_clock = limit->find_pll(limit, crtc,
5190                                                      dev_priv->lvds_downclock,
5191                                                      refclk,
5192                                                      clock,
5193                                                      reduced_clock);
5194         }
5195
5196         if (is_sdvo && is_tv)
5197                 i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock);
5198
5199         return true;
5200 }
5201
5202 static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
5203 {
5204         struct drm_i915_private *dev_priv = dev->dev_private;
5205         uint32_t temp;
5206
5207         temp = I915_READ(SOUTH_CHICKEN1);
5208         if (temp & FDI_BC_BIFURCATION_SELECT)
5209                 return;
5210
5211         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
5212         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
5213
5214         temp |= FDI_BC_BIFURCATION_SELECT;
5215         DRM_DEBUG_KMS("enabling fdi C rx\n");
5216         I915_WRITE(SOUTH_CHICKEN1, temp);
5217         POSTING_READ(SOUTH_CHICKEN1);
5218 }
5219
5220 static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
5221 {
5222         struct drm_device *dev = intel_crtc->base.dev;
5223         struct drm_i915_private *dev_priv = dev->dev_private;
5224         struct intel_crtc *pipe_B_crtc =
5225                 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
5226
5227         DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n",
5228                       intel_crtc->pipe, intel_crtc->fdi_lanes);
5229         if (intel_crtc->fdi_lanes > 4) {
5230                 DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n",
5231                               intel_crtc->pipe, intel_crtc->fdi_lanes);
5232                 /* Clamp lanes to avoid programming the hw with bogus values. */
5233                 intel_crtc->fdi_lanes = 4;
5234
5235                 return false;
5236         }
5237
5238         if (dev_priv->num_pipe == 2)
5239                 return true;
5240
5241         switch (intel_crtc->pipe) {
5242         case PIPE_A:
5243                 return true;
5244         case PIPE_B:
5245                 if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
5246                     intel_crtc->fdi_lanes > 2) {
5247                         DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
5248                                       intel_crtc->pipe, intel_crtc->fdi_lanes);
5249                         /* Clamp lanes to avoid programming the hw with bogus values. */
5250                         intel_crtc->fdi_lanes = 2;
5251
5252                         return false;
5253                 }
5254
5255                 if (intel_crtc->fdi_lanes > 2)
5256                         WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
5257                 else
5258                         cpt_enable_fdi_bc_bifurcation(dev);
5259
5260                 return true;
5261         case PIPE_C:
5262                 if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) {
5263                         if (intel_crtc->fdi_lanes > 2) {
5264                                 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
5265                                               intel_crtc->pipe, intel_crtc->fdi_lanes);
5266                                 /* Clamp lanes to avoid programming the hw with bogus values. */
5267                                 intel_crtc->fdi_lanes = 2;
5268
5269                                 return false;
5270                         }
5271                 } else {
5272                         DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
5273                         return false;
5274                 }
5275
5276                 cpt_enable_fdi_bc_bifurcation(dev);
5277
5278                 return true;
5279         default:
5280                 BUG();
5281         }
5282 }
5283
5284 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
5285 {
5286         /*
5287          * Account for spread spectrum to avoid
5288          * oversubscribing the link. Max center spread
5289          * is 2.5%; use 5% for safety's sake.
5290          */
5291         u32 bps = target_clock * bpp * 21 / 20;
5292         return bps / (link_bw * 8) + 1;
5293 }
5294
5295 static void ironlake_set_m_n(struct drm_crtc *crtc,
5296                              struct drm_display_mode *mode,
5297                              struct drm_display_mode *adjusted_mode)
5298 {
5299         struct drm_device *dev = crtc->dev;
5300         struct drm_i915_private *dev_priv = dev->dev_private;
5301         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5302         enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
5303         struct intel_encoder *intel_encoder, *edp_encoder = NULL;
5304         struct intel_link_m_n m_n = {0};
5305         int target_clock, pixel_multiplier, lane, link_bw;
5306         bool is_dp = false, is_cpu_edp = false;
5307
5308         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5309                 switch (intel_encoder->type) {
5310                 case INTEL_OUTPUT_DISPLAYPORT:
5311                         is_dp = true;
5312                         break;
5313                 case INTEL_OUTPUT_EDP:
5314                         is_dp = true;
5315                         if (!intel_encoder_is_pch_edp(&intel_encoder->base))
5316                                 is_cpu_edp = true;
5317                         edp_encoder = intel_encoder;
5318                         break;
5319                 }
5320         }
5321
5322         /* FDI link */
5323         pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5324         lane = 0;
5325         /* CPU eDP doesn't require FDI link, so just set DP M/N
5326            according to current link config */
5327         if (is_cpu_edp) {
5328                 intel_edp_link_config(edp_encoder, &lane, &link_bw);
5329         } else {
5330                 /* FDI is a binary signal running at ~2.7GHz, encoding
5331                  * each output octet as 10 bits. The actual frequency
5332                  * is stored as a divider into a 100MHz clock, and the
5333                  * mode pixel clock is stored in units of 1KHz.
5334                  * Hence the bw of each lane in terms of the mode signal
5335                  * is:
5336                  */
5337                 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
5338         }
5339
5340         /* [e]DP over FDI requires target mode clock instead of link clock. */
5341         if (edp_encoder)
5342                 target_clock = intel_edp_target_clock(edp_encoder, mode);
5343         else if (is_dp)
5344                 target_clock = mode->clock;
5345         else
5346                 target_clock = adjusted_mode->clock;
5347
5348         if (!lane)
5349                 lane = ironlake_get_lanes_required(target_clock, link_bw,
5350                                                    intel_crtc->bpp);
5351
5352         intel_crtc->fdi_lanes = lane;
5353
5354         if (pixel_multiplier > 1)
5355                 link_bw *= pixel_multiplier;
5356         intel_link_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, &m_n);
5357
5358         I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
5359         I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
5360         I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
5361         I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
5362 }
5363
5364 static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
5365                                       struct drm_display_mode *adjusted_mode,
5366                                       intel_clock_t *clock, u32 fp)
5367 {
5368         struct drm_crtc *crtc = &intel_crtc->base;
5369         struct drm_device *dev = crtc->dev;
5370         struct drm_i915_private *dev_priv = dev->dev_private;
5371         struct intel_encoder *intel_encoder;
5372         uint32_t dpll;
5373         int factor, pixel_multiplier, num_connectors = 0;
5374         bool is_lvds = false, is_sdvo = false, is_tv = false;
5375         bool is_dp = false, is_cpu_edp = false;
5376
5377         for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5378                 switch (intel_encoder->type) {
5379                 case INTEL_OUTPUT_LVDS:
5380                         is_lvds = true;
5381                         break;
5382                 case INTEL_OUTPUT_SDVO:
5383                 case INTEL_OUTPUT_HDMI:
5384                         is_sdvo = true;
5385                         if (intel_encoder->needs_tv_clock)
5386                                 is_tv = true;
5387                         break;
5388                 case INTEL_OUTPUT_TVOUT:
5389                         is_tv = true;
5390                         break;
5391                 case INTEL_OUTPUT_DISPLAYPORT:
5392                         is_dp = true;
5393                         break;
5394                 case INTEL_OUTPUT_EDP:
5395                         is_dp = true;
5396                         if (!intel_encoder_is_pch_edp(&intel_encoder->base))
5397                                 is_cpu_edp = true;
5398                         break;
5399                 }
5400
5401                 num_connectors++;
5402         }
5403
5404         /* Enable autotuning of the PLL clock (if permissible) */
5405         factor = 21;
5406         if (is_lvds) {
5407                 if ((intel_panel_use_ssc(dev_priv) &&
5408                      dev_priv->lvds_ssc_freq == 100) ||
5409                     intel_is_dual_link_lvds(dev))
5410                         factor = 25;
5411         } else if (is_sdvo && is_tv)
5412                 factor = 20;
5413
5414         if (clock->m < factor * clock->n)
5415                 fp |= FP_CB_TUNE;
5416
5417         dpll = 0;
5418
5419         if (is_lvds)
5420                 dpll |= DPLLB_MODE_LVDS;
5421         else
5422                 dpll |= DPLLB_MODE_DAC_SERIAL;
5423         if (is_sdvo) {
5424                 pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5425                 if (pixel_multiplier > 1) {
5426                         dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
5427                 }
5428                 dpll |= DPLL_DVO_HIGH_SPEED;
5429         }
5430         if (is_dp && !is_cpu_edp)
5431                 dpll |= DPLL_DVO_HIGH_SPEED;
5432
5433         /* compute bitmask from p1 value */
5434         dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5435         /* also FPA1 */
5436         dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5437
5438         switch (clock->p2) {
5439         case 5:
5440                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5441                 break;
5442         case 7:
5443                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5444                 break;
5445         case 10:
5446                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5447                 break;
5448         case 14:
5449                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5450                 break;
5451         }
5452
5453         if (is_sdvo && is_tv)
5454                 dpll |= PLL_REF_INPUT_TVCLKINBC;
5455         else if (is_tv)
5456                 /* XXX: just matching BIOS for now */
5457                 /*      dpll |= PLL_REF_INPUT_TVCLKINBC; */
5458                 dpll |= 3;
5459         else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5460                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5461         else
5462                 dpll |= PLL_REF_INPUT_DREFCLK;
5463
5464         return dpll;
5465 }
5466
5467 static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5468                                   struct drm_display_mode *mode,
5469                                   struct drm_display_mode *adjusted_mode,
5470                                   int x, int y,
5471                                   struct drm_framebuffer *fb)
5472 {
5473         struct drm_device *dev = crtc->dev;
5474         struct drm_i915_private *dev_priv = dev->dev_private;
5475         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5476         int pipe = intel_crtc->pipe;
5477         int plane = intel_crtc->plane;
5478         int num_connectors = 0;
5479         intel_clock_t clock, reduced_clock;
5480         u32 dpll, fp = 0, fp2 = 0;
5481         bool ok, has_reduced_clock = false;
5482         bool is_lvds = false, is_dp = false, is_cpu_edp = false;
5483         struct intel_encoder *encoder;
5484         int ret;
5485         bool dither, fdi_config_ok;
5486
5487         for_each_encoder_on_crtc(dev, crtc, encoder) {
5488                 switch (encoder->type) {
5489                 case INTEL_OUTPUT_LVDS:
5490                         is_lvds = true;
5491                         break;
5492                 case INTEL_OUTPUT_DISPLAYPORT:
5493                         is_dp = true;
5494                         break;
5495                 case INTEL_OUTPUT_EDP:
5496                         is_dp = true;
5497                         if (!intel_encoder_is_pch_edp(&encoder->base))
5498                                 is_cpu_edp = true;
5499                         break;
5500                 }
5501
5502                 num_connectors++;
5503         }
5504
5505         WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
5506              "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
5507
5508         ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
5509                                      &has_reduced_clock, &reduced_clock);
5510         if (!ok) {
5511                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5512                 return -EINVAL;
5513         }
5514
5515         /* Ensure that the cursor is valid for the new mode before changing... */
5516         intel_crtc_update_cursor(crtc, true);
5517
5518         /* determine panel color depth */
5519         dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
5520                                               adjusted_mode);
5521         if (is_lvds && dev_priv->lvds_dither)
5522                 dither = true;
5523
5524         fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5525         if (has_reduced_clock)
5526                 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5527                         reduced_clock.m2;
5528
5529         dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp);
5530
5531         DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
5532         drm_mode_debug_printmodeline(mode);
5533
5534         /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
5535         if (!is_cpu_edp) {
5536                 struct intel_pch_pll *pll;
5537
5538                 pll = intel_get_pch_pll(intel_crtc, dpll, fp);
5539                 if (pll == NULL) {
5540                         DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
5541                                          pipe);
5542                         return -EINVAL;
5543                 }
5544         } else
5545                 intel_put_pch_pll(intel_crtc);
5546
5547         if (is_dp && !is_cpu_edp)
5548                 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5549
5550         for_each_encoder_on_crtc(dev, crtc, encoder)
5551                 if (encoder->pre_pll_enable)
5552                         encoder->pre_pll_enable(encoder);
5553
5554         if (intel_crtc->pch_pll) {
5555                 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
5556
5557                 /* Wait for the clocks to stabilize. */
5558                 POSTING_READ(intel_crtc->pch_pll->pll_reg);
5559                 udelay(150);
5560
5561                 /* The pixel multiplier can only be updated once the
5562                  * DPLL is enabled and the clocks are stable.
5563                  *
5564                  * So write it again.
5565                  */
5566                 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
5567         }
5568
5569         intel_crtc->lowfreq_avail = false;
5570         if (intel_crtc->pch_pll) {
5571                 if (is_lvds && has_reduced_clock && i915_powersave) {
5572                         I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
5573                         intel_crtc->lowfreq_avail = true;
5574                 } else {
5575                         I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
5576                 }
5577         }
5578
5579         intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
5580
5581         /* Note, this also computes intel_crtc->fdi_lanes which is used below in
5582          * ironlake_check_fdi_lanes. */
5583         ironlake_set_m_n(crtc, mode, adjusted_mode);
5584
5585         fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
5586
5587         ironlake_set_pipeconf(crtc, adjusted_mode, dither);
5588
5589         intel_wait_for_vblank(dev, pipe);
5590
5591         /* Set up the display plane register */
5592         I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
5593         POSTING_READ(DSPCNTR(plane));
5594
5595         ret = intel_pipe_set_base(crtc, x, y, fb);
5596
5597         intel_update_watermarks(dev);
5598
5599         intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
5600
5601         return fdi_config_ok ? ret : -EINVAL;
5602 }
5603
5604 static int haswell_crtc_mode_set(struct drm_crtc *crtc,
5605                                  struct drm_display_mode *mode,
5606                                  struct drm_display_mode *adjusted_mode,
5607                                  int x, int y,
5608                                  struct drm_framebuffer *fb)
5609 {
5610         struct drm_device *dev = crtc->dev;
5611         struct drm_i915_private *dev_priv = dev->dev_private;
5612         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5613         int pipe = intel_crtc->pipe;
5614         int plane = intel_crtc->plane;
5615         int num_connectors = 0;
5616         bool is_dp = false, is_cpu_edp = false;
5617         struct intel_encoder *encoder;
5618         int ret;
5619         bool dither;
5620
5621         for_each_encoder_on_crtc(dev, crtc, encoder) {
5622                 switch (encoder->type) {
5623                 case INTEL_OUTPUT_DISPLAYPORT:
5624                         is_dp = true;
5625                         break;
5626                 case INTEL_OUTPUT_EDP:
5627                         is_dp = true;
5628                         if (!intel_encoder_is_pch_edp(&encoder->base))
5629                                 is_cpu_edp = true;
5630                         break;
5631                 }
5632
5633                 num_connectors++;
5634         }
5635
5636         if (is_cpu_edp)
5637                 intel_crtc->cpu_transcoder = TRANSCODER_EDP;
5638         else
5639                 intel_crtc->cpu_transcoder = pipe;
5640
5641         /* We are not sure yet this won't happen. */
5642         WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
5643              INTEL_PCH_TYPE(dev));
5644
5645         WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
5646              num_connectors, pipe_name(pipe));
5647
5648         WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) &
5649                 (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
5650
5651         WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
5652
5653         if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
5654                 return -EINVAL;
5655
5656         /* Ensure that the cursor is valid for the new mode before changing... */
5657         intel_crtc_update_cursor(crtc, true);
5658
5659         /* determine panel color depth */
5660         dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
5661                                               adjusted_mode);
5662
5663         DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
5664         drm_mode_debug_printmodeline(mode);
5665
5666         if (is_dp && !is_cpu_edp)
5667                 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5668
5669         intel_crtc->lowfreq_avail = false;
5670
5671         intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
5672
5673         if (!is_dp || is_cpu_edp)
5674                 ironlake_set_m_n(crtc, mode, adjusted_mode);
5675
5676         haswell_set_pipeconf(crtc, adjusted_mode, dither);
5677
5678         /* Set up the display plane register */
5679         I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
5680         POSTING_READ(DSPCNTR(plane));
5681
5682         ret = intel_pipe_set_base(crtc, x, y, fb);
5683
5684         intel_update_watermarks(dev);
5685
5686         intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
5687
5688         return ret;
5689 }
5690
5691 static int intel_crtc_mode_set(struct drm_crtc *crtc,
5692                                struct drm_display_mode *mode,
5693                                struct drm_display_mode *adjusted_mode,
5694                                int x, int y,
5695                                struct drm_framebuffer *fb)
5696 {
5697         struct drm_device *dev = crtc->dev;
5698         struct drm_i915_private *dev_priv = dev->dev_private;
5699         struct drm_encoder_helper_funcs *encoder_funcs;
5700         struct intel_encoder *encoder;
5701         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5702         int pipe = intel_crtc->pipe;
5703         int ret;
5704
5705         drm_vblank_pre_modeset(dev, pipe);
5706
5707         ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
5708                                               x, y, fb);
5709         drm_vblank_post_modeset(dev, pipe);
5710
5711         if (ret != 0)
5712                 return ret;
5713
5714         for_each_encoder_on_crtc(dev, crtc, encoder) {
5715                 DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
5716                         encoder->base.base.id,
5717                         drm_get_encoder_name(&encoder->base),
5718                         mode->base.id, mode->name);
5719                 encoder_funcs = encoder->base.helper_private;
5720                 encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
5721         }
5722
5723         return 0;
5724 }
5725
5726 static bool intel_eld_uptodate(struct drm_connector *connector,
5727                                int reg_eldv, uint32_t bits_eldv,
5728                                int reg_elda, uint32_t bits_elda,
5729                                int reg_edid)
5730 {
5731         struct drm_i915_private *dev_priv = connector->dev->dev_private;
5732         uint8_t *eld = connector->eld;
5733         uint32_t i;
5734
5735         i = I915_READ(reg_eldv);
5736         i &= bits_eldv;
5737
5738         if (!eld[0])
5739                 return !i;
5740
5741         if (!i)
5742                 return false;
5743
5744         i = I915_READ(reg_elda);
5745         i &= ~bits_elda;
5746         I915_WRITE(reg_elda, i);
5747
5748         for (i = 0; i < eld[2]; i++)
5749                 if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
5750                         return false;
5751
5752         return true;
5753 }
5754
5755 static void g4x_write_eld(struct drm_connector *connector,
5756                           struct drm_crtc *crtc)
5757 {
5758         struct drm_i915_private *dev_priv = connector->dev->dev_private;
5759         uint8_t *eld = connector->eld;
5760         uint32_t eldv;
5761         uint32_t len;
5762         uint32_t i;
5763
5764         i = I915_READ(G4X_AUD_VID_DID);
5765
5766         if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
5767                 eldv = G4X_ELDV_DEVCL_DEVBLC;
5768         else
5769                 eldv = G4X_ELDV_DEVCTG;
5770
5771         if (intel_eld_uptodate(connector,
5772                                G4X_AUD_CNTL_ST, eldv,
5773                                G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
5774                                G4X_HDMIW_HDMIEDID))
5775                 return;
5776
5777         i = I915_READ(G4X_AUD_CNTL_ST);
5778         i &= ~(eldv | G4X_ELD_ADDR);
5779         len = (i >> 9) & 0x1f;          /* ELD buffer size */
5780         I915_WRITE(G4X_AUD_CNTL_ST, i);
5781
5782         if (!eld[0])
5783                 return;
5784
5785         len = min_t(uint8_t, eld[2], len);
5786         DRM_DEBUG_DRIVER("ELD size %d\n", len);
5787         for (i = 0; i < len; i++)
5788                 I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
5789
5790         i = I915_READ(G4X_AUD_CNTL_ST);
5791         i |= eldv;
5792         I915_WRITE(G4X_AUD_CNTL_ST, i);
5793 }
5794
5795 static void haswell_write_eld(struct drm_connector *connector,
5796                                      struct drm_crtc *crtc)
5797 {
5798         struct drm_i915_private *dev_priv = connector->dev->dev_private;
5799         uint8_t *eld = connector->eld;
5800         struct drm_device *dev = crtc->dev;
5801         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5802         uint32_t eldv;
5803         uint32_t i;
5804         int len;
5805         int pipe = to_intel_crtc(crtc)->pipe;
5806         int tmp;
5807
5808         int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
5809         int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
5810         int aud_config = HSW_AUD_CFG(pipe);
5811         int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
5812
5813
5814         DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
5815
5816         /* Audio output enable */
5817         DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
5818         tmp = I915_READ(aud_cntrl_st2);
5819         tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
5820         I915_WRITE(aud_cntrl_st2, tmp);
5821
5822         /* Wait for 1 vertical blank */
5823         intel_wait_for_vblank(dev, pipe);
5824
5825         /* Set ELD valid state */
5826         tmp = I915_READ(aud_cntrl_st2);
5827         DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
5828         tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
5829         I915_WRITE(aud_cntrl_st2, tmp);
5830         tmp = I915_READ(aud_cntrl_st2);
5831         DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
5832
5833         /* Enable HDMI mode */
5834         tmp = I915_READ(aud_config);
5835         DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
5836         /* clear N_programing_enable and N_value_index */
5837         tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
5838         I915_WRITE(aud_config, tmp);
5839
5840         DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
5841
5842         eldv = AUDIO_ELD_VALID_A << (pipe * 4);
5843         intel_crtc->eld_vld = true;
5844
5845         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
5846                 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
5847                 eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
5848                 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
5849         } else
5850                 I915_WRITE(aud_config, 0);
5851
5852         if (intel_eld_uptodate(connector,
5853                                aud_cntrl_st2, eldv,
5854                                aud_cntl_st, IBX_ELD_ADDRESS,
5855                                hdmiw_hdmiedid))
5856                 return;
5857
5858         i = I915_READ(aud_cntrl_st2);
5859         i &= ~eldv;
5860         I915_WRITE(aud_cntrl_st2, i);
5861
5862         if (!eld[0])
5863                 return;
5864
5865         i = I915_READ(aud_cntl_st);
5866         i &= ~IBX_ELD_ADDRESS;
5867         I915_WRITE(aud_cntl_st, i);
5868         i = (i >> 29) & DIP_PORT_SEL_MASK;              /* DIP_Port_Select, 0x1 = PortB */
5869         DRM_DEBUG_DRIVER("port num:%d\n", i);
5870
5871         len = min_t(uint8_t, eld[2], 21);       /* 84 bytes of hw ELD buffer */
5872         DRM_DEBUG_DRIVER("ELD size %d\n", len);
5873         for (i = 0; i < len; i++)
5874                 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
5875
5876         i = I915_READ(aud_cntrl_st2);
5877         i |= eldv;
5878         I915_WRITE(aud_cntrl_st2, i);
5879
5880 }
5881
5882 static void ironlake_write_eld(struct drm_connector *connector,
5883                                      struct drm_crtc *crtc)
5884 {
5885         struct drm_i915_private *dev_priv = connector->dev->dev_private;
5886         uint8_t *eld = connector->eld;
5887         uint32_t eldv;
5888         uint32_t i;
5889         int len;
5890         int hdmiw_hdmiedid;
5891         int aud_config;
5892         int aud_cntl_st;
5893         int aud_cntrl_st2;
5894         int pipe = to_intel_crtc(crtc)->pipe;
5895
5896         if (HAS_PCH_IBX(connector->dev)) {
5897                 hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
5898                 aud_config = IBX_AUD_CFG(pipe);
5899                 aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
5900                 aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
5901         } else {
5902                 hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
5903                 aud_config = CPT_AUD_CFG(pipe);
5904                 aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
5905                 aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
5906         }
5907
5908         DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
5909
5910         i = I915_READ(aud_cntl_st);
5911         i = (i >> 29) & DIP_PORT_SEL_MASK;              /* DIP_Port_Select, 0x1 = PortB */
5912         if (!i) {
5913                 DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
5914                 /* operate blindly on all ports */
5915                 eldv = IBX_ELD_VALIDB;
5916                 eldv |= IBX_ELD_VALIDB << 4;
5917                 eldv |= IBX_ELD_VALIDB << 8;
5918         } else {
5919                 DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
5920                 eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
5921         }
5922
5923         if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
5924                 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
5925                 eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
5926                 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
5927         } else
5928                 I915_WRITE(aud_config, 0);
5929
5930         if (intel_eld_uptodate(connector,
5931                                aud_cntrl_st2, eldv,
5932                                aud_cntl_st, IBX_ELD_ADDRESS,
5933                                hdmiw_hdmiedid))
5934                 return;
5935
5936         i = I915_READ(aud_cntrl_st2);
5937         i &= ~eldv;
5938         I915_WRITE(aud_cntrl_st2, i);
5939
5940         if (!eld[0])
5941                 return;
5942
5943         i = I915_READ(aud_cntl_st);
5944         i &= ~IBX_ELD_ADDRESS;
5945         I915_WRITE(aud_cntl_st, i);
5946
5947         len = min_t(uint8_t, eld[2], 21);       /* 84 bytes of hw ELD buffer */
5948         DRM_DEBUG_DRIVER("ELD size %d\n", len);
5949         for (i = 0; i < len; i++)
5950                 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
5951
5952         i = I915_READ(aud_cntrl_st2);
5953         i |= eldv;
5954         I915_WRITE(aud_cntrl_st2, i);
5955 }
5956
5957 void intel_write_eld(struct drm_encoder *encoder,
5958                      struct drm_display_mode *mode)
5959 {
5960         struct drm_crtc *crtc = encoder->crtc;
5961         struct drm_connector *connector;
5962         struct drm_device *dev = encoder->dev;
5963         struct drm_i915_private *dev_priv = dev->dev_private;
5964
5965         connector = drm_select_eld(encoder, mode);
5966         if (!connector)
5967                 return;
5968
5969         DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5970                          connector->base.id,
5971                          drm_get_connector_name(connector),
5972                          connector->encoder->base.id,
5973                          drm_get_encoder_name(connector->encoder));
5974
5975         connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
5976
5977         if (dev_priv->display.write_eld)
5978                 dev_priv->display.write_eld(connector, crtc);
5979 }
5980
5981 /** Loads the palette/gamma unit for the CRTC with the prepared values */
5982 void intel_crtc_load_lut(struct drm_crtc *crtc)
5983 {
5984         struct drm_device *dev = crtc->dev;
5985         struct drm_i915_private *dev_priv = dev->dev_private;
5986         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5987         int palreg = PALETTE(intel_crtc->pipe);
5988         int i;
5989
5990         /* The clocks have to be on to load the palette. */
5991         if (!crtc->enabled || !intel_crtc->active)
5992                 return;
5993
5994         /* use legacy palette for Ironlake */
5995         if (HAS_PCH_SPLIT(dev))
5996                 palreg = LGC_PALETTE(intel_crtc->pipe);
5997
5998         for (i = 0; i < 256; i++) {
5999                 I915_WRITE(palreg + 4 * i,
6000                            (intel_crtc->lut_r[i] << 16) |
6001                            (intel_crtc->lut_g[i] << 8) |
6002                            intel_crtc->lut_b[i]);
6003         }
6004 }
6005
6006 static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
6007 {
6008         struct drm_device *dev = crtc->dev;
6009         struct drm_i915_private *dev_priv = dev->dev_private;
6010         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6011         bool visible = base != 0;
6012         u32 cntl;
6013
6014         if (intel_crtc->cursor_visible == visible)
6015                 return;
6016
6017         cntl = I915_READ(_CURACNTR);
6018         if (visible) {
6019                 /* On these chipsets we can only modify the base whilst
6020                  * the cursor is disabled.
6021                  */
6022                 I915_WRITE(_CURABASE, base);
6023
6024                 cntl &= ~(CURSOR_FORMAT_MASK);
6025                 /* XXX width must be 64, stride 256 => 0x00 << 28 */
6026                 cntl |= CURSOR_ENABLE |
6027                         CURSOR_GAMMA_ENABLE |
6028                         CURSOR_FORMAT_ARGB;
6029         } else
6030                 cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
6031         I915_WRITE(_CURACNTR, cntl);
6032
6033         intel_crtc->cursor_visible = visible;
6034 }
6035
6036 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
6037 {
6038         struct drm_device *dev = crtc->dev;
6039         struct drm_i915_private *dev_priv = dev->dev_private;
6040         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6041         int pipe = intel_crtc->pipe;
6042         bool visible = base != 0;
6043
6044         if (intel_crtc->cursor_visible != visible) {
6045                 uint32_t cntl = I915_READ(CURCNTR(pipe));
6046                 if (base) {
6047                         cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
6048                         cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
6049                         cntl |= pipe << 28; /* Connect to correct pipe */
6050                 } else {
6051                         cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
6052                         cntl |= CURSOR_MODE_DISABLE;
6053                 }
6054                 I915_WRITE(CURCNTR(pipe), cntl);
6055
6056                 intel_crtc->cursor_visible = visible;
6057         }
6058         /* and commit changes on next vblank */
6059         I915_WRITE(CURBASE(pipe), base);
6060 }
6061
6062 static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
6063 {
6064         struct drm_device *dev = crtc->dev;
6065         struct drm_i915_private *dev_priv = dev->dev_private;
6066         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6067         int pipe = intel_crtc->pipe;
6068         bool visible = base != 0;
6069
6070         if (intel_crtc->cursor_visible != visible) {
6071                 uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
6072                 if (base) {
6073                         cntl &= ~CURSOR_MODE;
6074                         cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
6075                 } else {
6076                         cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
6077                         cntl |= CURSOR_MODE_DISABLE;
6078                 }
6079                 I915_WRITE(CURCNTR_IVB(pipe), cntl);
6080
6081                 intel_crtc->cursor_visible = visible;
6082         }
6083         /* and commit changes on next vblank */
6084         I915_WRITE(CURBASE_IVB(pipe), base);
6085 }
6086
6087 /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
6088 static void intel_crtc_update_cursor(struct drm_crtc *crtc,
6089                                      bool on)
6090 {
6091         struct drm_device *dev = crtc->dev;
6092         struct drm_i915_private *dev_priv = dev->dev_private;
6093         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6094         int pipe = intel_crtc->pipe;
6095         int x = intel_crtc->cursor_x;
6096         int y = intel_crtc->cursor_y;
6097         u32 base, pos;
6098         bool visible;
6099
6100         pos = 0;
6101
6102         if (on && crtc->enabled && crtc->fb) {
6103                 base = intel_crtc->cursor_addr;
6104                 if (x > (int) crtc->fb->width)
6105                         base = 0;
6106
6107                 if (y > (int) crtc->fb->height)
6108                         base = 0;
6109         } else
6110                 base = 0;
6111
6112         if (x < 0) {
6113                 if (x + intel_crtc->cursor_width < 0)
6114                         base = 0;
6115
6116                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
6117                 x = -x;
6118         }
6119         pos |= x << CURSOR_X_SHIFT;
6120
6121         if (y < 0) {
6122                 if (y + intel_crtc->cursor_height < 0)
6123                         base = 0;
6124
6125                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
6126                 y = -y;
6127         }
6128         pos |= y << CURSOR_Y_SHIFT;
6129
6130         visible = base != 0;
6131         if (!visible && !intel_crtc->cursor_visible)
6132                 return;
6133
6134         if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
6135                 I915_WRITE(CURPOS_IVB(pipe), pos);
6136                 ivb_update_cursor(crtc, base);
6137         } else {
6138                 I915_WRITE(CURPOS(pipe), pos);
6139                 if (IS_845G(dev) || IS_I865G(dev))
6140                         i845_update_cursor(crtc, base);
6141                 else
6142                         i9xx_update_cursor(crtc, base);
6143         }
6144 }
6145
6146 static int intel_crtc_cursor_set(struct drm_crtc *crtc,
6147                                  struct drm_file *file,
6148                                  uint32_t handle,
6149                                  uint32_t width, uint32_t height)
6150 {
6151         struct drm_device *dev = crtc->dev;
6152         struct drm_i915_private *dev_priv = dev->dev_private;
6153         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6154         struct drm_i915_gem_object *obj;
6155         uint32_t addr;
6156         int ret;
6157
6158         /* if we want to turn off the cursor ignore width and height */
6159         if (!handle) {
6160                 DRM_DEBUG_KMS("cursor off\n");
6161                 addr = 0;
6162                 obj = NULL;
6163                 mutex_lock(&dev->struct_mutex);
6164                 goto finish;
6165         }
6166
6167         /* Currently we only support 64x64 cursors */
6168         if (width != 64 || height != 64) {
6169                 DRM_ERROR("we currently only support 64x64 cursors\n");
6170                 return -EINVAL;
6171         }
6172
6173         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
6174         if (&obj->base == NULL)
6175                 return -ENOENT;
6176
6177         if (obj->base.size < width * height * 4) {
6178                 DRM_ERROR("buffer is to small\n");
6179                 ret = -ENOMEM;
6180                 goto fail;
6181         }
6182
6183         /* we only need to pin inside GTT if cursor is non-phy */
6184         mutex_lock(&dev->struct_mutex);
6185         if (!dev_priv->info->cursor_needs_physical) {
6186                 if (obj->tiling_mode) {
6187                         DRM_ERROR("cursor cannot be tiled\n");
6188                         ret = -EINVAL;
6189                         goto fail_locked;
6190                 }
6191
6192                 ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
6193                 if (ret) {
6194                         DRM_ERROR("failed to move cursor bo into the GTT\n");
6195                         goto fail_locked;
6196                 }
6197
6198                 ret = i915_gem_object_put_fence(obj);
6199                 if (ret) {
6200                         DRM_ERROR("failed to release fence for cursor");
6201                         goto fail_unpin;
6202                 }
6203
6204                 addr = obj->gtt_offset;
6205         } else {
6206                 int align = IS_I830(dev) ? 16 * 1024 : 256;
6207                 ret = i915_gem_attach_phys_object(dev, obj,
6208                                                   (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
6209                                                   align);
6210                 if (ret) {
6211                         DRM_ERROR("failed to attach phys object\n");
6212                         goto fail_locked;
6213                 }
6214                 addr = obj->phys_obj->handle->busaddr;
6215         }
6216
6217         if (IS_GEN2(dev))
6218                 I915_WRITE(CURSIZE, (height << 12) | width);
6219
6220  finish:
6221         if (intel_crtc->cursor_bo) {
6222                 if (dev_priv->info->cursor_needs_physical) {
6223                         if (intel_crtc->cursor_bo != obj)
6224                                 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
6225                 } else
6226                         i915_gem_object_unpin(intel_crtc->cursor_bo);
6227                 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
6228         }
6229
6230         mutex_unlock(&dev->struct_mutex);
6231
6232         intel_crtc->cursor_addr = addr;
6233         intel_crtc->cursor_bo = obj;
6234         intel_crtc->cursor_width = width;
6235         intel_crtc->cursor_height = height;
6236
6237         intel_crtc_update_cursor(crtc, true);
6238
6239         return 0;
6240 fail_unpin:
6241         i915_gem_object_unpin(obj);
6242 fail_locked:
6243         mutex_unlock(&dev->struct_mutex);
6244 fail:
6245         drm_gem_object_unreference_unlocked(&obj->base);
6246         return ret;
6247 }
6248
6249 static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
6250 {
6251         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6252
6253         intel_crtc->cursor_x = x;
6254         intel_crtc->cursor_y = y;
6255
6256         intel_crtc_update_cursor(crtc, true);
6257
6258         return 0;
6259 }
6260
6261 /** Sets the color ramps on behalf of RandR */
6262 void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
6263                                  u16 blue, int regno)
6264 {
6265         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6266
6267         intel_crtc->lut_r[regno] = red >> 8;
6268         intel_crtc->lut_g[regno] = green >> 8;
6269         intel_crtc->lut_b[regno] = blue >> 8;
6270 }
6271
6272 void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
6273                              u16 *blue, int regno)
6274 {
6275         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6276
6277         *red = intel_crtc->lut_r[regno] << 8;
6278         *green = intel_crtc->lut_g[regno] << 8;
6279         *blue = intel_crtc->lut_b[regno] << 8;
6280 }
6281
6282 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
6283                                  u16 *blue, uint32_t start, uint32_t size)
6284 {
6285         int end = (start + size > 256) ? 256 : start + size, i;
6286         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6287
6288         for (i = start; i < end; i++) {
6289                 intel_crtc->lut_r[i] = red[i] >> 8;
6290                 intel_crtc->lut_g[i] = green[i] >> 8;
6291                 intel_crtc->lut_b[i] = blue[i] >> 8;
6292         }
6293
6294         intel_crtc_load_lut(crtc);
6295 }
6296
6297 /**
6298  * Get a pipe with a simple mode set on it for doing load-based monitor
6299  * detection.
6300  *
6301  * It will be up to the load-detect code to adjust the pipe as appropriate for
6302  * its requirements.  The pipe will be connected to no other encoders.
6303  *
6304  * Currently this code will only succeed if there is a pipe with no encoders
6305  * configured for it.  In the future, it could choose to temporarily disable
6306  * some outputs to free up a pipe for its use.
6307  *
6308  * \return crtc, or NULL if no pipes are available.
6309  */
6310
6311 /* VESA 640x480x72Hz mode to set on the pipe */
6312 static struct drm_display_mode load_detect_mode = {
6313         DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
6314                  704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
6315 };
6316
6317 static struct drm_framebuffer *
6318 intel_framebuffer_create(struct drm_device *dev,
6319                          struct drm_mode_fb_cmd2 *mode_cmd,
6320                          struct drm_i915_gem_object *obj)
6321 {
6322         struct intel_framebuffer *intel_fb;
6323         int ret;
6324
6325         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6326         if (!intel_fb) {
6327                 drm_gem_object_unreference_unlocked(&obj->base);
6328                 return ERR_PTR(-ENOMEM);
6329         }
6330
6331         ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
6332         if (ret) {
6333                 drm_gem_object_unreference_unlocked(&obj->base);
6334                 kfree(intel_fb);
6335                 return ERR_PTR(ret);
6336         }
6337
6338         return &intel_fb->base;
6339 }
6340
6341 static u32
6342 intel_framebuffer_pitch_for_width(int width, int bpp)
6343 {
6344         u32 pitch = DIV_ROUND_UP(width * bpp, 8);
6345         return ALIGN(pitch, 64);
6346 }
6347
6348 static u32
6349 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
6350 {
6351         u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
6352         return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
6353 }
6354
6355 static struct drm_framebuffer *
6356 intel_framebuffer_create_for_mode(struct drm_device *dev,
6357                                   struct drm_display_mode *mode,
6358                                   int depth, int bpp)
6359 {
6360         struct drm_i915_gem_object *obj;
6361         struct drm_mode_fb_cmd2 mode_cmd = { 0 };
6362
6363         obj = i915_gem_alloc_object(dev,
6364                                     intel_framebuffer_size_for_mode(mode, bpp));
6365         if (obj == NULL)
6366                 return ERR_PTR(-ENOMEM);
6367
6368         mode_cmd.width = mode->hdisplay;
6369         mode_cmd.height = mode->vdisplay;
6370         mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
6371                                                                 bpp);
6372         mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
6373
6374         return intel_framebuffer_create(dev, &mode_cmd, obj);
6375 }
6376
6377 static struct drm_framebuffer *
6378 mode_fits_in_fbdev(struct drm_device *dev,
6379                    struct drm_display_mode *mode)
6380 {
6381         struct drm_i915_private *dev_priv = dev->dev_private;
6382         struct drm_i915_gem_object *obj;
6383         struct drm_framebuffer *fb;
6384
6385         if (dev_priv->fbdev == NULL)
6386                 return NULL;
6387
6388         obj = dev_priv->fbdev->ifb.obj;
6389         if (obj == NULL)
6390                 return NULL;
6391
6392         fb = &dev_priv->fbdev->ifb.base;
6393         if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
6394                                                                fb->bits_per_pixel))
6395                 return NULL;
6396
6397         if (obj->base.size < mode->vdisplay * fb->pitches[0])
6398                 return NULL;
6399
6400         return fb;
6401 }
6402
6403 bool intel_get_load_detect_pipe(struct drm_connector *connector,
6404                                 struct drm_display_mode *mode,
6405                                 struct intel_load_detect_pipe *old)
6406 {
6407         struct intel_crtc *intel_crtc;
6408         struct intel_encoder *intel_encoder =
6409                 intel_attached_encoder(connector);
6410         struct drm_crtc *possible_crtc;
6411         struct drm_encoder *encoder = &intel_encoder->base;
6412         struct drm_crtc *crtc = NULL;
6413         struct drm_device *dev = encoder->dev;
6414         struct drm_framebuffer *fb;
6415         int i = -1;
6416
6417         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6418                       connector->base.id, drm_get_connector_name(connector),
6419                       encoder->base.id, drm_get_encoder_name(encoder));
6420
6421         /*
6422          * Algorithm gets a little messy:
6423          *
6424          *   - if the connector already has an assigned crtc, use it (but make
6425          *     sure it's on first)
6426          *
6427          *   - try to find the first unused crtc that can drive this connector,
6428          *     and use that if we find one
6429          */
6430
6431         /* See if we already have a CRTC for this connector */
6432         if (encoder->crtc) {
6433                 crtc = encoder->crtc;
6434
6435                 old->dpms_mode = connector->dpms;
6436                 old->load_detect_temp = false;
6437
6438                 /* Make sure the crtc and connector are running */
6439                 if (connector->dpms != DRM_MODE_DPMS_ON)
6440                         connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
6441
6442                 return true;
6443         }
6444
6445         /* Find an unused one (if possible) */
6446         list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
6447                 i++;
6448                 if (!(encoder->possible_crtcs & (1 << i)))
6449                         continue;
6450                 if (!possible_crtc->enabled) {
6451                         crtc = possible_crtc;
6452                         break;
6453                 }
6454         }
6455
6456         /*
6457          * If we didn't find an unused CRTC, don't use any.
6458          */
6459         if (!crtc) {
6460                 DRM_DEBUG_KMS("no pipe available for load-detect\n");
6461                 return false;
6462         }
6463
6464         intel_encoder->new_crtc = to_intel_crtc(crtc);
6465         to_intel_connector(connector)->new_encoder = intel_encoder;
6466
6467         intel_crtc = to_intel_crtc(crtc);
6468         old->dpms_mode = connector->dpms;
6469         old->load_detect_temp = true;
6470         old->release_fb = NULL;
6471
6472         if (!mode)
6473                 mode = &load_detect_mode;
6474
6475         /* We need a framebuffer large enough to accommodate all accesses
6476          * that the plane may generate whilst we perform load detection.
6477          * We can not rely on the fbcon either being present (we get called
6478          * during its initialisation to detect all boot displays, or it may
6479          * not even exist) or that it is large enough to satisfy the
6480          * requested mode.
6481          */
6482         fb = mode_fits_in_fbdev(dev, mode);
6483         if (fb == NULL) {
6484                 DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
6485                 fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
6486                 old->release_fb = fb;
6487         } else
6488                 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
6489         if (IS_ERR(fb)) {
6490                 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
6491                 return false;
6492         }
6493
6494         if (intel_set_mode(crtc, mode, 0, 0, fb)) {
6495                 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
6496                 if (old->release_fb)
6497                         old->release_fb->funcs->destroy(old->release_fb);
6498                 return false;
6499         }
6500
6501         /* let the connector get through one full cycle before testing */
6502         intel_wait_for_vblank(dev, intel_crtc->pipe);
6503         return true;
6504 }
6505
6506 void intel_release_load_detect_pipe(struct drm_connector *connector,
6507                                     struct intel_load_detect_pipe *old)
6508 {
6509         struct intel_encoder *intel_encoder =
6510                 intel_attached_encoder(connector);
6511         struct drm_encoder *encoder = &intel_encoder->base;
6512
6513         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6514                       connector->base.id, drm_get_connector_name(connector),
6515                       encoder->base.id, drm_get_encoder_name(encoder));
6516
6517         if (old->load_detect_temp) {
6518                 struct drm_crtc *crtc = encoder->crtc;
6519
6520                 to_intel_connector(connector)->new_encoder = NULL;
6521                 intel_encoder->new_crtc = NULL;
6522                 intel_set_mode(crtc, NULL, 0, 0, NULL);
6523
6524                 if (old->release_fb)
6525                         old->release_fb->funcs->destroy(old->release_fb);
6526
6527                 return;
6528         }
6529
6530         /* Switch crtc and encoder back off if necessary */
6531         if (old->dpms_mode != DRM_MODE_DPMS_ON)
6532                 connector->funcs->dpms(connector, old->dpms_mode);
6533 }
6534
6535 /* Returns the clock of the currently programmed mode of the given pipe. */
6536 static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
6537 {
6538         struct drm_i915_private *dev_priv = dev->dev_private;
6539         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6540         int pipe = intel_crtc->pipe;
6541         u32 dpll = I915_READ(DPLL(pipe));
6542         u32 fp;
6543         intel_clock_t clock;
6544
6545         if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
6546                 fp = I915_READ(FP0(pipe));
6547         else
6548                 fp = I915_READ(FP1(pipe));
6549
6550         clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
6551         if (IS_PINEVIEW(dev)) {
6552                 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
6553                 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
6554         } else {
6555                 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
6556                 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
6557         }
6558
6559         if (!IS_GEN2(dev)) {
6560                 if (IS_PINEVIEW(dev))
6561                         clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
6562                                 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
6563                 else
6564                         clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
6565                                DPLL_FPA01_P1_POST_DIV_SHIFT);
6566
6567                 switch (dpll & DPLL_MODE_MASK) {
6568                 case DPLLB_MODE_DAC_SERIAL:
6569                         clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
6570                                 5 : 10;
6571                         break;
6572                 case DPLLB_MODE_LVDS:
6573                         clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
6574                                 7 : 14;
6575                         break;
6576                 default:
6577                         DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
6578                                   "mode\n", (int)(dpll & DPLL_MODE_MASK));
6579                         return 0;
6580                 }
6581
6582                 /* XXX: Handle the 100Mhz refclk */
6583                 intel_clock(dev, 96000, &clock);
6584         } else {
6585                 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
6586
6587                 if (is_lvds) {
6588                         clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
6589                                        DPLL_FPA01_P1_POST_DIV_SHIFT);
6590                         clock.p2 = 14;
6591
6592                         if ((dpll & PLL_REF_INPUT_MASK) ==
6593                             PLLB_REF_INPUT_SPREADSPECTRUMIN) {
6594                                 /* XXX: might not be 66MHz */
6595                                 intel_clock(dev, 66000, &clock);
6596                         } else
6597                                 intel_clock(dev, 48000, &clock);
6598                 } else {
6599                         if (dpll & PLL_P1_DIVIDE_BY_TWO)
6600                                 clock.p1 = 2;
6601                         else {
6602                                 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
6603                                             DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
6604                         }
6605                         if (dpll & PLL_P2_DIVIDE_BY_4)
6606                                 clock.p2 = 4;
6607                         else
6608                                 clock.p2 = 2;
6609
6610                         intel_clock(dev, 48000, &clock);
6611                 }
6612         }
6613
6614         /* XXX: It would be nice to validate the clocks, but we can't reuse
6615          * i830PllIsValid() because it relies on the xf86_config connector
6616          * configuration being accurate, which it isn't necessarily.
6617          */
6618
6619         return clock.dot;
6620 }
6621
6622 /** Returns the currently programmed mode of the given pipe. */
6623 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
6624                                              struct drm_crtc *crtc)
6625 {
6626         struct drm_i915_private *dev_priv = dev->dev_private;
6627         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6628         enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
6629         struct drm_display_mode *mode;
6630         int htot = I915_READ(HTOTAL(cpu_transcoder));
6631         int hsync = I915_READ(HSYNC(cpu_transcoder));
6632         int vtot = I915_READ(VTOTAL(cpu_transcoder));
6633         int vsync = I915_READ(VSYNC(cpu_transcoder));
6634
6635         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
6636         if (!mode)
6637                 return NULL;
6638
6639         mode->clock = intel_crtc_clock_get(dev, crtc);
6640         mode->hdisplay = (htot & 0xffff) + 1;
6641         mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
6642         mode->hsync_start = (hsync & 0xffff) + 1;
6643         mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
6644         mode->vdisplay = (vtot & 0xffff) + 1;
6645         mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
6646         mode->vsync_start = (vsync & 0xffff) + 1;
6647         mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
6648
6649         drm_mode_set_name(mode);
6650
6651         return mode;
6652 }
6653
6654 static void intel_increase_pllclock(struct drm_crtc *crtc)
6655 {
6656         struct drm_device *dev = crtc->dev;
6657         drm_i915_private_t *dev_priv = dev->dev_private;
6658         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6659         int pipe = intel_crtc->pipe;
6660         int dpll_reg = DPLL(pipe);
6661         int dpll;
6662
6663         if (HAS_PCH_SPLIT(dev))
6664                 return;
6665
6666         if (!dev_priv->lvds_downclock_avail)
6667                 return;
6668
6669         dpll = I915_READ(dpll_reg);
6670         if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
6671                 DRM_DEBUG_DRIVER("upclocking LVDS\n");
6672
6673                 assert_panel_unlocked(dev_priv, pipe);
6674
6675                 dpll &= ~DISPLAY_RATE_SELECT_FPA1;
6676                 I915_WRITE(dpll_reg, dpll);
6677                 intel_wait_for_vblank(dev, pipe);
6678
6679                 dpll = I915_READ(dpll_reg);
6680                 if (dpll & DISPLAY_RATE_SELECT_FPA1)
6681                         DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
6682         }
6683 }
6684
6685 static void intel_decrease_pllclock(struct drm_crtc *crtc)
6686 {
6687         struct drm_device *dev = crtc->dev;
6688         drm_i915_private_t *dev_priv = dev->dev_private;
6689         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6690
6691         if (HAS_PCH_SPLIT(dev))
6692                 return;
6693
6694         if (!dev_priv->lvds_downclock_avail)
6695                 return;
6696
6697         /*
6698          * Since this is called by a timer, we should never get here in
6699          * the manual case.
6700          */
6701         if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
6702                 int pipe = intel_crtc->pipe;
6703                 int dpll_reg = DPLL(pipe);
6704                 int dpll;
6705
6706                 DRM_DEBUG_DRIVER("downclocking LVDS\n");
6707
6708                 assert_panel_unlocked(dev_priv, pipe);
6709
6710                 dpll = I915_READ(dpll_reg);
6711                 dpll |= DISPLAY_RATE_SELECT_FPA1;
6712                 I915_WRITE(dpll_reg, dpll);
6713                 intel_wait_for_vblank(dev, pipe);
6714                 dpll = I915_READ(dpll_reg);
6715                 if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
6716                         DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
6717         }
6718
6719 }
6720
6721 void intel_mark_busy(struct drm_device *dev)
6722 {
6723         i915_update_gfx_val(dev->dev_private);
6724 }
6725
6726 void intel_mark_idle(struct drm_device *dev)
6727 {
6728 }
6729
6730 void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
6731 {
6732         struct drm_device *dev = obj->base.dev;
6733         struct drm_crtc *crtc;
6734
6735         if (!i915_powersave)
6736                 return;
6737
6738         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
6739                 if (!crtc->fb)
6740                         continue;
6741
6742                 if (to_intel_framebuffer(crtc->fb)->obj == obj)
6743                         intel_increase_pllclock(crtc);
6744         }
6745 }
6746
6747 void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
6748 {
6749         struct drm_device *dev = obj->base.dev;
6750         struct drm_crtc *crtc;
6751
6752         if (!i915_powersave)
6753                 return;
6754
6755         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
6756                 if (!crtc->fb)
6757                         continue;
6758
6759                 if (to_intel_framebuffer(crtc->fb)->obj == obj)
6760                         intel_decrease_pllclock(crtc);
6761         }
6762 }
6763
6764 static void intel_crtc_destroy(struct drm_crtc *crtc)
6765 {
6766         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6767         struct drm_device *dev = crtc->dev;
6768         struct intel_unpin_work *work;
6769         unsigned long flags;
6770
6771         spin_lock_irqsave(&dev->event_lock, flags);
6772         work = intel_crtc->unpin_work;
6773         intel_crtc->unpin_work = NULL;
6774         spin_unlock_irqrestore(&dev->event_lock, flags);
6775
6776         if (work) {
6777                 cancel_work_sync(&work->work);
6778                 kfree(work);
6779         }
6780
6781         drm_crtc_cleanup(crtc);
6782
6783         kfree(intel_crtc);
6784 }
6785
6786 static void intel_unpin_work_fn(struct work_struct *__work)
6787 {
6788         struct intel_unpin_work *work =
6789                 container_of(__work, struct intel_unpin_work, work);
6790         struct drm_device *dev = work->crtc->dev;
6791
6792         mutex_lock(&dev->struct_mutex);
6793         intel_unpin_fb_obj(work->old_fb_obj);
6794         drm_gem_object_unreference(&work->pending_flip_obj->base);
6795         drm_gem_object_unreference(&work->old_fb_obj->base);
6796
6797         intel_update_fbc(dev);
6798         mutex_unlock(&dev->struct_mutex);
6799
6800         BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
6801         atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
6802
6803         kfree(work);
6804 }
6805
6806 static void do_intel_finish_page_flip(struct drm_device *dev,
6807                                       struct drm_crtc *crtc)
6808 {
6809         drm_i915_private_t *dev_priv = dev->dev_private;
6810         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6811         struct intel_unpin_work *work;
6812         struct drm_i915_gem_object *obj;
6813         unsigned long flags;
6814
6815         /* Ignore early vblank irqs */
6816         if (intel_crtc == NULL)
6817                 return;
6818
6819         spin_lock_irqsave(&dev->event_lock, flags);
6820         work = intel_crtc->unpin_work;
6821
6822         /* Ensure we don't miss a work->pending update ... */
6823         smp_rmb();
6824
6825         if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
6826                 spin_unlock_irqrestore(&dev->event_lock, flags);
6827                 return;
6828         }
6829
6830         /* and that the unpin work is consistent wrt ->pending. */
6831         smp_rmb();
6832
6833         intel_crtc->unpin_work = NULL;
6834
6835         if (work->event)
6836                 drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
6837
6838         drm_vblank_put(dev, intel_crtc->pipe);
6839
6840         spin_unlock_irqrestore(&dev->event_lock, flags);
6841
6842         obj = work->old_fb_obj;
6843
6844         wake_up_all(&dev_priv->pending_flip_queue);
6845
6846         queue_work(dev_priv->wq, &work->work);
6847
6848         trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
6849 }
6850
6851 void intel_finish_page_flip(struct drm_device *dev, int pipe)
6852 {
6853         drm_i915_private_t *dev_priv = dev->dev_private;
6854         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
6855
6856         do_intel_finish_page_flip(dev, crtc);
6857 }
6858
6859 void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
6860 {
6861         drm_i915_private_t *dev_priv = dev->dev_private;
6862         struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
6863
6864         do_intel_finish_page_flip(dev, crtc);
6865 }
6866
6867 void intel_prepare_page_flip(struct drm_device *dev, int plane)
6868 {
6869         drm_i915_private_t *dev_priv = dev->dev_private;
6870         struct intel_crtc *intel_crtc =
6871                 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
6872         unsigned long flags;
6873
6874         /* NB: An MMIO update of the plane base pointer will also
6875          * generate a page-flip completion irq, i.e. every modeset
6876          * is also accompanied by a spurious intel_prepare_page_flip().
6877          */
6878         spin_lock_irqsave(&dev->event_lock, flags);
6879         if (intel_crtc->unpin_work)
6880                 atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
6881         spin_unlock_irqrestore(&dev->event_lock, flags);
6882 }
6883
6884 inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
6885 {
6886         /* Ensure that the work item is consistent when activating it ... */
6887         smp_wmb();
6888         atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
6889         /* and that it is marked active as soon as the irq could fire. */
6890         smp_wmb();
6891 }
6892
6893 static int intel_gen2_queue_flip(struct drm_device *dev,
6894                                  struct drm_crtc *crtc,
6895                                  struct drm_framebuffer *fb,
6896                                  struct drm_i915_gem_object *obj)
6897 {
6898         struct drm_i915_private *dev_priv = dev->dev_private;
6899         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6900         u32 flip_mask;
6901         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
6902         int ret;
6903
6904         ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
6905         if (ret)
6906                 goto err;
6907
6908         ret = intel_ring_begin(ring, 6);
6909         if (ret)
6910                 goto err_unpin;
6911
6912         /* Can't queue multiple flips, so wait for the previous
6913          * one to finish before executing the next.
6914          */
6915         if (intel_crtc->plane)
6916                 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
6917         else
6918                 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6919         intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
6920         intel_ring_emit(ring, MI_NOOP);
6921         intel_ring_emit(ring, MI_DISPLAY_FLIP |
6922                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6923         intel_ring_emit(ring, fb->pitches[0]);
6924         intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
6925         intel_ring_emit(ring, 0); /* aux display base address, unused */
6926
6927         intel_mark_page_flip_active(intel_crtc);
6928         intel_ring_advance(ring);
6929         return 0;
6930
6931 err_unpin:
6932         intel_unpin_fb_obj(obj);
6933 err:
6934         return ret;
6935 }
6936
6937 static int intel_gen3_queue_flip(struct drm_device *dev,
6938                                  struct drm_crtc *crtc,
6939                                  struct drm_framebuffer *fb,
6940                                  struct drm_i915_gem_object *obj)
6941 {
6942         struct drm_i915_private *dev_priv = dev->dev_private;
6943         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6944         u32 flip_mask;
6945         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
6946         int ret;
6947
6948         ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
6949         if (ret)
6950                 goto err;
6951
6952         ret = intel_ring_begin(ring, 6);
6953         if (ret)
6954                 goto err_unpin;
6955
6956         if (intel_crtc->plane)
6957                 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
6958         else
6959                 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6960         intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
6961         intel_ring_emit(ring, MI_NOOP);
6962         intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
6963                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6964         intel_ring_emit(ring, fb->pitches[0]);
6965         intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
6966         intel_ring_emit(ring, MI_NOOP);
6967
6968         intel_mark_page_flip_active(intel_crtc);
6969         intel_ring_advance(ring);
6970         return 0;
6971
6972 err_unpin:
6973         intel_unpin_fb_obj(obj);
6974 err:
6975         return ret;
6976 }
6977
6978 static int intel_gen4_queue_flip(struct drm_device *dev,
6979                                  struct drm_crtc *crtc,
6980                                  struct drm_framebuffer *fb,
6981                                  struct drm_i915_gem_object *obj)
6982 {
6983         struct drm_i915_private *dev_priv = dev->dev_private;
6984         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6985         uint32_t pf, pipesrc;
6986         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
6987         int ret;
6988
6989         ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
6990         if (ret)
6991                 goto err;
6992
6993         ret = intel_ring_begin(ring, 4);
6994         if (ret)
6995                 goto err_unpin;
6996
6997         /* i965+ uses the linear or tiled offsets from the
6998          * Display Registers (which do not change across a page-flip)
6999          * so we need only reprogram the base address.
7000          */
7001         intel_ring_emit(ring, MI_DISPLAY_FLIP |
7002                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7003         intel_ring_emit(ring, fb->pitches[0]);
7004         intel_ring_emit(ring,
7005                         (obj->gtt_offset + intel_crtc->dspaddr_offset) |
7006                         obj->tiling_mode);
7007
7008         /* XXX Enabling the panel-fitter across page-flip is so far
7009          * untested on non-native modes, so ignore it for now.
7010          * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
7011          */
7012         pf = 0;
7013         pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7014         intel_ring_emit(ring, pf | pipesrc);
7015
7016         intel_mark_page_flip_active(intel_crtc);
7017         intel_ring_advance(ring);
7018         return 0;
7019
7020 err_unpin:
7021         intel_unpin_fb_obj(obj);
7022 err:
7023         return ret;
7024 }
7025
7026 static int intel_gen6_queue_flip(struct drm_device *dev,
7027                                  struct drm_crtc *crtc,
7028                                  struct drm_framebuffer *fb,
7029                                  struct drm_i915_gem_object *obj)
7030 {
7031         struct drm_i915_private *dev_priv = dev->dev_private;
7032         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7033         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
7034         uint32_t pf, pipesrc;
7035         int ret;
7036
7037         ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7038         if (ret)
7039                 goto err;
7040
7041         ret = intel_ring_begin(ring, 4);
7042         if (ret)
7043                 goto err_unpin;
7044
7045         intel_ring_emit(ring, MI_DISPLAY_FLIP |
7046                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7047         intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
7048         intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
7049
7050         /* Contrary to the suggestions in the documentation,
7051          * "Enable Panel Fitter" does not seem to be required when page
7052          * flipping with a non-native mode, and worse causes a normal
7053          * modeset to fail.
7054          * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
7055          */
7056         pf = 0;
7057         pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7058         intel_ring_emit(ring, pf | pipesrc);
7059
7060         intel_mark_page_flip_active(intel_crtc);
7061         intel_ring_advance(ring);
7062         return 0;
7063
7064 err_unpin:
7065         intel_unpin_fb_obj(obj);
7066 err:
7067         return ret;
7068 }
7069
7070 /*
7071  * On gen7 we currently use the blit ring because (in early silicon at least)
7072  * the render ring doesn't give us interrpts for page flip completion, which
7073  * means clients will hang after the first flip is queued.  Fortunately the
7074  * blit ring generates interrupts properly, so use it instead.
7075  */
7076 static int intel_gen7_queue_flip(struct drm_device *dev,
7077                                  struct drm_crtc *crtc,
7078                                  struct drm_framebuffer *fb,
7079                                  struct drm_i915_gem_object *obj)
7080 {
7081         struct drm_i915_private *dev_priv = dev->dev_private;
7082         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7083         struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
7084         uint32_t plane_bit = 0;
7085         int ret;
7086
7087         ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7088         if (ret)
7089                 goto err;
7090
7091         switch(intel_crtc->plane) {
7092         case PLANE_A:
7093                 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
7094                 break;
7095         case PLANE_B:
7096                 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
7097                 break;
7098         case PLANE_C:
7099                 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
7100                 break;
7101         default:
7102                 WARN_ONCE(1, "unknown plane in flip command\n");
7103                 ret = -ENODEV;
7104                 goto err_unpin;
7105         }
7106
7107         ret = intel_ring_begin(ring, 4);
7108         if (ret)
7109                 goto err_unpin;
7110
7111         intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
7112         intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
7113         intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
7114         intel_ring_emit(ring, (MI_NOOP));
7115
7116         intel_mark_page_flip_active(intel_crtc);
7117         intel_ring_advance(ring);
7118         return 0;
7119
7120 err_unpin:
7121         intel_unpin_fb_obj(obj);
7122 err:
7123         return ret;
7124 }
7125
7126 static int intel_default_queue_flip(struct drm_device *dev,
7127                                     struct drm_crtc *crtc,
7128                                     struct drm_framebuffer *fb,
7129                                     struct drm_i915_gem_object *obj)
7130 {
7131         return -ENODEV;
7132 }
7133
7134 static int intel_crtc_page_flip(struct drm_crtc *crtc,
7135                                 struct drm_framebuffer *fb,
7136                                 struct drm_pending_vblank_event *event)
7137 {
7138         struct drm_device *dev = crtc->dev;
7139         struct drm_i915_private *dev_priv = dev->dev_private;
7140         struct intel_framebuffer *intel_fb;
7141         struct drm_i915_gem_object *obj;
7142         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7143         struct intel_unpin_work *work;
7144         unsigned long flags;
7145         int ret;
7146
7147         /* Can't change pixel format via MI display flips. */
7148         if (fb->pixel_format != crtc->fb->pixel_format)
7149                 return -EINVAL;
7150
7151         /*
7152          * TILEOFF/LINOFF registers can't be changed via MI display flips.
7153          * Note that pitch changes could also affect these register.
7154          */
7155         if (INTEL_INFO(dev)->gen > 3 &&
7156             (fb->offsets[0] != crtc->fb->offsets[0] ||
7157              fb->pitches[0] != crtc->fb->pitches[0]))
7158                 return -EINVAL;
7159
7160         work = kzalloc(sizeof *work, GFP_KERNEL);
7161         if (work == NULL)
7162                 return -ENOMEM;
7163
7164         work->event = event;
7165         work->crtc = crtc;
7166         intel_fb = to_intel_framebuffer(crtc->fb);
7167         work->old_fb_obj = intel_fb->obj;
7168         INIT_WORK(&work->work, intel_unpin_work_fn);
7169
7170         ret = drm_vblank_get(dev, intel_crtc->pipe);
7171         if (ret)
7172                 goto free_work;
7173
7174         /* We borrow the event spin lock for protecting unpin_work */
7175         spin_lock_irqsave(&dev->event_lock, flags);
7176         if (intel_crtc->unpin_work) {
7177                 spin_unlock_irqrestore(&dev->event_lock, flags);
7178                 kfree(work);
7179                 drm_vblank_put(dev, intel_crtc->pipe);
7180
7181                 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
7182                 return -EBUSY;
7183         }
7184         intel_crtc->unpin_work = work;
7185         spin_unlock_irqrestore(&dev->event_lock, flags);
7186
7187         intel_fb = to_intel_framebuffer(fb);
7188         obj = intel_fb->obj;
7189
7190         if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
7191                 flush_workqueue(dev_priv->wq);
7192
7193         ret = i915_mutex_lock_interruptible(dev);
7194         if (ret)
7195                 goto cleanup;
7196
7197         /* Reference the objects for the scheduled work. */
7198         drm_gem_object_reference(&work->old_fb_obj->base);
7199         drm_gem_object_reference(&obj->base);
7200
7201         crtc->fb = fb;
7202
7203         work->pending_flip_obj = obj;
7204
7205         work->enable_stall_check = true;
7206
7207         atomic_inc(&intel_crtc->unpin_work_count);
7208
7209         ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
7210         if (ret)
7211                 goto cleanup_pending;
7212
7213         intel_disable_fbc(dev);
7214         intel_mark_fb_busy(obj);
7215         mutex_unlock(&dev->struct_mutex);
7216
7217         trace_i915_flip_request(intel_crtc->plane, obj);
7218
7219         return 0;
7220
7221 cleanup_pending:
7222         atomic_dec(&intel_crtc->unpin_work_count);
7223         drm_gem_object_unreference(&work->old_fb_obj->base);
7224         drm_gem_object_unreference(&obj->base);
7225         mutex_unlock(&dev->struct_mutex);
7226
7227 cleanup:
7228         spin_lock_irqsave(&dev->event_lock, flags);
7229         intel_crtc->unpin_work = NULL;
7230         spin_unlock_irqrestore(&dev->event_lock, flags);
7231
7232         drm_vblank_put(dev, intel_crtc->pipe);
7233 free_work:
7234         kfree(work);
7235
7236         return ret;
7237 }
7238
7239 static struct drm_crtc_helper_funcs intel_helper_funcs = {
7240         .mode_set_base_atomic = intel_pipe_set_base_atomic,
7241         .load_lut = intel_crtc_load_lut,
7242         .disable = intel_crtc_noop,
7243 };
7244
7245 bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
7246 {
7247         struct intel_encoder *other_encoder;
7248         struct drm_crtc *crtc = &encoder->new_crtc->base;
7249
7250         if (WARN_ON(!crtc))
7251                 return false;
7252
7253         list_for_each_entry(other_encoder,
7254                             &crtc->dev->mode_config.encoder_list,
7255                             base.head) {
7256
7257                 if (&other_encoder->new_crtc->base != crtc ||
7258                     encoder == other_encoder)
7259                         continue;
7260                 else
7261                         return true;
7262         }
7263
7264         return false;
7265 }
7266
7267 static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
7268                                   struct drm_crtc *crtc)
7269 {
7270         struct drm_device *dev;
7271         struct drm_crtc *tmp;
7272         int crtc_mask = 1;
7273
7274         WARN(!crtc, "checking null crtc?\n");
7275
7276         dev = crtc->dev;
7277
7278         list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
7279                 if (tmp == crtc)
7280                         break;
7281                 crtc_mask <<= 1;
7282         }
7283
7284         if (encoder->possible_crtcs & crtc_mask)
7285                 return true;
7286         return false;
7287 }
7288
7289 /**
7290  * intel_modeset_update_staged_output_state
7291  *
7292  * Updates the staged output configuration state, e.g. after we've read out the
7293  * current hw state.
7294  */
7295 static void intel_modeset_update_staged_output_state(struct drm_device *dev)
7296 {
7297         struct intel_encoder *encoder;
7298         struct intel_connector *connector;
7299
7300         list_for_each_entry(connector, &dev->mode_config.connector_list,
7301                             base.head) {
7302                 connector->new_encoder =
7303                         to_intel_encoder(connector->base.encoder);
7304         }
7305
7306         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7307                             base.head) {
7308                 encoder->new_crtc =
7309                         to_intel_crtc(encoder->base.crtc);
7310         }
7311 }
7312
7313 /**
7314  * intel_modeset_commit_output_state
7315  *
7316  * This function copies the stage display pipe configuration to the real one.
7317  */
7318 static void intel_modeset_commit_output_state(struct drm_device *dev)
7319 {
7320         struct intel_encoder *encoder;
7321         struct intel_connector *connector;
7322
7323         list_for_each_entry(connector, &dev->mode_config.connector_list,
7324                             base.head) {
7325                 connector->base.encoder = &connector->new_encoder->base;
7326         }
7327
7328         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7329                             base.head) {
7330                 encoder->base.crtc = &encoder->new_crtc->base;
7331         }
7332 }
7333
7334 static struct drm_display_mode *
7335 intel_modeset_adjusted_mode(struct drm_crtc *crtc,
7336                             struct drm_display_mode *mode)
7337 {
7338         struct drm_device *dev = crtc->dev;
7339         struct drm_display_mode *adjusted_mode;
7340         struct drm_encoder_helper_funcs *encoder_funcs;
7341         struct intel_encoder *encoder;
7342
7343         adjusted_mode = drm_mode_duplicate(dev, mode);
7344         if (!adjusted_mode)
7345                 return ERR_PTR(-ENOMEM);
7346
7347         /* Pass our mode to the connectors and the CRTC to give them a chance to
7348          * adjust it according to limitations or connector properties, and also
7349          * a chance to reject the mode entirely.
7350          */
7351         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7352                             base.head) {
7353
7354                 if (&encoder->new_crtc->base != crtc)
7355                         continue;
7356                 encoder_funcs = encoder->base.helper_private;
7357                 if (!(encoder_funcs->mode_fixup(&encoder->base, mode,
7358                                                 adjusted_mode))) {
7359                         DRM_DEBUG_KMS("Encoder fixup failed\n");
7360                         goto fail;
7361                 }
7362         }
7363
7364         if (!(intel_crtc_mode_fixup(crtc, mode, adjusted_mode))) {
7365                 DRM_DEBUG_KMS("CRTC fixup failed\n");
7366                 goto fail;
7367         }
7368         DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
7369
7370         return adjusted_mode;
7371 fail:
7372         drm_mode_destroy(dev, adjusted_mode);
7373         return ERR_PTR(-EINVAL);
7374 }
7375
7376 /* Computes which crtcs are affected and sets the relevant bits in the mask. For
7377  * simplicity we use the crtc's pipe number (because it's easier to obtain). */
7378 static void
7379 intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
7380                              unsigned *prepare_pipes, unsigned *disable_pipes)
7381 {
7382         struct intel_crtc *intel_crtc;
7383         struct drm_device *dev = crtc->dev;
7384         struct intel_encoder *encoder;
7385         struct intel_connector *connector;
7386         struct drm_crtc *tmp_crtc;
7387
7388         *disable_pipes = *modeset_pipes = *prepare_pipes = 0;
7389
7390         /* Check which crtcs have changed outputs connected to them, these need
7391          * to be part of the prepare_pipes mask. We don't (yet) support global
7392          * modeset across multiple crtcs, so modeset_pipes will only have one
7393          * bit set at most. */
7394         list_for_each_entry(connector, &dev->mode_config.connector_list,
7395                             base.head) {
7396                 if (connector->base.encoder == &connector->new_encoder->base)
7397                         continue;
7398
7399                 if (connector->base.encoder) {
7400                         tmp_crtc = connector->base.encoder->crtc;
7401
7402                         *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
7403                 }
7404
7405                 if (connector->new_encoder)
7406                         *prepare_pipes |=
7407                                 1 << connector->new_encoder->new_crtc->pipe;
7408         }
7409
7410         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7411                             base.head) {
7412                 if (encoder->base.crtc == &encoder->new_crtc->base)
7413                         continue;
7414
7415                 if (encoder->base.crtc) {
7416                         tmp_crtc = encoder->base.crtc;
7417
7418                         *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
7419                 }
7420
7421                 if (encoder->new_crtc)
7422                         *prepare_pipes |= 1 << encoder->new_crtc->pipe;
7423         }
7424
7425         /* Check for any pipes that will be fully disabled ... */
7426         list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
7427                             base.head) {
7428                 bool used = false;
7429
7430                 /* Don't try to disable disabled crtcs. */
7431                 if (!intel_crtc->base.enabled)
7432                         continue;
7433
7434                 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7435                                     base.head) {
7436                         if (encoder->new_crtc == intel_crtc)
7437                                 used = true;
7438                 }
7439
7440                 if (!used)
7441                         *disable_pipes |= 1 << intel_crtc->pipe;
7442         }
7443
7444
7445         /* set_mode is also used to update properties on life display pipes. */
7446         intel_crtc = to_intel_crtc(crtc);
7447         if (crtc->enabled)
7448                 *prepare_pipes |= 1 << intel_crtc->pipe;
7449
7450         /* We only support modeset on one single crtc, hence we need to do that
7451          * only for the passed in crtc iff we change anything else than just
7452          * disable crtcs.
7453          *
7454          * This is actually not true, to be fully compatible with the old crtc
7455          * helper we automatically disable _any_ output (i.e. doesn't need to be
7456          * connected to the crtc we're modesetting on) if it's disconnected.
7457          * Which is a rather nutty api (since changed the output configuration
7458          * without userspace's explicit request can lead to confusion), but
7459          * alas. Hence we currently need to modeset on all pipes we prepare. */
7460         if (*prepare_pipes)
7461                 *modeset_pipes = *prepare_pipes;
7462
7463         /* ... and mask these out. */
7464         *modeset_pipes &= ~(*disable_pipes);
7465         *prepare_pipes &= ~(*disable_pipes);
7466 }
7467
7468 static bool intel_crtc_in_use(struct drm_crtc *crtc)
7469 {
7470         struct drm_encoder *encoder;
7471         struct drm_device *dev = crtc->dev;
7472
7473         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
7474                 if (encoder->crtc == crtc)
7475                         return true;
7476
7477         return false;
7478 }
7479
7480 static void
7481 intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
7482 {
7483         struct intel_encoder *intel_encoder;
7484         struct intel_crtc *intel_crtc;
7485         struct drm_connector *connector;
7486
7487         list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
7488                             base.head) {
7489                 if (!intel_encoder->base.crtc)
7490                         continue;
7491
7492                 intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
7493
7494                 if (prepare_pipes & (1 << intel_crtc->pipe))
7495                         intel_encoder->connectors_active = false;
7496         }
7497
7498         intel_modeset_commit_output_state(dev);
7499
7500         /* Update computed state. */
7501         list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
7502                             base.head) {
7503                 intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
7504         }
7505
7506         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
7507                 if (!connector->encoder || !connector->encoder->crtc)
7508                         continue;
7509
7510                 intel_crtc = to_intel_crtc(connector->encoder->crtc);
7511
7512                 if (prepare_pipes & (1 << intel_crtc->pipe)) {
7513                         struct drm_property *dpms_property =
7514                                 dev->mode_config.dpms_property;
7515
7516                         connector->dpms = DRM_MODE_DPMS_ON;
7517                         drm_object_property_set_value(&connector->base,
7518                                                          dpms_property,
7519                                                          DRM_MODE_DPMS_ON);
7520
7521                         intel_encoder = to_intel_encoder(connector->encoder);
7522                         intel_encoder->connectors_active = true;
7523                 }
7524         }
7525
7526 }
7527
7528 #define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
7529         list_for_each_entry((intel_crtc), \
7530                             &(dev)->mode_config.crtc_list, \
7531                             base.head) \
7532                 if (mask & (1 <<(intel_crtc)->pipe)) \
7533
7534 void
7535 intel_modeset_check_state(struct drm_device *dev)
7536 {
7537         struct intel_crtc *crtc;
7538         struct intel_encoder *encoder;
7539         struct intel_connector *connector;
7540
7541         list_for_each_entry(connector, &dev->mode_config.connector_list,
7542                             base.head) {
7543                 /* This also checks the encoder/connector hw state with the
7544                  * ->get_hw_state callbacks. */
7545                 intel_connector_check_state(connector);
7546
7547                 WARN(&connector->new_encoder->base != connector->base.encoder,
7548                      "connector's staged encoder doesn't match current encoder\n");
7549         }
7550
7551         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7552                             base.head) {
7553                 bool enabled = false;
7554                 bool active = false;
7555                 enum pipe pipe, tracked_pipe;
7556
7557                 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
7558                               encoder->base.base.id,
7559                               drm_get_encoder_name(&encoder->base));
7560
7561                 WARN(&encoder->new_crtc->base != encoder->base.crtc,
7562                      "encoder's stage crtc doesn't match current crtc\n");
7563                 WARN(encoder->connectors_active && !encoder->base.crtc,
7564                      "encoder's active_connectors set, but no crtc\n");
7565
7566                 list_for_each_entry(connector, &dev->mode_config.connector_list,
7567                                     base.head) {
7568                         if (connector->base.encoder != &encoder->base)
7569                                 continue;
7570                         enabled = true;
7571                         if (connector->base.dpms != DRM_MODE_DPMS_OFF)
7572                                 active = true;
7573                 }
7574                 WARN(!!encoder->base.crtc != enabled,
7575                      "encoder's enabled state mismatch "
7576                      "(expected %i, found %i)\n",
7577                      !!encoder->base.crtc, enabled);
7578                 WARN(active && !encoder->base.crtc,
7579                      "active encoder with no crtc\n");
7580
7581                 WARN(encoder->connectors_active != active,
7582                      "encoder's computed active state doesn't match tracked active state "
7583                      "(expected %i, found %i)\n", active, encoder->connectors_active);
7584
7585                 active = encoder->get_hw_state(encoder, &pipe);
7586                 WARN(active != encoder->connectors_active,
7587                      "encoder's hw state doesn't match sw tracking "
7588                      "(expected %i, found %i)\n",
7589                      encoder->connectors_active, active);
7590
7591                 if (!encoder->base.crtc)
7592                         continue;
7593
7594                 tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
7595                 WARN(active && pipe != tracked_pipe,
7596                      "active encoder's pipe doesn't match"
7597                      "(expected %i, found %i)\n",
7598                      tracked_pipe, pipe);
7599
7600         }
7601
7602         list_for_each_entry(crtc, &dev->mode_config.crtc_list,
7603                             base.head) {
7604                 bool enabled = false;
7605                 bool active = false;
7606
7607                 DRM_DEBUG_KMS("[CRTC:%d]\n",
7608                               crtc->base.base.id);
7609
7610                 WARN(crtc->active && !crtc->base.enabled,
7611                      "active crtc, but not enabled in sw tracking\n");
7612
7613                 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7614                                     base.head) {
7615                         if (encoder->base.crtc != &crtc->base)
7616                                 continue;
7617                         enabled = true;
7618                         if (encoder->connectors_active)
7619                                 active = true;
7620                 }
7621                 WARN(active != crtc->active,
7622                      "crtc's computed active state doesn't match tracked active state "
7623                      "(expected %i, found %i)\n", active, crtc->active);
7624                 WARN(enabled != crtc->base.enabled,
7625                      "crtc's computed enabled state doesn't match tracked enabled state "
7626                      "(expected %i, found %i)\n", enabled, crtc->base.enabled);
7627
7628                 assert_pipe(dev->dev_private, crtc->pipe, crtc->active);
7629         }
7630 }
7631
7632 int intel_set_mode(struct drm_crtc *crtc,
7633                    struct drm_display_mode *mode,
7634                    int x, int y, struct drm_framebuffer *fb)
7635 {
7636         struct drm_device *dev = crtc->dev;
7637         drm_i915_private_t *dev_priv = dev->dev_private;
7638         struct drm_display_mode *adjusted_mode, *saved_mode, *saved_hwmode;
7639         struct intel_crtc *intel_crtc;
7640         unsigned disable_pipes, prepare_pipes, modeset_pipes;
7641         int ret = 0;
7642
7643         saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL);
7644         if (!saved_mode)
7645                 return -ENOMEM;
7646         saved_hwmode = saved_mode + 1;
7647
7648         intel_modeset_affected_pipes(crtc, &modeset_pipes,
7649                                      &prepare_pipes, &disable_pipes);
7650
7651         DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
7652                       modeset_pipes, prepare_pipes, disable_pipes);
7653
7654         for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
7655                 intel_crtc_disable(&intel_crtc->base);
7656
7657         *saved_hwmode = crtc->hwmode;
7658         *saved_mode = crtc->mode;
7659
7660         /* Hack: Because we don't (yet) support global modeset on multiple
7661          * crtcs, we don't keep track of the new mode for more than one crtc.
7662          * Hence simply check whether any bit is set in modeset_pipes in all the
7663          * pieces of code that are not yet converted to deal with mutliple crtcs
7664          * changing their mode at the same time. */
7665         adjusted_mode = NULL;
7666         if (modeset_pipes) {
7667                 adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
7668                 if (IS_ERR(adjusted_mode)) {
7669                         ret = PTR_ERR(adjusted_mode);
7670                         goto out;
7671                 }
7672         }
7673
7674         for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
7675                 if (intel_crtc->base.enabled)
7676                         dev_priv->display.crtc_disable(&intel_crtc->base);
7677         }
7678
7679         /* crtc->mode is already used by the ->mode_set callbacks, hence we need
7680          * to set it here already despite that we pass it down the callchain.
7681          */
7682         if (modeset_pipes)
7683                 crtc->mode = *mode;
7684
7685         /* Only after disabling all output pipelines that will be changed can we
7686          * update the the output configuration. */
7687         intel_modeset_update_state(dev, prepare_pipes);
7688
7689         if (dev_priv->display.modeset_global_resources)
7690                 dev_priv->display.modeset_global_resources(dev);
7691
7692         /* Set up the DPLL and any encoders state that needs to adjust or depend
7693          * on the DPLL.
7694          */
7695         for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
7696                 ret = intel_crtc_mode_set(&intel_crtc->base,
7697                                           mode, adjusted_mode,
7698                                           x, y, fb);
7699                 if (ret)
7700                         goto done;
7701         }
7702
7703         /* Now enable the clocks, plane, pipe, and connectors that we set up. */
7704         for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
7705                 dev_priv->display.crtc_enable(&intel_crtc->base);
7706
7707         if (modeset_pipes) {
7708                 /* Store real post-adjustment hardware mode. */
7709                 crtc->hwmode = *adjusted_mode;
7710
7711                 /* Calculate and store various constants which
7712                  * are later needed by vblank and swap-completion
7713                  * timestamping. They are derived from true hwmode.
7714                  */
7715                 drm_calc_timestamping_constants(crtc);
7716         }
7717
7718         /* FIXME: add subpixel order */
7719 done:
7720         drm_mode_destroy(dev, adjusted_mode);
7721         if (ret && crtc->enabled) {
7722                 crtc->hwmode = *saved_hwmode;
7723                 crtc->mode = *saved_mode;
7724         } else {
7725                 intel_modeset_check_state(dev);
7726         }
7727
7728 out:
7729         kfree(saved_mode);
7730         return ret;
7731 }
7732
7733 void intel_crtc_restore_mode(struct drm_crtc *crtc)
7734 {
7735         intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
7736 }
7737
7738 #undef for_each_intel_crtc_masked
7739
7740 static void intel_set_config_free(struct intel_set_config *config)
7741 {
7742         if (!config)
7743                 return;
7744
7745         kfree(config->save_connector_encoders);
7746         kfree(config->save_encoder_crtcs);
7747         kfree(config);
7748 }
7749
7750 static int intel_set_config_save_state(struct drm_device *dev,
7751                                        struct intel_set_config *config)
7752 {
7753         struct drm_encoder *encoder;
7754         struct drm_connector *connector;
7755         int count;
7756
7757         config->save_encoder_crtcs =
7758                 kcalloc(dev->mode_config.num_encoder,
7759                         sizeof(struct drm_crtc *), GFP_KERNEL);
7760         if (!config->save_encoder_crtcs)
7761                 return -ENOMEM;
7762
7763         config->save_connector_encoders =
7764                 kcalloc(dev->mode_config.num_connector,
7765                         sizeof(struct drm_encoder *), GFP_KERNEL);
7766         if (!config->save_connector_encoders)
7767                 return -ENOMEM;
7768
7769         /* Copy data. Note that driver private data is not affected.
7770          * Should anything bad happen only the expected state is
7771          * restored, not the drivers personal bookkeeping.
7772          */
7773         count = 0;
7774         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
7775                 config->save_encoder_crtcs[count++] = encoder->crtc;
7776         }
7777
7778         count = 0;
7779         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
7780                 config->save_connector_encoders[count++] = connector->encoder;
7781         }
7782
7783         return 0;
7784 }
7785
7786 static void intel_set_config_restore_state(struct drm_device *dev,
7787                                            struct intel_set_config *config)
7788 {
7789         struct intel_encoder *encoder;
7790         struct intel_connector *connector;
7791         int count;
7792
7793         count = 0;
7794         list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
7795                 encoder->new_crtc =
7796                         to_intel_crtc(config->save_encoder_crtcs[count++]);
7797         }
7798
7799         count = 0;
7800         list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
7801                 connector->new_encoder =
7802                         to_intel_encoder(config->save_connector_encoders[count++]);
7803         }
7804 }
7805
7806 static void
7807 intel_set_config_compute_mode_changes(struct drm_mode_set *set,
7808                                       struct intel_set_config *config)
7809 {
7810
7811         /* We should be able to check here if the fb has the same properties
7812          * and then just flip_or_move it */
7813         if (set->crtc->fb != set->fb) {
7814                 /* If we have no fb then treat it as a full mode set */
7815                 if (set->crtc->fb == NULL) {
7816                         DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
7817                         config->mode_changed = true;
7818                 } else if (set->fb == NULL) {
7819                         config->mode_changed = true;
7820                 } else if (set->fb->depth != set->crtc->fb->depth) {
7821                         config->mode_changed = true;
7822                 } else if (set->fb->bits_per_pixel !=
7823                            set->crtc->fb->bits_per_pixel) {
7824                         config->mode_changed = true;
7825                 } else
7826                         config->fb_changed = true;
7827         }
7828
7829         if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
7830                 config->fb_changed = true;
7831
7832         if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
7833                 DRM_DEBUG_KMS("modes are different, full mode set\n");
7834                 drm_mode_debug_printmodeline(&set->crtc->mode);
7835                 drm_mode_debug_printmodeline(set->mode);
7836                 config->mode_changed = true;
7837         }
7838 }
7839
7840 static int
7841 intel_modeset_stage_output_state(struct drm_device *dev,
7842                                  struct drm_mode_set *set,
7843                                  struct intel_set_config *config)
7844 {
7845         struct drm_crtc *new_crtc;
7846         struct intel_connector *connector;
7847         struct intel_encoder *encoder;
7848         int count, ro;
7849
7850         /* The upper layers ensure that we either disabl a crtc or have a list
7851          * of connectors. For paranoia, double-check this. */
7852         WARN_ON(!set->fb && (set->num_connectors != 0));
7853         WARN_ON(set->fb && (set->num_connectors == 0));
7854
7855         count = 0;
7856         list_for_each_entry(connector, &dev->mode_config.connector_list,
7857                             base.head) {
7858                 /* Otherwise traverse passed in connector list and get encoders
7859                  * for them. */
7860                 for (ro = 0; ro < set->num_connectors; ro++) {
7861                         if (set->connectors[ro] == &connector->base) {
7862                                 connector->new_encoder = connector->encoder;
7863                                 break;
7864                         }
7865                 }
7866
7867                 /* If we disable the crtc, disable all its connectors. Also, if
7868                  * the connector is on the changing crtc but not on the new
7869                  * connector list, disable it. */
7870                 if ((!set->fb || ro == set->num_connectors) &&
7871                     connector->base.encoder &&
7872                     connector->base.encoder->crtc == set->crtc) {
7873                         connector->new_encoder = NULL;
7874
7875                         DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
7876                                 connector->base.base.id,
7877                                 drm_get_connector_name(&connector->base));
7878                 }
7879
7880
7881                 if (&connector->new_encoder->base != connector->base.encoder) {
7882                         DRM_DEBUG_KMS("encoder changed, full mode switch\n");
7883                         config->mode_changed = true;
7884                 }
7885         }
7886         /* connector->new_encoder is now updated for all connectors. */
7887
7888         /* Update crtc of enabled connectors. */
7889         count = 0;
7890         list_for_each_entry(connector, &dev->mode_config.connector_list,
7891                             base.head) {
7892                 if (!connector->new_encoder)
7893                         continue;
7894
7895                 new_crtc = connector->new_encoder->base.crtc;
7896
7897                 for (ro = 0; ro < set->num_connectors; ro++) {
7898                         if (set->connectors[ro] == &connector->base)
7899                                 new_crtc = set->crtc;
7900                 }
7901
7902                 /* Make sure the new CRTC will work with the encoder */
7903                 if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
7904                                            new_crtc)) {
7905                         return -EINVAL;
7906                 }
7907                 connector->encoder->new_crtc = to_intel_crtc(new_crtc);
7908
7909                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
7910                         connector->base.base.id,
7911                         drm_get_connector_name(&connector->base),
7912                         new_crtc->base.id);
7913         }
7914
7915         /* Check for any encoders that needs to be disabled. */
7916         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7917                             base.head) {
7918                 list_for_each_entry(connector,
7919                                     &dev->mode_config.connector_list,
7920                                     base.head) {
7921                         if (connector->new_encoder == encoder) {
7922                                 WARN_ON(!connector->new_encoder->new_crtc);
7923
7924                                 goto next_encoder;
7925                         }
7926                 }
7927                 encoder->new_crtc = NULL;
7928 next_encoder:
7929                 /* Only now check for crtc changes so we don't miss encoders
7930                  * that will be disabled. */
7931                 if (&encoder->new_crtc->base != encoder->base.crtc) {
7932                         DRM_DEBUG_KMS("crtc changed, full mode switch\n");
7933                         config->mode_changed = true;
7934                 }
7935         }
7936         /* Now we've also updated encoder->new_crtc for all encoders. */
7937
7938         return 0;
7939 }
7940
7941 static int intel_crtc_set_config(struct drm_mode_set *set)
7942 {
7943         struct drm_device *dev;
7944         struct drm_mode_set save_set;
7945         struct intel_set_config *config;
7946         int ret;
7947
7948         BUG_ON(!set);
7949         BUG_ON(!set->crtc);
7950         BUG_ON(!set->crtc->helper_private);
7951
7952         if (!set->mode)
7953                 set->fb = NULL;
7954
7955         /* The fb helper likes to play gross jokes with ->mode_set_config.
7956          * Unfortunately the crtc helper doesn't do much at all for this case,
7957          * so we have to cope with this madness until the fb helper is fixed up. */
7958         if (set->fb && set->num_connectors == 0)
7959                 return 0;
7960
7961         if (set->fb) {
7962                 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
7963                                 set->crtc->base.id, set->fb->base.id,
7964                                 (int)set->num_connectors, set->x, set->y);
7965         } else {
7966                 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
7967         }
7968
7969         dev = set->crtc->dev;
7970
7971         ret = -ENOMEM;
7972         config = kzalloc(sizeof(*config), GFP_KERNEL);
7973         if (!config)
7974                 goto out_config;
7975
7976         ret = intel_set_config_save_state(dev, config);
7977         if (ret)
7978                 goto out_config;
7979
7980         save_set.crtc = set->crtc;
7981         save_set.mode = &set->crtc->mode;
7982         save_set.x = set->crtc->x;
7983         save_set.y = set->crtc->y;
7984         save_set.fb = set->crtc->fb;
7985
7986         /* Compute whether we need a full modeset, only an fb base update or no
7987          * change at all. In the future we might also check whether only the
7988          * mode changed, e.g. for LVDS where we only change the panel fitter in
7989          * such cases. */
7990         intel_set_config_compute_mode_changes(set, config);
7991
7992         ret = intel_modeset_stage_output_state(dev, set, config);
7993         if (ret)
7994                 goto fail;
7995
7996         if (config->mode_changed) {
7997                 if (set->mode) {
7998                         DRM_DEBUG_KMS("attempting to set mode from"
7999                                         " userspace\n");
8000                         drm_mode_debug_printmodeline(set->mode);
8001                 }
8002
8003                 ret = intel_set_mode(set->crtc, set->mode,
8004                                      set->x, set->y, set->fb);
8005                 if (ret) {
8006                         DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n",
8007                                   set->crtc->base.id, ret);
8008                         goto fail;
8009                 }
8010         } else if (config->fb_changed) {
8011                 ret = intel_pipe_set_base(set->crtc,
8012                                           set->x, set->y, set->fb);
8013         }
8014
8015         intel_set_config_free(config);
8016
8017         return 0;
8018
8019 fail:
8020         intel_set_config_restore_state(dev, config);
8021
8022         /* Try to restore the config */
8023         if (config->mode_changed &&
8024             intel_set_mode(save_set.crtc, save_set.mode,
8025                            save_set.x, save_set.y, save_set.fb))
8026                 DRM_ERROR("failed to restore config after modeset failure\n");
8027
8028 out_config:
8029         intel_set_config_free(config);
8030         return ret;
8031 }
8032
8033 static const struct drm_crtc_funcs intel_crtc_funcs = {
8034         .cursor_set = intel_crtc_cursor_set,
8035         .cursor_move = intel_crtc_cursor_move,
8036         .gamma_set = intel_crtc_gamma_set,
8037         .set_config = intel_crtc_set_config,
8038         .destroy = intel_crtc_destroy,
8039         .page_flip = intel_crtc_page_flip,
8040 };
8041
8042 static void intel_cpu_pll_init(struct drm_device *dev)
8043 {
8044         if (HAS_DDI(dev))
8045                 intel_ddi_pll_init(dev);
8046 }
8047
8048 static void intel_pch_pll_init(struct drm_device *dev)
8049 {
8050         drm_i915_private_t *dev_priv = dev->dev_private;
8051         int i;
8052
8053         if (dev_priv->num_pch_pll == 0) {
8054                 DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
8055                 return;
8056         }
8057
8058         for (i = 0; i < dev_priv->num_pch_pll; i++) {
8059                 dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
8060                 dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
8061                 dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
8062         }
8063 }
8064
8065 static void intel_crtc_init(struct drm_device *dev, int pipe)
8066 {
8067         drm_i915_private_t *dev_priv = dev->dev_private;
8068         struct intel_crtc *intel_crtc;
8069         int i;
8070
8071         intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
8072         if (intel_crtc == NULL)
8073                 return;
8074
8075         drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
8076
8077         drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
8078         for (i = 0; i < 256; i++) {
8079                 intel_crtc->lut_r[i] = i;
8080                 intel_crtc->lut_g[i] = i;
8081                 intel_crtc->lut_b[i] = i;
8082         }
8083
8084         /* Swap pipes & planes for FBC on pre-965 */
8085         intel_crtc->pipe = pipe;
8086         intel_crtc->plane = pipe;
8087         intel_crtc->cpu_transcoder = pipe;
8088         if (IS_MOBILE(dev) && IS_GEN3(dev)) {
8089                 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
8090                 intel_crtc->plane = !pipe;
8091         }
8092
8093         BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
8094                dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
8095         dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
8096         dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
8097
8098         intel_crtc->bpp = 24; /* default for pre-Ironlake */
8099
8100         drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
8101 }
8102
8103 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
8104                                 struct drm_file *file)
8105 {
8106         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
8107         struct drm_mode_object *drmmode_obj;
8108         struct intel_crtc *crtc;
8109
8110         if (!drm_core_check_feature(dev, DRIVER_MODESET))
8111                 return -ENODEV;
8112
8113         drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
8114                         DRM_MODE_OBJECT_CRTC);
8115
8116         if (!drmmode_obj) {
8117                 DRM_ERROR("no such CRTC id\n");
8118                 return -EINVAL;
8119         }
8120
8121         crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
8122         pipe_from_crtc_id->pipe = crtc->pipe;
8123
8124         return 0;
8125 }
8126
8127 static int intel_encoder_clones(struct intel_encoder *encoder)
8128 {
8129         struct drm_device *dev = encoder->base.dev;
8130         struct intel_encoder *source_encoder;
8131         int index_mask = 0;
8132         int entry = 0;
8133
8134         list_for_each_entry(source_encoder,
8135                             &dev->mode_config.encoder_list, base.head) {
8136
8137                 if (encoder == source_encoder)
8138                         index_mask |= (1 << entry);
8139
8140                 /* Intel hw has only one MUX where enocoders could be cloned. */
8141                 if (encoder->cloneable && source_encoder->cloneable)
8142                         index_mask |= (1 << entry);
8143
8144                 entry++;
8145         }
8146
8147         return index_mask;
8148 }
8149
8150 static bool has_edp_a(struct drm_device *dev)
8151 {
8152         struct drm_i915_private *dev_priv = dev->dev_private;
8153
8154         if (!IS_MOBILE(dev))
8155                 return false;
8156
8157         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
8158                 return false;
8159
8160         if (IS_GEN5(dev) &&
8161             (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
8162                 return false;
8163
8164         return true;
8165 }
8166
8167 static void intel_setup_outputs(struct drm_device *dev)
8168 {
8169         struct drm_i915_private *dev_priv = dev->dev_private;
8170         struct intel_encoder *encoder;
8171         bool dpd_is_edp = false;
8172         bool has_lvds;
8173
8174         has_lvds = intel_lvds_init(dev);
8175         if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
8176                 /* disable the panel fitter on everything but LVDS */
8177                 I915_WRITE(PFIT_CONTROL, 0);
8178         }
8179
8180         if (!(HAS_DDI(dev) && (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
8181                 intel_crt_init(dev);
8182
8183         if (HAS_DDI(dev)) {
8184                 int found;
8185
8186                 /* Haswell uses DDI functions to detect digital outputs */
8187                 found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
8188                 /* DDI A only supports eDP */
8189                 if (found)
8190                         intel_ddi_init(dev, PORT_A);
8191
8192                 /* DDI B, C and D detection is indicated by the SFUSE_STRAP
8193                  * register */
8194                 found = I915_READ(SFUSE_STRAP);
8195
8196                 if (found & SFUSE_STRAP_DDIB_DETECTED)
8197                         intel_ddi_init(dev, PORT_B);
8198                 if (found & SFUSE_STRAP_DDIC_DETECTED)
8199                         intel_ddi_init(dev, PORT_C);
8200                 if (found & SFUSE_STRAP_DDID_DETECTED)
8201                         intel_ddi_init(dev, PORT_D);
8202         } else if (HAS_PCH_SPLIT(dev)) {
8203                 int found;
8204                 dpd_is_edp = intel_dpd_is_edp(dev);
8205
8206                 if (has_edp_a(dev))
8207                         intel_dp_init(dev, DP_A, PORT_A);
8208
8209                 if (I915_READ(HDMIB) & PORT_DETECTED) {
8210                         /* PCH SDVOB multiplex with HDMIB */
8211                         found = intel_sdvo_init(dev, PCH_SDVOB, true);
8212                         if (!found)
8213                                 intel_hdmi_init(dev, HDMIB, PORT_B);
8214                         if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
8215                                 intel_dp_init(dev, PCH_DP_B, PORT_B);
8216                 }
8217
8218                 if (I915_READ(HDMIC) & PORT_DETECTED)
8219                         intel_hdmi_init(dev, HDMIC, PORT_C);
8220
8221                 if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
8222                         intel_hdmi_init(dev, HDMID, PORT_D);
8223
8224                 if (I915_READ(PCH_DP_C) & DP_DETECTED)
8225                         intel_dp_init(dev, PCH_DP_C, PORT_C);
8226
8227                 if (I915_READ(PCH_DP_D) & DP_DETECTED)
8228                         intel_dp_init(dev, PCH_DP_D, PORT_D);
8229         } else if (IS_VALLEYVIEW(dev)) {
8230                 /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
8231                 if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
8232                         intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
8233
8234                 if (I915_READ(VLV_DISPLAY_BASE + SDVOB) & PORT_DETECTED) {
8235                         intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOB, PORT_B);
8236                         if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
8237                                 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
8238                 }
8239
8240                 if (I915_READ(VLV_DISPLAY_BASE + SDVOC) & PORT_DETECTED)
8241                         intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOC, PORT_C);
8242
8243         } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
8244                 bool found = false;
8245
8246                 if (I915_READ(SDVOB) & SDVO_DETECTED) {
8247                         DRM_DEBUG_KMS("probing SDVOB\n");
8248                         found = intel_sdvo_init(dev, SDVOB, true);
8249                         if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
8250                                 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
8251                                 intel_hdmi_init(dev, SDVOB, PORT_B);
8252                         }
8253
8254                         if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
8255                                 DRM_DEBUG_KMS("probing DP_B\n");
8256                                 intel_dp_init(dev, DP_B, PORT_B);
8257                         }
8258                 }
8259
8260                 /* Before G4X SDVOC doesn't have its own detect register */
8261
8262                 if (I915_READ(SDVOB) & SDVO_DETECTED) {
8263                         DRM_DEBUG_KMS("probing SDVOC\n");
8264                         found = intel_sdvo_init(dev, SDVOC, false);
8265                 }
8266
8267                 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
8268
8269                         if (SUPPORTS_INTEGRATED_HDMI(dev)) {
8270                                 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
8271                                 intel_hdmi_init(dev, SDVOC, PORT_C);
8272                         }
8273                         if (SUPPORTS_INTEGRATED_DP(dev)) {
8274                                 DRM_DEBUG_KMS("probing DP_C\n");
8275                                 intel_dp_init(dev, DP_C, PORT_C);
8276                         }
8277                 }
8278
8279                 if (SUPPORTS_INTEGRATED_DP(dev) &&
8280                     (I915_READ(DP_D) & DP_DETECTED)) {
8281                         DRM_DEBUG_KMS("probing DP_D\n");
8282                         intel_dp_init(dev, DP_D, PORT_D);
8283                 }
8284         } else if (IS_GEN2(dev))
8285                 intel_dvo_init(dev);
8286
8287         if (SUPPORTS_TV(dev))
8288                 intel_tv_init(dev);
8289
8290         list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
8291                 encoder->base.possible_crtcs = encoder->crtc_mask;
8292                 encoder->base.possible_clones =
8293                         intel_encoder_clones(encoder);
8294         }
8295
8296         intel_init_pch_refclk(dev);
8297
8298         drm_helper_move_panel_connectors_to_head(dev);
8299 }
8300
8301 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
8302 {
8303         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
8304
8305         drm_framebuffer_cleanup(fb);
8306         drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
8307
8308         kfree(intel_fb);
8309 }
8310
8311 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
8312                                                 struct drm_file *file,
8313                                                 unsigned int *handle)
8314 {
8315         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
8316         struct drm_i915_gem_object *obj = intel_fb->obj;
8317
8318         return drm_gem_handle_create(file, &obj->base, handle);
8319 }
8320
8321 static const struct drm_framebuffer_funcs intel_fb_funcs = {
8322         .destroy = intel_user_framebuffer_destroy,
8323         .create_handle = intel_user_framebuffer_create_handle,
8324 };
8325
8326 int intel_framebuffer_init(struct drm_device *dev,
8327                            struct intel_framebuffer *intel_fb,
8328                            struct drm_mode_fb_cmd2 *mode_cmd,
8329                            struct drm_i915_gem_object *obj)
8330 {
8331         int ret;
8332
8333         if (obj->tiling_mode == I915_TILING_Y)
8334                 return -EINVAL;
8335
8336         if (mode_cmd->pitches[0] & 63)
8337                 return -EINVAL;
8338
8339         /* FIXME <= Gen4 stride limits are bit unclear */
8340         if (mode_cmd->pitches[0] > 32768)
8341                 return -EINVAL;
8342
8343         if (obj->tiling_mode != I915_TILING_NONE &&
8344             mode_cmd->pitches[0] != obj->stride)
8345                 return -EINVAL;
8346
8347         /* Reject formats not supported by any plane early. */
8348         switch (mode_cmd->pixel_format) {
8349         case DRM_FORMAT_C8:
8350         case DRM_FORMAT_RGB565:
8351         case DRM_FORMAT_XRGB8888:
8352         case DRM_FORMAT_ARGB8888:
8353                 break;
8354         case DRM_FORMAT_XRGB1555:
8355         case DRM_FORMAT_ARGB1555:
8356                 if (INTEL_INFO(dev)->gen > 3)
8357                         return -EINVAL;
8358                 break;
8359         case DRM_FORMAT_XBGR8888:
8360         case DRM_FORMAT_ABGR8888:
8361         case DRM_FORMAT_XRGB2101010:
8362         case DRM_FORMAT_ARGB2101010:
8363         case DRM_FORMAT_XBGR2101010:
8364         case DRM_FORMAT_ABGR2101010:
8365                 if (INTEL_INFO(dev)->gen < 4)
8366                         return -EINVAL;
8367                 break;
8368         case DRM_FORMAT_YUYV:
8369         case DRM_FORMAT_UYVY:
8370         case DRM_FORMAT_YVYU:
8371         case DRM_FORMAT_VYUY:
8372                 if (INTEL_INFO(dev)->gen < 6)
8373                         return -EINVAL;
8374                 break;
8375         default:
8376                 DRM_DEBUG_KMS("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
8377                 return -EINVAL;
8378         }
8379
8380         /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
8381         if (mode_cmd->offsets[0] != 0)
8382                 return -EINVAL;
8383
8384         ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
8385         if (ret) {
8386                 DRM_ERROR("framebuffer init failed %d\n", ret);
8387                 return ret;
8388         }
8389
8390         drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
8391         intel_fb->obj = obj;
8392         return 0;
8393 }
8394
8395 static struct drm_framebuffer *
8396 intel_user_framebuffer_create(struct drm_device *dev,
8397                               struct drm_file *filp,
8398                               struct drm_mode_fb_cmd2 *mode_cmd)
8399 {
8400         struct drm_i915_gem_object *obj;
8401
8402         obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
8403                                                 mode_cmd->handles[0]));
8404         if (&obj->base == NULL)
8405                 return ERR_PTR(-ENOENT);
8406
8407         return intel_framebuffer_create(dev, mode_cmd, obj);
8408 }
8409
8410 static const struct drm_mode_config_funcs intel_mode_funcs = {
8411         .fb_create = intel_user_framebuffer_create,
8412         .output_poll_changed = intel_fb_output_poll_changed,
8413 };
8414
8415 /* Set up chip specific display functions */
8416 static void intel_init_display(struct drm_device *dev)
8417 {
8418         struct drm_i915_private *dev_priv = dev->dev_private;
8419
8420         /* We always want a DPMS function */
8421         if (HAS_DDI(dev)) {
8422                 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
8423                 dev_priv->display.crtc_enable = haswell_crtc_enable;
8424                 dev_priv->display.crtc_disable = haswell_crtc_disable;
8425                 dev_priv->display.off = haswell_crtc_off;
8426                 dev_priv->display.update_plane = ironlake_update_plane;
8427         } else if (HAS_PCH_SPLIT(dev)) {
8428                 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
8429                 dev_priv->display.crtc_enable = ironlake_crtc_enable;
8430                 dev_priv->display.crtc_disable = ironlake_crtc_disable;
8431                 dev_priv->display.off = ironlake_crtc_off;
8432                 dev_priv->display.update_plane = ironlake_update_plane;
8433         } else {
8434                 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
8435                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
8436                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
8437                 dev_priv->display.off = i9xx_crtc_off;
8438                 dev_priv->display.update_plane = i9xx_update_plane;
8439         }
8440
8441         /* Returns the core display clock speed */
8442         if (IS_VALLEYVIEW(dev))
8443                 dev_priv->display.get_display_clock_speed =
8444                         valleyview_get_display_clock_speed;
8445         else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
8446                 dev_priv->display.get_display_clock_speed =
8447                         i945_get_display_clock_speed;
8448         else if (IS_I915G(dev))
8449                 dev_priv->display.get_display_clock_speed =
8450                         i915_get_display_clock_speed;
8451         else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
8452                 dev_priv->display.get_display_clock_speed =
8453                         i9xx_misc_get_display_clock_speed;
8454         else if (IS_I915GM(dev))
8455                 dev_priv->display.get_display_clock_speed =
8456                         i915gm_get_display_clock_speed;
8457         else if (IS_I865G(dev))
8458                 dev_priv->display.get_display_clock_speed =
8459                         i865_get_display_clock_speed;
8460         else if (IS_I85X(dev))
8461                 dev_priv->display.get_display_clock_speed =
8462                         i855_get_display_clock_speed;
8463         else /* 852, 830 */
8464                 dev_priv->display.get_display_clock_speed =
8465                         i830_get_display_clock_speed;
8466
8467         if (HAS_PCH_SPLIT(dev)) {
8468                 if (IS_GEN5(dev)) {
8469                         dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
8470                         dev_priv->display.write_eld = ironlake_write_eld;
8471                 } else if (IS_GEN6(dev)) {
8472                         dev_priv->display.fdi_link_train = gen6_fdi_link_train;
8473                         dev_priv->display.write_eld = ironlake_write_eld;
8474                 } else if (IS_IVYBRIDGE(dev)) {
8475                         /* FIXME: detect B0+ stepping and use auto training */
8476                         dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
8477                         dev_priv->display.write_eld = ironlake_write_eld;
8478                         dev_priv->display.modeset_global_resources =
8479                                 ivb_modeset_global_resources;
8480                 } else if (IS_HASWELL(dev)) {
8481                         dev_priv->display.fdi_link_train = hsw_fdi_link_train;
8482                         dev_priv->display.write_eld = haswell_write_eld;
8483                 }
8484         } else if (IS_G4X(dev)) {
8485                 dev_priv->display.write_eld = g4x_write_eld;
8486         }
8487
8488         /* Default just returns -ENODEV to indicate unsupported */
8489         dev_priv->display.queue_flip = intel_default_queue_flip;
8490
8491         switch (INTEL_INFO(dev)->gen) {
8492         case 2:
8493                 dev_priv->display.queue_flip = intel_gen2_queue_flip;
8494                 break;
8495
8496         case 3:
8497                 dev_priv->display.queue_flip = intel_gen3_queue_flip;
8498                 break;
8499
8500         case 4:
8501         case 5:
8502                 dev_priv->display.queue_flip = intel_gen4_queue_flip;
8503                 break;
8504
8505         case 6:
8506                 dev_priv->display.queue_flip = intel_gen6_queue_flip;
8507                 break;
8508         case 7:
8509                 dev_priv->display.queue_flip = intel_gen7_queue_flip;
8510                 break;
8511         }
8512 }
8513
8514 /*
8515  * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
8516  * resume, or other times.  This quirk makes sure that's the case for
8517  * affected systems.
8518  */
8519 static void quirk_pipea_force(struct drm_device *dev)
8520 {
8521         struct drm_i915_private *dev_priv = dev->dev_private;
8522
8523         dev_priv->quirks |= QUIRK_PIPEA_FORCE;
8524         DRM_INFO("applying pipe a force quirk\n");
8525 }
8526
8527 /*
8528  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
8529  */
8530 static void quirk_ssc_force_disable(struct drm_device *dev)
8531 {
8532         struct drm_i915_private *dev_priv = dev->dev_private;
8533         dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
8534         DRM_INFO("applying lvds SSC disable quirk\n");
8535 }
8536
8537 /*
8538  * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
8539  * brightness value
8540  */
8541 static void quirk_invert_brightness(struct drm_device *dev)
8542 {
8543         struct drm_i915_private *dev_priv = dev->dev_private;
8544         dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
8545         DRM_INFO("applying inverted panel brightness quirk\n");
8546 }
8547
8548 struct intel_quirk {
8549         int device;
8550         int subsystem_vendor;
8551         int subsystem_device;
8552         void (*hook)(struct drm_device *dev);
8553 };
8554
8555 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
8556 struct intel_dmi_quirk {
8557         void (*hook)(struct drm_device *dev);
8558         const struct dmi_system_id (*dmi_id_list)[];
8559 };
8560
8561 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
8562 {
8563         DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
8564         return 1;
8565 }
8566
8567 static const struct intel_dmi_quirk intel_dmi_quirks[] = {
8568         {
8569                 .dmi_id_list = &(const struct dmi_system_id[]) {
8570                         {
8571                                 .callback = intel_dmi_reverse_brightness,
8572                                 .ident = "NCR Corporation",
8573                                 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
8574                                             DMI_MATCH(DMI_PRODUCT_NAME, ""),
8575                                 },
8576                         },
8577                         { }  /* terminating entry */
8578                 },
8579                 .hook = quirk_invert_brightness,
8580         },
8581 };
8582
8583 static struct intel_quirk intel_quirks[] = {
8584         /* HP Mini needs pipe A force quirk (LP: #322104) */
8585         { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
8586
8587         /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
8588         { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
8589
8590         /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
8591         { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
8592
8593         /* 830/845 need to leave pipe A & dpll A up */
8594         { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
8595         { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
8596
8597         /* Lenovo U160 cannot use SSC on LVDS */
8598         { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
8599
8600         /* Sony Vaio Y cannot use SSC on LVDS */
8601         { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
8602
8603         /* Acer Aspire 5734Z must invert backlight brightness */
8604         { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
8605
8606         /* Acer/eMachines G725 */
8607         { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
8608
8609         /* Acer/eMachines e725 */
8610         { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
8611
8612         /* Acer/Packard Bell NCL20 */
8613         { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
8614 };
8615
8616 static void intel_init_quirks(struct drm_device *dev)
8617 {
8618         struct pci_dev *d = dev->pdev;
8619         int i;
8620
8621         for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
8622                 struct intel_quirk *q = &intel_quirks[i];
8623
8624                 if (d->device == q->device &&
8625                     (d->subsystem_vendor == q->subsystem_vendor ||
8626                      q->subsystem_vendor == PCI_ANY_ID) &&
8627                     (d->subsystem_device == q->subsystem_device ||
8628                      q->subsystem_device == PCI_ANY_ID))
8629                         q->hook(dev);
8630         }
8631         for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
8632                 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
8633                         intel_dmi_quirks[i].hook(dev);
8634         }
8635 }
8636
8637 /* Disable the VGA plane that we never use */
8638 static void i915_disable_vga(struct drm_device *dev)
8639 {
8640         struct drm_i915_private *dev_priv = dev->dev_private;
8641         u8 sr1;
8642         u32 vga_reg;
8643
8644         if (HAS_PCH_SPLIT(dev))
8645                 vga_reg = CPU_VGACNTRL;
8646         else
8647                 vga_reg = VGACNTRL;
8648
8649         vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
8650         outb(SR01, VGA_SR_INDEX);
8651         sr1 = inb(VGA_SR_DATA);
8652         outb(sr1 | 1<<5, VGA_SR_DATA);
8653         vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
8654         udelay(300);
8655
8656         I915_WRITE(vga_reg, VGA_DISP_DISABLE);
8657         POSTING_READ(vga_reg);
8658 }
8659
8660 void intel_modeset_init_hw(struct drm_device *dev)
8661 {
8662         intel_init_power_well(dev);
8663
8664         intel_prepare_ddi(dev);
8665
8666         intel_init_clock_gating(dev);
8667
8668         mutex_lock(&dev->struct_mutex);
8669         intel_enable_gt_powersave(dev);
8670         mutex_unlock(&dev->struct_mutex);
8671 }
8672
8673 void intel_modeset_init(struct drm_device *dev)
8674 {
8675         struct drm_i915_private *dev_priv = dev->dev_private;
8676         int i, ret;
8677
8678         drm_mode_config_init(dev);
8679
8680         dev->mode_config.min_width = 0;
8681         dev->mode_config.min_height = 0;
8682
8683         dev->mode_config.preferred_depth = 24;
8684         dev->mode_config.prefer_shadow = 1;
8685
8686         dev->mode_config.funcs = &intel_mode_funcs;
8687
8688         intel_init_quirks(dev);
8689
8690         intel_init_pm(dev);
8691
8692         intel_init_display(dev);
8693
8694         if (IS_GEN2(dev)) {
8695                 dev->mode_config.max_width = 2048;
8696                 dev->mode_config.max_height = 2048;
8697         } else if (IS_GEN3(dev)) {
8698                 dev->mode_config.max_width = 4096;
8699                 dev->mode_config.max_height = 4096;
8700         } else {
8701                 dev->mode_config.max_width = 8192;
8702                 dev->mode_config.max_height = 8192;
8703         }
8704         dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
8705
8706         DRM_DEBUG_KMS("%d display pipe%s available.\n",
8707                       dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
8708
8709         for (i = 0; i < dev_priv->num_pipe; i++) {
8710                 intel_crtc_init(dev, i);
8711                 ret = intel_plane_init(dev, i);
8712                 if (ret)
8713                         DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
8714         }
8715
8716         intel_cpu_pll_init(dev);
8717         intel_pch_pll_init(dev);
8718
8719         /* Just disable it once at startup */
8720         i915_disable_vga(dev);
8721         intel_setup_outputs(dev);
8722
8723         /* Just in case the BIOS is doing something questionable. */
8724         intel_disable_fbc(dev);
8725 }
8726
8727 static void
8728 intel_connector_break_all_links(struct intel_connector *connector)
8729 {
8730         connector->base.dpms = DRM_MODE_DPMS_OFF;
8731         connector->base.encoder = NULL;
8732         connector->encoder->connectors_active = false;
8733         connector->encoder->base.crtc = NULL;
8734 }
8735
8736 static void intel_enable_pipe_a(struct drm_device *dev)
8737 {
8738         struct intel_connector *connector;
8739         struct drm_connector *crt = NULL;
8740         struct intel_load_detect_pipe load_detect_temp;
8741
8742         /* We can't just switch on the pipe A, we need to set things up with a
8743          * proper mode and output configuration. As a gross hack, enable pipe A
8744          * by enabling the load detect pipe once. */
8745         list_for_each_entry(connector,
8746                             &dev->mode_config.connector_list,
8747                             base.head) {
8748                 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
8749                         crt = &connector->base;
8750                         break;
8751                 }
8752         }
8753
8754         if (!crt)
8755                 return;
8756
8757         if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
8758                 intel_release_load_detect_pipe(crt, &load_detect_temp);
8759
8760
8761 }
8762
8763 static bool
8764 intel_check_plane_mapping(struct intel_crtc *crtc)
8765 {
8766         struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
8767         u32 reg, val;
8768
8769         if (dev_priv->num_pipe == 1)
8770                 return true;
8771
8772         reg = DSPCNTR(!crtc->plane);
8773         val = I915_READ(reg);
8774
8775         if ((val & DISPLAY_PLANE_ENABLE) &&
8776             (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
8777                 return false;
8778
8779         return true;
8780 }
8781
8782 static void intel_sanitize_crtc(struct intel_crtc *crtc)
8783 {
8784         struct drm_device *dev = crtc->base.dev;
8785         struct drm_i915_private *dev_priv = dev->dev_private;
8786         u32 reg;
8787
8788         /* Clear any frame start delays used for debugging left by the BIOS */
8789         reg = PIPECONF(crtc->cpu_transcoder);
8790         I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
8791
8792         /* We need to sanitize the plane -> pipe mapping first because this will
8793          * disable the crtc (and hence change the state) if it is wrong. Note
8794          * that gen4+ has a fixed plane -> pipe mapping.  */
8795         if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
8796                 struct intel_connector *connector;
8797                 bool plane;
8798
8799                 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
8800                               crtc->base.base.id);
8801
8802                 /* Pipe has the wrong plane attached and the plane is active.
8803                  * Temporarily change the plane mapping and disable everything
8804                  * ...  */
8805                 plane = crtc->plane;
8806                 crtc->plane = !plane;
8807                 dev_priv->display.crtc_disable(&crtc->base);
8808                 crtc->plane = plane;
8809
8810                 /* ... and break all links. */
8811                 list_for_each_entry(connector, &dev->mode_config.connector_list,
8812                                     base.head) {
8813                         if (connector->encoder->base.crtc != &crtc->base)
8814                                 continue;
8815
8816                         intel_connector_break_all_links(connector);
8817                 }
8818
8819                 WARN_ON(crtc->active);
8820                 crtc->base.enabled = false;
8821         }
8822
8823         if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
8824             crtc->pipe == PIPE_A && !crtc->active) {
8825                 /* BIOS forgot to enable pipe A, this mostly happens after
8826                  * resume. Force-enable the pipe to fix this, the update_dpms
8827                  * call below we restore the pipe to the right state, but leave
8828                  * the required bits on. */
8829                 intel_enable_pipe_a(dev);
8830         }
8831
8832         /* Adjust the state of the output pipe according to whether we
8833          * have active connectors/encoders. */
8834         intel_crtc_update_dpms(&crtc->base);
8835
8836         if (crtc->active != crtc->base.enabled) {
8837                 struct intel_encoder *encoder;
8838
8839                 /* This can happen either due to bugs in the get_hw_state
8840                  * functions or because the pipe is force-enabled due to the
8841                  * pipe A quirk. */
8842                 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
8843                               crtc->base.base.id,
8844                               crtc->base.enabled ? "enabled" : "disabled",
8845                               crtc->active ? "enabled" : "disabled");
8846
8847                 crtc->base.enabled = crtc->active;
8848
8849                 /* Because we only establish the connector -> encoder ->
8850                  * crtc links if something is active, this means the
8851                  * crtc is now deactivated. Break the links. connector
8852                  * -> encoder links are only establish when things are
8853                  *  actually up, hence no need to break them. */
8854                 WARN_ON(crtc->active);
8855
8856                 for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
8857                         WARN_ON(encoder->connectors_active);
8858                         encoder->base.crtc = NULL;
8859                 }
8860         }
8861 }
8862
8863 static void intel_sanitize_encoder(struct intel_encoder *encoder)
8864 {
8865         struct intel_connector *connector;
8866         struct drm_device *dev = encoder->base.dev;
8867
8868         /* We need to check both for a crtc link (meaning that the
8869          * encoder is active and trying to read from a pipe) and the
8870          * pipe itself being active. */
8871         bool has_active_crtc = encoder->base.crtc &&
8872                 to_intel_crtc(encoder->base.crtc)->active;
8873
8874         if (encoder->connectors_active && !has_active_crtc) {
8875                 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
8876                               encoder->base.base.id,
8877                               drm_get_encoder_name(&encoder->base));
8878
8879                 /* Connector is active, but has no active pipe. This is
8880                  * fallout from our resume register restoring. Disable
8881                  * the encoder manually again. */
8882                 if (encoder->base.crtc) {
8883                         DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
8884                                       encoder->base.base.id,
8885                                       drm_get_encoder_name(&encoder->base));
8886                         encoder->disable(encoder);
8887                 }
8888
8889                 /* Inconsistent output/port/pipe state happens presumably due to
8890                  * a bug in one of the get_hw_state functions. Or someplace else
8891                  * in our code, like the register restore mess on resume. Clamp
8892                  * things to off as a safer default. */
8893                 list_for_each_entry(connector,
8894                                     &dev->mode_config.connector_list,
8895                                     base.head) {
8896                         if (connector->encoder != encoder)
8897                                 continue;
8898
8899                         intel_connector_break_all_links(connector);
8900                 }
8901         }
8902         /* Enabled encoders without active connectors will be fixed in
8903          * the crtc fixup. */
8904 }
8905
8906 static void i915_redisable_vga(struct drm_device *dev)
8907 {
8908         struct drm_i915_private *dev_priv = dev->dev_private;
8909         u32 vga_reg;
8910
8911         if (HAS_PCH_SPLIT(dev))
8912                 vga_reg = CPU_VGACNTRL;
8913         else
8914                 vga_reg = VGACNTRL;
8915
8916         if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
8917                 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
8918                 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
8919                 POSTING_READ(vga_reg);
8920         }
8921 }
8922
8923 /* Scan out the current hw modeset state, sanitizes it and maps it into the drm
8924  * and i915 state tracking structures. */
8925 void intel_modeset_setup_hw_state(struct drm_device *dev,
8926                                   bool force_restore)
8927 {
8928         struct drm_i915_private *dev_priv = dev->dev_private;
8929         enum pipe pipe;
8930         u32 tmp;
8931         struct intel_crtc *crtc;
8932         struct intel_encoder *encoder;
8933         struct intel_connector *connector;
8934
8935         if (HAS_DDI(dev)) {
8936                 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
8937
8938                 if (tmp & TRANS_DDI_FUNC_ENABLE) {
8939                         switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
8940                         case TRANS_DDI_EDP_INPUT_A_ON:
8941                         case TRANS_DDI_EDP_INPUT_A_ONOFF:
8942                                 pipe = PIPE_A;
8943                                 break;
8944                         case TRANS_DDI_EDP_INPUT_B_ONOFF:
8945                                 pipe = PIPE_B;
8946                                 break;
8947                         case TRANS_DDI_EDP_INPUT_C_ONOFF:
8948                                 pipe = PIPE_C;
8949                                 break;
8950                         }
8951
8952                         crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
8953                         crtc->cpu_transcoder = TRANSCODER_EDP;
8954
8955                         DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
8956                                       pipe_name(pipe));
8957                 }
8958         }
8959
8960         for_each_pipe(pipe) {
8961                 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
8962
8963                 tmp = I915_READ(PIPECONF(crtc->cpu_transcoder));
8964                 if (tmp & PIPECONF_ENABLE)
8965                         crtc->active = true;
8966                 else
8967                         crtc->active = false;
8968
8969                 crtc->base.enabled = crtc->active;
8970
8971                 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
8972                               crtc->base.base.id,
8973                               crtc->active ? "enabled" : "disabled");
8974         }
8975
8976         if (HAS_DDI(dev))
8977                 intel_ddi_setup_hw_pll_state(dev);
8978
8979         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8980                             base.head) {
8981                 pipe = 0;
8982
8983                 if (encoder->get_hw_state(encoder, &pipe)) {
8984                         encoder->base.crtc =
8985                                 dev_priv->pipe_to_crtc_mapping[pipe];
8986                 } else {
8987                         encoder->base.crtc = NULL;
8988                 }
8989
8990                 encoder->connectors_active = false;
8991                 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
8992                               encoder->base.base.id,
8993                               drm_get_encoder_name(&encoder->base),
8994                               encoder->base.crtc ? "enabled" : "disabled",
8995                               pipe);
8996         }
8997
8998         list_for_each_entry(connector, &dev->mode_config.connector_list,
8999                             base.head) {
9000                 if (connector->get_hw_state(connector)) {
9001                         connector->base.dpms = DRM_MODE_DPMS_ON;
9002                         connector->encoder->connectors_active = true;
9003                         connector->base.encoder = &connector->encoder->base;
9004                 } else {
9005                         connector->base.dpms = DRM_MODE_DPMS_OFF;
9006                         connector->base.encoder = NULL;
9007                 }
9008                 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
9009                               connector->base.base.id,
9010                               drm_get_connector_name(&connector->base),
9011                               connector->base.encoder ? "enabled" : "disabled");
9012         }
9013
9014         /* HW state is read out, now we need to sanitize this mess. */
9015         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9016                             base.head) {
9017                 intel_sanitize_encoder(encoder);
9018         }
9019
9020         for_each_pipe(pipe) {
9021                 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
9022                 intel_sanitize_crtc(crtc);
9023         }
9024
9025         if (force_restore) {
9026                 for_each_pipe(pipe) {
9027                         intel_crtc_restore_mode(dev_priv->pipe_to_crtc_mapping[pipe]);
9028                 }
9029
9030                 i915_redisable_vga(dev);
9031         } else {
9032                 intel_modeset_update_staged_output_state(dev);
9033         }
9034
9035         intel_modeset_check_state(dev);
9036
9037         drm_mode_config_reset(dev);
9038 }
9039
9040 void intel_modeset_gem_init(struct drm_device *dev)
9041 {
9042         intel_modeset_init_hw(dev);
9043
9044         intel_setup_overlay(dev);
9045
9046         intel_modeset_setup_hw_state(dev, false);
9047 }
9048
9049 void intel_modeset_cleanup(struct drm_device *dev)
9050 {
9051         struct drm_i915_private *dev_priv = dev->dev_private;
9052         struct drm_crtc *crtc;
9053         struct intel_crtc *intel_crtc;
9054
9055         drm_kms_helper_poll_fini(dev);
9056         mutex_lock(&dev->struct_mutex);
9057
9058         intel_unregister_dsm_handler();
9059
9060
9061         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9062                 /* Skip inactive CRTCs */
9063                 if (!crtc->fb)
9064                         continue;
9065
9066                 intel_crtc = to_intel_crtc(crtc);
9067                 intel_increase_pllclock(crtc);
9068         }
9069
9070         intel_disable_fbc(dev);
9071
9072         intel_disable_gt_powersave(dev);
9073
9074         ironlake_teardown_rc6(dev);
9075
9076         if (IS_VALLEYVIEW(dev))
9077                 vlv_init_dpio(dev);
9078
9079         mutex_unlock(&dev->struct_mutex);
9080
9081         /* Disable the irq before mode object teardown, for the irq might
9082          * enqueue unpin/hotplug work. */
9083         drm_irq_uninstall(dev);
9084         cancel_work_sync(&dev_priv->hotplug_work);
9085         cancel_work_sync(&dev_priv->rps.work);
9086
9087         /* flush any delayed tasks or pending work */
9088         flush_scheduled_work();
9089
9090         drm_mode_config_cleanup(dev);
9091
9092         intel_cleanup_overlay(dev);
9093 }
9094
9095 /*
9096  * Return which encoder is currently attached for connector.
9097  */
9098 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
9099 {
9100         return &intel_attached_encoder(connector)->base;
9101 }
9102
9103 void intel_connector_attach_encoder(struct intel_connector *connector,
9104                                     struct intel_encoder *encoder)
9105 {
9106         connector->encoder = encoder;
9107         drm_mode_connector_attach_encoder(&connector->base,
9108                                           &encoder->base);
9109 }
9110
9111 /*
9112  * set vga decode state - true == enable VGA decode
9113  */
9114 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
9115 {
9116         struct drm_i915_private *dev_priv = dev->dev_private;
9117         u16 gmch_ctrl;
9118
9119         pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
9120         if (state)
9121                 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
9122         else
9123                 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
9124         pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
9125         return 0;
9126 }
9127
9128 #ifdef CONFIG_DEBUG_FS
9129 #include <linux/seq_file.h>
9130
9131 struct intel_display_error_state {
9132         struct intel_cursor_error_state {
9133                 u32 control;
9134                 u32 position;
9135                 u32 base;
9136                 u32 size;
9137         } cursor[I915_MAX_PIPES];
9138
9139         struct intel_pipe_error_state {
9140                 u32 conf;
9141                 u32 source;
9142
9143                 u32 htotal;
9144                 u32 hblank;
9145                 u32 hsync;
9146                 u32 vtotal;
9147                 u32 vblank;
9148                 u32 vsync;
9149         } pipe[I915_MAX_PIPES];
9150
9151         struct intel_plane_error_state {
9152                 u32 control;
9153                 u32 stride;
9154                 u32 size;
9155                 u32 pos;
9156                 u32 addr;
9157                 u32 surface;
9158                 u32 tile_offset;
9159         } plane[I915_MAX_PIPES];
9160 };
9161
9162 struct intel_display_error_state *
9163 intel_display_capture_error_state(struct drm_device *dev)
9164 {
9165         drm_i915_private_t *dev_priv = dev->dev_private;
9166         struct intel_display_error_state *error;
9167         enum transcoder cpu_transcoder;
9168         int i;
9169
9170         error = kmalloc(sizeof(*error), GFP_ATOMIC);
9171         if (error == NULL)
9172                 return NULL;
9173
9174         for_each_pipe(i) {
9175                 cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
9176
9177                 error->cursor[i].control = I915_READ(CURCNTR(i));
9178                 error->cursor[i].position = I915_READ(CURPOS(i));
9179                 error->cursor[i].base = I915_READ(CURBASE(i));
9180
9181                 error->plane[i].control = I915_READ(DSPCNTR(i));
9182                 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
9183                 error->plane[i].size = I915_READ(DSPSIZE(i));
9184                 error->plane[i].pos = I915_READ(DSPPOS(i));
9185                 error->plane[i].addr = I915_READ(DSPADDR(i));
9186                 if (INTEL_INFO(dev)->gen >= 4) {
9187                         error->plane[i].surface = I915_READ(DSPSURF(i));
9188                         error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
9189                 }
9190
9191                 error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
9192                 error->pipe[i].source = I915_READ(PIPESRC(i));
9193                 error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
9194                 error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
9195                 error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
9196                 error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
9197                 error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
9198                 error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
9199         }
9200
9201         return error;
9202 }
9203
9204 void
9205 intel_display_print_error_state(struct seq_file *m,
9206                                 struct drm_device *dev,
9207                                 struct intel_display_error_state *error)
9208 {
9209         drm_i915_private_t *dev_priv = dev->dev_private;
9210         int i;
9211
9212         seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe);
9213         for_each_pipe(i) {
9214                 seq_printf(m, "Pipe [%d]:\n", i);
9215                 seq_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
9216                 seq_printf(m, "  SRC: %08x\n", error->pipe[i].source);
9217                 seq_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
9218                 seq_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
9219                 seq_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
9220                 seq_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
9221                 seq_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
9222                 seq_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);
9223
9224                 seq_printf(m, "Plane [%d]:\n", i);
9225                 seq_printf(m, "  CNTR: %08x\n", error->plane[i].control);
9226                 seq_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
9227                 seq_printf(m, "  SIZE: %08x\n", error->plane[i].size);
9228                 seq_printf(m, "  POS: %08x\n", error->plane[i].pos);
9229                 seq_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
9230                 if (INTEL_INFO(dev)->gen >= 4) {
9231                         seq_printf(m, "  SURF: %08x\n", error->plane[i].surface);
9232                         seq_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
9233                 }
9234
9235                 seq_printf(m, "Cursor [%d]:\n", i);
9236                 seq_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
9237                 seq_printf(m, "  POS: %08x\n", error->cursor[i].position);
9238                 seq_printf(m, "  BASE: %08x\n", error->cursor[i].base);
9239         }
9240 }
9241 #endif