1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31 #include <linux/sysrq.h>
32 #include <linux/slab.h>
33 #include <drm/drmP.h>
34 #include <drm/i915_drm.h>
35 #include "i915_drv.h"
36 #include "i915_trace.h"
37 #include "intel_drv.h"
38
39 static const u32 hpd_ibx[] = {
40         [HPD_CRT] = SDE_CRT_HOTPLUG,
41         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
42         [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
43         [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
44         [HPD_PORT_D] = SDE_PORTD_HOTPLUG
45 };
46
47 static const u32 hpd_cpt[] = {
48         [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
49         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
50         [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
51         [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
52         [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
53 };
54
55 static const u32 hpd_mask_i915[] = {
56         [HPD_CRT] = CRT_HOTPLUG_INT_EN,
57         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
58         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
59         [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
60         [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
61         [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
62 };
63
64 static const u32 hpd_status_gen4[] = {
65         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
66         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
67         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
68         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
69         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
70         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
71 };
72
73 static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
74         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
75         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
76         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
77         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
78         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
79         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
80 };
81
82 static void ibx_hpd_irq_setup(struct drm_device *dev);
83 static void i915_hpd_irq_setup(struct drm_device *dev);
84
85 /* For display hotplug interrupt */
86 static void
87 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
88 {
89         if ((dev_priv->irq_mask & mask) != 0) {
90                 dev_priv->irq_mask &= ~mask;
91                 I915_WRITE(DEIMR, dev_priv->irq_mask);
92                 POSTING_READ(DEIMR);
93         }
94 }
95
96 static void
97 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
98 {
99         if ((dev_priv->irq_mask & mask) != mask) {
100                 dev_priv->irq_mask |= mask;
101                 I915_WRITE(DEIMR, dev_priv->irq_mask);
102                 POSTING_READ(DEIMR);
103         }
104 }
105
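/*
 * IVB/HSW have a single DE_ERR_INT_IVB enable bit shared by all pipes, so
 * the error interrupt may only be unmasked while no pipe has CPU FIFO
 * underrun reporting disabled.
 */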
106 static bool ivb_can_enable_err_int(struct drm_device *dev)
107 {
108         struct drm_i915_private *dev_priv = dev->dev_private;
109         struct intel_crtc *crtc;
110         enum pipe pipe;
111
112         for_each_pipe(pipe) {
113                 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
114
115                 if (crtc->cpu_fifo_underrun_disabled)
116                         return false;
117         }
118
119         return true;
120 }
121
122 static bool cpt_can_enable_serr_int(struct drm_device *dev)
123 {
124         struct drm_i915_private *dev_priv = dev->dev_private;
125         enum pipe pipe;
126         struct intel_crtc *crtc;
127
128         for_each_pipe(pipe) {
129                 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
130
131                 if (crtc->pch_fifo_underrun_disabled)
132                         return false;
133         }
134
135         return true;
136 }
137
138 static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
139                                                  enum pipe pipe, bool enable)
140 {
141         struct drm_i915_private *dev_priv = dev->dev_private;
142         uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
143                                           DE_PIPEB_FIFO_UNDERRUN;
144
145         if (enable)
146                 ironlake_enable_display_irq(dev_priv, bit);
147         else
148                 ironlake_disable_display_irq(dev_priv, bit);
149 }
150
151 static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
152                                                   bool enable)
153 {
154         struct drm_i915_private *dev_priv = dev->dev_private;
155
156         if (enable) {
157                 if (!ivb_can_enable_err_int(dev))
158                         return;
159
160                 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
161                                          ERR_INT_FIFO_UNDERRUN_B |
162                                          ERR_INT_FIFO_UNDERRUN_C);
163
164                 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
165         } else {
166                 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
167         }
168 }
169
170 static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
171                                             bool enable)
172 {
173         struct drm_device *dev = crtc->base.dev;
174         struct drm_i915_private *dev_priv = dev->dev_private;
175         uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
176                                                 SDE_TRANSB_FIFO_UNDER;
177
178         if (enable)
179                 I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
180         else
181                 I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);
182
183         POSTING_READ(SDEIMR);
184 }
185
186 static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
187                                             enum transcoder pch_transcoder,
188                                             bool enable)
189 {
190         struct drm_i915_private *dev_priv = dev->dev_private;
191
192         if (enable) {
193                 if (!cpt_can_enable_serr_int(dev))
194                         return;
195
196                 I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
197                                      SERR_INT_TRANS_B_FIFO_UNDERRUN |
198                                      SERR_INT_TRANS_C_FIFO_UNDERRUN);
199
200                 I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
201         } else {
202                 I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
203         }
204
205         POSTING_READ(SDEIMR);
206 }
207
208 /**
209  * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
210  * @dev: drm device
211  * @pipe: pipe
212  * @enable: true if we want to report FIFO underrun errors, false otherwise
213  *
214  * This function makes us disable or enable CPU fifo underruns for a specific
215  * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
216  * reporting for one pipe may also disable all the other CPU error interrupts for
217  * the other pipes, due to the fact that there's just one interrupt mask/enable
218  * bit for all the pipes.
219  *
220  * Returns the previous state of underrun reporting.
221  */
222 bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
223                                            enum pipe pipe, bool enable)
224 {
225         struct drm_i915_private *dev_priv = dev->dev_private;
226         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
227         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
228         unsigned long flags;
229         bool ret;
230
231         spin_lock_irqsave(&dev_priv->irq_lock, flags);
232
233         ret = !intel_crtc->cpu_fifo_underrun_disabled;
234
235         if (enable == ret)
236                 goto done;
237
238         intel_crtc->cpu_fifo_underrun_disabled = !enable;
239
240         if (IS_GEN5(dev) || IS_GEN6(dev))
241                 ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
242         else if (IS_GEN7(dev))
243                 ivybridge_set_fifo_underrun_reporting(dev, enable);
244
245 done:
246         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
247         return ret;
248 }
249
250 /**
251  * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
252  * @dev: drm device
253  * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
254  * @enable: true if we want to report FIFO underrun errors, false otherwise
255  *
256  * This function makes us disable or enable PCH fifo underruns for a specific
257  * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
258  * underrun reporting for one transcoder may also disable all the other PCH
259  * error interrupts for the other transcoders, due to the fact that there's just
260  * one interrupt mask/enable bit for all the transcoders.
261  *
262  * Returns the previous state of underrun reporting.
263  */
264 bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
265                                            enum transcoder pch_transcoder,
266                                            bool enable)
267 {
268         struct drm_i915_private *dev_priv = dev->dev_private;
269         enum pipe p;
270         struct drm_crtc *crtc;
271         struct intel_crtc *intel_crtc;
272         unsigned long flags;
273         bool ret;
274
275         if (HAS_PCH_LPT(dev)) {
276                 crtc = NULL;
277                 for_each_pipe(p) {
278                         struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
279                         if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
280                                 crtc = c;
281                                 break;
282                         }
283                 }
284                 if (!crtc) {
285                         DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
286                         return false;
287                 }
288         } else {
289                 crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
290         }
291         intel_crtc = to_intel_crtc(crtc);
292
293         spin_lock_irqsave(&dev_priv->irq_lock, flags);
294
295         ret = !intel_crtc->pch_fifo_underrun_disabled;
296
297         if (enable == ret)
298                 goto done;
299
300         intel_crtc->pch_fifo_underrun_disabled = !enable;
301
302         if (HAS_PCH_IBX(dev))
303                 ibx_set_fifo_underrun_reporting(intel_crtc, enable);
304         else
305                 cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
306
307 done:
308         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
309         return ret;
310 }
311
312
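/*
 * PIPESTAT keeps the interrupt enable bits in its upper half and the
 * corresponding status bits in the lower half, which is why these helpers
 * mask the read with 0x7fff0000 and, when enabling, also write the enable
 * mask shifted down by 16 to clear any stale status.
 */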
313 void
314 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
315 {
316         u32 reg = PIPESTAT(pipe);
317         u32 pipestat = I915_READ(reg) & 0x7fff0000;
318
319         if ((pipestat & mask) == mask)
320                 return;
321
322         /* Enable the interrupt, clear any pending status */
323         pipestat |= mask | (mask >> 16);
324         I915_WRITE(reg, pipestat);
325         POSTING_READ(reg);
326 }
327
328 void
329 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
330 {
331         u32 reg = PIPESTAT(pipe);
332         u32 pipestat = I915_READ(reg) & 0x7fff0000;
333
334         if ((pipestat & mask) == 0)
335                 return;
336
337         pipestat &= ~mask;
338         I915_WRITE(reg, pipestat);
339         POSTING_READ(reg);
340 }
341
342 /**
343  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
344  */
345 static void i915_enable_asle_pipestat(struct drm_device *dev)
346 {
347         drm_i915_private_t *dev_priv = dev->dev_private;
348         unsigned long irqflags;
349
350         if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
351                 return;
352
353         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
354
355         i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
356         if (INTEL_INFO(dev)->gen >= 4)
357                 i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
358
359         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
360 }
361
362 /**
363  * i915_pipe_enabled - check if a pipe is enabled
364  * @dev: DRM device
365  * @pipe: pipe to check
366  *
367  * Reading certain registers when the pipe is disabled can hang the chip.
368  * Use this routine to make sure the PLL is running and the pipe is active
369  * before reading such registers if unsure.
370  */
371 static int
372 i915_pipe_enabled(struct drm_device *dev, int pipe)
373 {
374         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
375
376         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
377                 /* Locking is horribly broken here, but whatever. */
378                 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
379                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
380
381                 return intel_crtc->active;
382         } else {
383                 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
384         }
385 }
386
387 /* Called from drm generic code, passed a 'crtc', which
388  * we use as a pipe index
389  */
390 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
391 {
392         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
393         unsigned long high_frame;
394         unsigned long low_frame;
395         u32 high1, high2, low;
396
397         if (!i915_pipe_enabled(dev, pipe)) {
398                 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
399                                 "pipe %c\n", pipe_name(pipe));
400                 return 0;
401         }
402
403         high_frame = PIPEFRAME(pipe);
404         low_frame = PIPEFRAMEPIXEL(pipe);
405
406         /*
407          * High & low register fields aren't synchronized, so make sure
408          * we get a low value that's stable across two reads of the high
409          * register.
410          */
411         do {
412                 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
413                 low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
414                 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
415         } while (high1 != high2);
416
417         high1 >>= PIPE_FRAME_HIGH_SHIFT;
418         low >>= PIPE_FRAME_LOW_SHIFT;
419         return (high1 << 8) | low;
420 }
421
422 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
423 {
424         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
425         int reg = PIPE_FRMCOUNT_GM45(pipe);
426
427         if (!i915_pipe_enabled(dev, pipe)) {
428                 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
429                                  "pipe %c\n", pipe_name(pipe));
430                 return 0;
431         }
432
433         return I915_READ(reg);
434 }
435
436 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
437                              int *vpos, int *hpos)
438 {
439         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
440         u32 vbl = 0, position = 0;
441         int vbl_start, vbl_end, htotal, vtotal;
442         bool in_vbl = true;
443         int ret = 0;
444         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
445                                                                       pipe);
446
447         if (!i915_pipe_enabled(dev, pipe)) {
448                 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
449                                  "pipe %c\n", pipe_name(pipe));
450                 return 0;
451         }
452
453         /* Get vtotal. */
454         vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
455
456         if (INTEL_INFO(dev)->gen >= 4) {
457                 /* No obvious pixelcount register. Only query vertical
458                  * scanout position from Display scan line register.
459                  */
460                 position = I915_READ(PIPEDSL(pipe));
461
462                 /* Decode into vertical scanout position. Don't have
463                  * horizontal scanout position.
464                  */
465                 *vpos = position & 0x1fff;
466                 *hpos = 0;
467         } else {
468                 /* Have access to pixelcount since start of frame.
469                  * We can split this into vertical and horizontal
470                  * scanout position.
471                  */
472                 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
473
474                 htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
475                 *vpos = position / htotal;
476                 *hpos = position - (*vpos * htotal);
477         }
478
479         /* Query vblank area. */
480         vbl = I915_READ(VBLANK(cpu_transcoder));
481
482         /* Test position against vblank region. */
483         vbl_start = vbl & 0x1fff;
484         vbl_end = (vbl >> 16) & 0x1fff;
485
486         if ((*vpos < vbl_start) || (*vpos > vbl_end))
487                 in_vbl = false;
488
489         /* Inside "upper part" of vblank area? Apply corrective offset: */
490         if (in_vbl && (*vpos >= vbl_start))
491                 *vpos = *vpos - vtotal;
492
493         /* Readouts valid? */
494         if (vbl > 0)
495                 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
496
497         /* In vblank? */
498         if (in_vbl)
499                 ret |= DRM_SCANOUTPOS_INVBL;
500
501         return ret;
502 }
503
504 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
505                               int *max_error,
506                               struct timeval *vblank_time,
507                               unsigned flags)
508 {
509         struct drm_crtc *crtc;
510
511         if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
512                 DRM_ERROR("Invalid crtc %d\n", pipe);
513                 return -EINVAL;
514         }
515
516         /* Get drm_crtc to timestamp: */
517         crtc = intel_get_crtc_for_pipe(dev, pipe);
518         if (crtc == NULL) {
519                 DRM_ERROR("Invalid crtc %d\n", pipe);
520                 return -EINVAL;
521         }
522
523         if (!crtc->enabled) {
524                 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
525                 return -EBUSY;
526         }
527
528         /* Helper routine in DRM core does all the work: */
529         return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
530                                                      vblank_time, flags,
531                                                      crtc);
532 }
533
534 static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
535 {
536         enum drm_connector_status old_status;
537
538         WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
539         old_status = connector->status;
540
541         connector->status = connector->funcs->detect(connector, false);
542         DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
543                       connector->base.id,
544                       drm_get_connector_name(connector),
545                       old_status, connector->status);
546         return (old_status != connector->status);
547 }
548
549 /*
550  * Handle hotplug events outside the interrupt handler proper.
551  */
552 #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
553
554 static void i915_hotplug_work_func(struct work_struct *work)
555 {
556         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
557                                                     hotplug_work);
558         struct drm_device *dev = dev_priv->dev;
559         struct drm_mode_config *mode_config = &dev->mode_config;
560         struct intel_connector *intel_connector;
561         struct intel_encoder *intel_encoder;
562         struct drm_connector *connector;
563         unsigned long irqflags;
564         bool hpd_disabled = false;
565         bool changed = false;
566         u32 hpd_event_bits;
567
568         /* HPD irq before everything is fully set up. */
569         if (!dev_priv->enable_hotplug_processing)
570                 return;
571
572         mutex_lock(&mode_config->mutex);
573         DRM_DEBUG_KMS("running encoder hotplug functions\n");
574
575         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
576
577         hpd_event_bits = dev_priv->hpd_event_bits;
578         dev_priv->hpd_event_bits = 0;
579         list_for_each_entry(connector, &mode_config->connector_list, head) {
580                 intel_connector = to_intel_connector(connector);
581                 intel_encoder = intel_connector->encoder;
582                 if (intel_encoder->hpd_pin > HPD_NONE &&
583                     dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
584                     connector->polled == DRM_CONNECTOR_POLL_HPD) {
585                         DRM_INFO("HPD interrupt storm detected on connector %s: "
586                                  "switching from hotplug detection to polling\n",
587                                 drm_get_connector_name(connector));
588                         dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
589                         connector->polled = DRM_CONNECTOR_POLL_CONNECT
590                                 | DRM_CONNECTOR_POLL_DISCONNECT;
591                         hpd_disabled = true;
592                 }
593                 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
594                         DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
595                                       drm_get_connector_name(connector), intel_encoder->hpd_pin);
596                 }
597         }
598          /* If poll was disabled because there were no outputs to poll,
599           * make sure it is enabled again now that HPD is being disabled
600           * on some connectors and they have to be polled instead. */
601         if (hpd_disabled) {
602                 drm_kms_helper_poll_enable(dev);
603                 mod_timer(&dev_priv->hotplug_reenable_timer,
604                           jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
605         }
606
607         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
608
609         list_for_each_entry(connector, &mode_config->connector_list, head) {
610                 intel_connector = to_intel_connector(connector);
611                 intel_encoder = intel_connector->encoder;
612                 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
613                         if (intel_encoder->hot_plug)
614                                 intel_encoder->hot_plug(intel_encoder);
615                         if (intel_hpd_irq_event(dev, connector))
616                                 changed = true;
617                 }
618         }
619         mutex_unlock(&mode_config->mutex);
620
621         if (changed)
622                 drm_kms_helper_hotplug_event(dev);
623 }
624
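/*
 * Ironlake DRPS: the PCU interrupt fires when the GPU busyness averages
 * cross the programmed thresholds.  Step ips.cur_delay one unit towards
 * max_delay when the "busy up" average exceeds its limit, or towards
 * min_delay when the "busy down" average drops below its limit, and
 * program the new value via ironlake_set_drps().
 */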
625 static void ironlake_handle_rps_change(struct drm_device *dev)
626 {
627         drm_i915_private_t *dev_priv = dev->dev_private;
628         u32 busy_up, busy_down, max_avg, min_avg;
629         u8 new_delay;
630         unsigned long flags;
631
632         spin_lock_irqsave(&mchdev_lock, flags);
633
634         I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
635
636         new_delay = dev_priv->ips.cur_delay;
637
638         I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
639         busy_up = I915_READ(RCPREVBSYTUPAVG);
640         busy_down = I915_READ(RCPREVBSYTDNAVG);
641         max_avg = I915_READ(RCBMAXAVG);
642         min_avg = I915_READ(RCBMINAVG);
643
644         /* Handle RCS change request from hw */
645         if (busy_up > max_avg) {
646                 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
647                         new_delay = dev_priv->ips.cur_delay - 1;
648                 if (new_delay < dev_priv->ips.max_delay)
649                         new_delay = dev_priv->ips.max_delay;
650         } else if (busy_down < min_avg) {
651                 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
652                         new_delay = dev_priv->ips.cur_delay + 1;
653                 if (new_delay > dev_priv->ips.min_delay)
654                         new_delay = dev_priv->ips.min_delay;
655         }
656
657         if (ironlake_set_drps(dev, new_delay))
658                 dev_priv->ips.cur_delay = new_delay;
659
660         spin_unlock_irqrestore(&mchdev_lock, flags);
661
662         return;
663 }
664
665 static void notify_ring(struct drm_device *dev,
666                         struct intel_ring_buffer *ring)
667 {
668         struct drm_i915_private *dev_priv = dev->dev_private;
669
670         if (ring->obj == NULL)
671                 return;
672
673         trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
674
675         wake_up_all(&ring->irq_queue);
676         if (i915_enable_hangcheck) {
677                 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
678                           round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
679         }
680 }
681
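/*
 * Deferred RPS work: consume the up/down threshold events collected by the
 * interrupt handler and move the GPU frequency one step in the requested
 * direction.  On Valleyview, ramping up from below RPe jumps straight to
 * RPe, and vlv_work is re-armed so the voltage level can be checked after
 * the GPU has entered RC6.
 */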
682 static void gen6_pm_rps_work(struct work_struct *work)
683 {
684         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
685                                                     rps.work);
686         u32 pm_iir, pm_imr;
687         u8 new_delay;
688
689         spin_lock_irq(&dev_priv->rps.lock);
690         pm_iir = dev_priv->rps.pm_iir;
691         dev_priv->rps.pm_iir = 0;
692         pm_imr = I915_READ(GEN6_PMIMR);
693         /* Make sure not to corrupt PMIMR state used by ringbuffer code */
694         I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
695         spin_unlock_irq(&dev_priv->rps.lock);
696
697         if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
698                 return;
699
700         mutex_lock(&dev_priv->rps.hw_lock);
701
702         if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
703                 new_delay = dev_priv->rps.cur_delay + 1;
704
705                 /*
706                  * For better performance, jump directly
707                  * to RPe if we're below it.
708                  */
709                 if (IS_VALLEYVIEW(dev_priv->dev) &&
710                     dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
711                         new_delay = dev_priv->rps.rpe_delay;
712         } else
713                 new_delay = dev_priv->rps.cur_delay - 1;
714
715         /* sysfs frequency interfaces may have snuck in while servicing the
716          * interrupt
717          */
718         if (new_delay >= dev_priv->rps.min_delay &&
719             new_delay <= dev_priv->rps.max_delay) {
720                 if (IS_VALLEYVIEW(dev_priv->dev))
721                         valleyview_set_rps(dev_priv->dev, new_delay);
722                 else
723                         gen6_set_rps(dev_priv->dev, new_delay);
724         }
725
726         if (IS_VALLEYVIEW(dev_priv->dev)) {
727                 /*
728                  * On VLV, when we enter RC6 we may not be at the minimum
729                  * voltage level, so arm a timer to check.  It should only
730                  * fire when there's activity or once after we've entered
731                  * RC6, and then won't be re-armed until the next RPS interrupt.
732                  */
733                 mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
734                                  msecs_to_jiffies(100));
735         }
736
737         mutex_unlock(&dev_priv->rps.hw_lock);
738 }
739
740
741 /**
742  * ivybridge_parity_work - Workqueue called when a parity error interrupt
743  * occurred.
744  * @work: workqueue struct
745  *
746  * Doesn't actually do anything except notify userspace. As a consequence of
747  * this event, userspace should try to remap the bad rows since, statistically,
748  * the same row is likely to go bad again.
749  */
750 static void ivybridge_parity_work(struct work_struct *work)
751 {
752         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
753                                                     l3_parity.error_work);
754         u32 error_status, row, bank, subbank;
755         char *parity_event[5];
756         uint32_t misccpctl;
757         unsigned long flags;
758
759         /* We must turn off DOP level clock gating to access the L3 registers.
760          * In order to prevent a get/put style interface, acquire struct mutex
761          * any time we access those registers.
762          */
763         mutex_lock(&dev_priv->dev->struct_mutex);
764
765         misccpctl = I915_READ(GEN7_MISCCPCTL);
766         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
767         POSTING_READ(GEN7_MISCCPCTL);
768
769         error_status = I915_READ(GEN7_L3CDERRST1);
770         row = GEN7_PARITY_ERROR_ROW(error_status);
771         bank = GEN7_PARITY_ERROR_BANK(error_status);
772         subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
773
774         I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
775                                     GEN7_L3CDERRST1_ENABLE);
776         POSTING_READ(GEN7_L3CDERRST1);
777
778         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
779
780         spin_lock_irqsave(&dev_priv->irq_lock, flags);
781         dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
782         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
783         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
784
785         mutex_unlock(&dev_priv->dev->struct_mutex);
786
787         parity_event[0] = "L3_PARITY_ERROR=1";
788         parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
789         parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
790         parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
791         parity_event[4] = NULL;
792
793         kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
794                            KOBJ_CHANGE, parity_event);
795
796         DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
797                   row, bank, subbank);
798
799         kfree(parity_event[3]);
800         kfree(parity_event[2]);
801         kfree(parity_event[1]);
802 }
803
804 static void ivybridge_handle_parity_error(struct drm_device *dev)
805 {
806         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
807         unsigned long flags;
808
809         if (!HAS_L3_GPU_CACHE(dev))
810                 return;
811
812         spin_lock_irqsave(&dev_priv->irq_lock, flags);
813         dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
814         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
815         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
816
817         queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
818 }
819
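/*
 * GT interrupt dispatch for SNB+: wake the render/BSD/blitter waiters,
 * escalate command streamer errors to the error handler and kick off the
 * L3 parity work when a parity error is flagged.
 */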
820 static void snb_gt_irq_handler(struct drm_device *dev,
821                                struct drm_i915_private *dev_priv,
822                                u32 gt_iir)
823 {
824
825         if (gt_iir &
826             (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
827                 notify_ring(dev, &dev_priv->ring[RCS]);
828         if (gt_iir & GT_BSD_USER_INTERRUPT)
829                 notify_ring(dev, &dev_priv->ring[VCS]);
830         if (gt_iir & GT_BLT_USER_INTERRUPT)
831                 notify_ring(dev, &dev_priv->ring[BCS]);
832
833         if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
834                       GT_BSD_CS_ERROR_INTERRUPT |
835                       GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
836                 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
837                 i915_handle_error(dev, false);
838         }
839
840         if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
841                 ivybridge_handle_parity_error(dev);
842 }
843
844 /* Legacy way of handling PM interrupts */
845 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
846                                 u32 pm_iir)
847 {
848         unsigned long flags;
849
850         /*
851          * IIR bits should never already be set because IMR should
852          * prevent an interrupt from being shown in IIR. The warning
853          * displays a case where we've unsafely cleared
854          * dev_priv->rps.pm_iir. Although missing an interrupt of the same
855  * type is not a problem, it points to a problem in the logic.
856          *
857          * The mask bit in IMR is cleared by dev_priv->rps.work.
858          */
859
860         spin_lock_irqsave(&dev_priv->rps.lock, flags);
861         dev_priv->rps.pm_iir |= pm_iir;
862         I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
863         POSTING_READ(GEN6_PMIMR);
864         spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
865
866         queue_work(dev_priv->wq, &dev_priv->rps.work);
867 }
868
869 #define HPD_STORM_DETECT_PERIOD 1000
870 #define HPD_STORM_THRESHOLD 5
871
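/*
 * Count hotplug interrupts per HPD pin.  A pin that fires more than
 * HPD_STORM_THRESHOLD times within HPD_STORM_DETECT_PERIOD ms is marked as
 * storming so the hotplug work moves that connector over to polling;
 * returning true tells the caller to reprogram the HPD enable bits.
 */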
872 static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
873                                             u32 hotplug_trigger,
874                                             const u32 *hpd)
875 {
876         drm_i915_private_t *dev_priv = dev->dev_private;
877         unsigned long irqflags;
878         int i;
879         bool ret = false;
880
881         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
882
883         for (i = 1; i < HPD_NUM_PINS; i++) {
884
885                 if (!(hpd[i] & hotplug_trigger) ||
886                     dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
887                         continue;
888
889                 dev_priv->hpd_event_bits |= (1 << i);
890                 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
891                                    dev_priv->hpd_stats[i].hpd_last_jiffies
892                                    + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
893                         dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
894                         dev_priv->hpd_stats[i].hpd_cnt = 0;
895                 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
896                         dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
897                         dev_priv->hpd_event_bits &= ~(1 << i);
898                         DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
899                         ret = true;
900                 } else {
901                         dev_priv->hpd_stats[i].hpd_cnt++;
902                 }
903         }
904
905         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
906
907         return ret;
908 }
909
910 static void gmbus_irq_handler(struct drm_device *dev)
911 {
912         struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
913
914         wake_up_all(&dev_priv->gmbus_wait_queue);
915 }
916
917 static void dp_aux_irq_handler(struct drm_device *dev)
918 {
919         struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
920
921         wake_up_all(&dev_priv->gmbus_wait_queue);
922 }
923
924 /* Unlike gen6_queue_rps_work() from which this function is originally derived,
925  * we must be able to deal with other PM interrupts. This is complicated because
926  * of the way in which we use the masks to defer the RPS work (which for
927  * posterity is necessary because of forcewake).
928  */
929 static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
930                                u32 pm_iir)
931 {
932         unsigned long flags;
933
934         spin_lock_irqsave(&dev_priv->rps.lock, flags);
935         dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
936         if (dev_priv->rps.pm_iir) {
937                 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
938                 /* never want to mask useful interrupts. (also posting read) */
939                 WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
940                 /* TODO: if queue_work is slow, move it out of the spinlock */
941                 queue_work(dev_priv->wq, &dev_priv->rps.work);
942         }
943         spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
944
945         if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
946                 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
947                         notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
948
949                 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
950                         DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
951                         i915_handle_error(dev_priv->dev, false);
952                 }
953         }
954 }
955
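/*
 * Top-level interrupt handler for Valleyview: loop until VLV_IIR, GTIIR and
 * GEN6_PMIIR all read zero, dispatching GT, pipe (vblank/page flip/underrun),
 * hotplug, GMBUS and RPS events along the way.
 */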
956 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
957 {
958         struct drm_device *dev = (struct drm_device *) arg;
959         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
960         u32 iir, gt_iir, pm_iir;
961         irqreturn_t ret = IRQ_NONE;
962         unsigned long irqflags;
963         int pipe;
964         u32 pipe_stats[I915_MAX_PIPES];
965
966         atomic_inc(&dev_priv->irq_received);
967
968         while (true) {
969                 iir = I915_READ(VLV_IIR);
970                 gt_iir = I915_READ(GTIIR);
971                 pm_iir = I915_READ(GEN6_PMIIR);
972
973                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
974                         goto out;
975
976                 ret = IRQ_HANDLED;
977
978                 snb_gt_irq_handler(dev, dev_priv, gt_iir);
979
980                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
981                 for_each_pipe(pipe) {
982                         int reg = PIPESTAT(pipe);
983                         pipe_stats[pipe] = I915_READ(reg);
984
985                         /*
986                          * Clear the PIPE*STAT regs before the IIR
987                          */
988                         if (pipe_stats[pipe] & 0x8000ffff) {
989                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
990                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
991                                                          pipe_name(pipe));
992                                 I915_WRITE(reg, pipe_stats[pipe]);
993                         }
994                 }
995                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
996
997                 for_each_pipe(pipe) {
998                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
999                                 drm_handle_vblank(dev, pipe);
1000
1001                         if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
1002                                 intel_prepare_page_flip(dev, pipe);
1003                                 intel_finish_page_flip(dev, pipe);
1004                         }
1005                 }
1006
1007                 /* Consume port.  Then clear IIR or we'll miss events */
1008                 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
1009                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1010                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1011
1012                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
1013                                          hotplug_status);
1014                         if (hotplug_trigger) {
1015                                 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
1016                                         i915_hpd_irq_setup(dev);
1017                                 queue_work(dev_priv->wq,
1018                                            &dev_priv->hotplug_work);
1019                         }
1020                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1021                         I915_READ(PORT_HOTPLUG_STAT);
1022                 }
1023
1024                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1025                         gmbus_irq_handler(dev);
1026
1027                 if (pm_iir & GEN6_PM_RPS_EVENTS)
1028                         gen6_queue_rps_work(dev_priv, pm_iir);
1029
1030                 I915_WRITE(GTIIR, gt_iir);
1031                 I915_WRITE(GEN6_PMIIR, pm_iir);
1032                 I915_WRITE(VLV_IIR, iir);
1033         }
1034
1035 out:
1036         return ret;
1037 }
1038
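/*
 * South display engine (PCH) interrupt handler for IBX: decode hotplug,
 * audio power, AUX, GMBUS, FDI and transcoder FIFO underrun bits from
 * SDEIIR.
 */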
1039 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1040 {
1041         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1042         int pipe;
1043         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1044
1045         if (hotplug_trigger) {
1046                 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx))
1047                         ibx_hpd_irq_setup(dev);
1048                 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
1049         }
1050         if (pch_iir & SDE_AUDIO_POWER_MASK) {
1051                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1052                                SDE_AUDIO_POWER_SHIFT);
1053                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1054                                  port_name(port));
1055         }
1056
1057         if (pch_iir & SDE_AUX_MASK)
1058                 dp_aux_irq_handler(dev);
1059
1060         if (pch_iir & SDE_GMBUS)
1061                 gmbus_irq_handler(dev);
1062
1063         if (pch_iir & SDE_AUDIO_HDCP_MASK)
1064                 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1065
1066         if (pch_iir & SDE_AUDIO_TRANS_MASK)
1067                 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1068
1069         if (pch_iir & SDE_POISON)
1070                 DRM_ERROR("PCH poison interrupt\n");
1071
1072         if (pch_iir & SDE_FDI_MASK)
1073                 for_each_pipe(pipe)
1074                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1075                                          pipe_name(pipe),
1076                                          I915_READ(FDI_RX_IIR(pipe)));
1077
1078         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1079                 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1080
1081         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1082                 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1083
1084         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1085                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1086                                                           false))
1087                         DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
1088
1089         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1090                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1091                                                           false))
1092                         DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
1093 }
1094
1095 static void ivb_err_int_handler(struct drm_device *dev)
1096 {
1097         struct drm_i915_private *dev_priv = dev->dev_private;
1098         u32 err_int = I915_READ(GEN7_ERR_INT);
1099
1100         if (err_int & ERR_INT_POISON)
1101                 DRM_ERROR("Poison interrupt\n");
1102
1103         if (err_int & ERR_INT_FIFO_UNDERRUN_A)
1104                 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
1105                         DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
1106
1107         if (err_int & ERR_INT_FIFO_UNDERRUN_B)
1108                 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
1109                         DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
1110
1111         if (err_int & ERR_INT_FIFO_UNDERRUN_C)
1112                 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
1113                         DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");
1114
1115         I915_WRITE(GEN7_ERR_INT, err_int);
1116 }
1117
1118 static void cpt_serr_int_handler(struct drm_device *dev)
1119 {
1120         struct drm_i915_private *dev_priv = dev->dev_private;
1121         u32 serr_int = I915_READ(SERR_INT);
1122
1123         if (serr_int & SERR_INT_POISON)
1124                 DRM_ERROR("PCH poison interrupt\n");
1125
1126         if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1127                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1128                                                           false))
1129                         DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
1130
1131         if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1132                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1133                                                           false))
1134                         DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
1135
1136         if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1137                 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
1138                                                           false))
1139                         DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");
1140
1141         I915_WRITE(SERR_INT, serr_int);
1142 }
1143
1144 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1145 {
1146         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1147         int pipe;
1148         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1149
1150         if (hotplug_trigger) {
1151                 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt))
1152                         ibx_hpd_irq_setup(dev);
1153                 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
1154         }
1155         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1156                 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1157                                SDE_AUDIO_POWER_SHIFT_CPT);
1158                 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1159                                  port_name(port));
1160         }
1161
1162         if (pch_iir & SDE_AUX_MASK_CPT)
1163                 dp_aux_irq_handler(dev);
1164
1165         if (pch_iir & SDE_GMBUS_CPT)
1166                 gmbus_irq_handler(dev);
1167
1168         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1169                 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1170
1171         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1172                 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1173
1174         if (pch_iir & SDE_FDI_MASK_CPT)
1175                 for_each_pipe(pipe)
1176                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1177                                          pipe_name(pipe),
1178                                          I915_READ(FDI_RX_IIR(pipe)));
1179
1180         if (pch_iir & SDE_ERROR_CPT)
1181                 cpt_serr_int_handler(dev);
1182 }
1183
1184 static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
1185 {
1186         struct drm_device *dev = (struct drm_device *) arg;
1187         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1188         u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
1189         irqreturn_t ret = IRQ_NONE;
1190         int i;
1191
1192         atomic_inc(&dev_priv->irq_received);
1193
1194         /* We get interrupts on unclaimed registers, so check for this before we
1195          * do any I915_{READ,WRITE}. */
1196         if (IS_HASWELL(dev) &&
1197             (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
1198                 DRM_ERROR("Unclaimed register before interrupt\n");
1199                 I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
1200         }
1201
1202         /* disable master interrupt before clearing iir  */
1203         de_ier = I915_READ(DEIER);
1204         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1205
1206         /* Disable south interrupts. We'll only write to SDEIIR once, so further
1207  * interrupts will be stored on its back queue, and then we'll be
1208          * able to process them after we restore SDEIER (as soon as we restore
1209          * it, we'll get an interrupt if SDEIIR still has something to process
1210          * due to its back queue). */
1211         if (!HAS_PCH_NOP(dev)) {
1212                 sde_ier = I915_READ(SDEIER);
1213                 I915_WRITE(SDEIER, 0);
1214                 POSTING_READ(SDEIER);
1215         }
1216
1217         /* On Haswell, also mask ERR_INT because we don't want to risk
1218          * generating "unclaimed register" interrupts from inside the interrupt
1219          * handler. */
1220         if (IS_HASWELL(dev))
1221                 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
1222
1223         gt_iir = I915_READ(GTIIR);
1224         if (gt_iir) {
1225                 snb_gt_irq_handler(dev, dev_priv, gt_iir);
1226                 I915_WRITE(GTIIR, gt_iir);
1227                 ret = IRQ_HANDLED;
1228         }
1229
1230         de_iir = I915_READ(DEIIR);
1231         if (de_iir) {
1232                 if (de_iir & DE_ERR_INT_IVB)
1233                         ivb_err_int_handler(dev);
1234
1235                 if (de_iir & DE_AUX_CHANNEL_A_IVB)
1236                         dp_aux_irq_handler(dev);
1237
1238                 if (de_iir & DE_GSE_IVB)
1239                         intel_opregion_asle_intr(dev);
1240
1241                 for (i = 0; i < 3; i++) {
1242                         if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
1243                                 drm_handle_vblank(dev, i);
1244                         if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
1245                                 intel_prepare_page_flip(dev, i);
1246                                 intel_finish_page_flip_plane(dev, i);
1247                         }
1248                 }
1249
1250                 /* check event from PCH */
1251                 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1252                         u32 pch_iir = I915_READ(SDEIIR);
1253
1254                         cpt_irq_handler(dev, pch_iir);
1255
1256                         /* clear PCH hotplug event before clearing CPU irq */
1257                         I915_WRITE(SDEIIR, pch_iir);
1258                 }
1259
1260                 I915_WRITE(DEIIR, de_iir);
1261                 ret = IRQ_HANDLED;
1262         }
1263
1264         pm_iir = I915_READ(GEN6_PMIIR);
1265         if (pm_iir) {
1266                 if (IS_HASWELL(dev))
1267                         hsw_pm_irq_handler(dev_priv, pm_iir);
1268                 else if (pm_iir & GEN6_PM_RPS_EVENTS)
1269                         gen6_queue_rps_work(dev_priv, pm_iir);
1270                 I915_WRITE(GEN6_PMIIR, pm_iir);
1271                 ret = IRQ_HANDLED;
1272         }
1273
1274         if (IS_HASWELL(dev) && ivb_can_enable_err_int(dev))
1275                 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
1276
1277         I915_WRITE(DEIER, de_ier);
1278         POSTING_READ(DEIER);
1279         if (!HAS_PCH_NOP(dev)) {
1280                 I915_WRITE(SDEIER, sde_ier);
1281                 POSTING_READ(SDEIER);
1282         }
1283
1284         return ret;
1285 }
1286
1287 static void ilk_gt_irq_handler(struct drm_device *dev,
1288                                struct drm_i915_private *dev_priv,
1289                                u32 gt_iir)
1290 {
1291         if (gt_iir &
1292             (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1293                 notify_ring(dev, &dev_priv->ring[RCS]);
1294         if (gt_iir & ILK_BSD_USER_INTERRUPT)
1295                 notify_ring(dev, &dev_priv->ring[VCS]);
1296 }
1297
1298 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
1299 {
1300         struct drm_device *dev = (struct drm_device *) arg;
1301         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1302         int ret = IRQ_NONE;
1303         u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
1304
1305         atomic_inc(&dev_priv->irq_received);
1306
1307         /* disable master interrupt before clearing iir  */
1308         de_ier = I915_READ(DEIER);
1309         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1310         POSTING_READ(DEIER);
1311
1312         /* Disable south interrupts. We'll only write to SDEIIR once, so further
1313  * interrupts will be stored on its back queue, and then we'll be
1314          * able to process them after we restore SDEIER (as soon as we restore
1315          * it, we'll get an interrupt if SDEIIR still has something to process
1316          * due to its back queue). */
1317         sde_ier = I915_READ(SDEIER);
1318         I915_WRITE(SDEIER, 0);
1319         POSTING_READ(SDEIER);
1320
1321         de_iir = I915_READ(DEIIR);
1322         gt_iir = I915_READ(GTIIR);
1323         pm_iir = I915_READ(GEN6_PMIIR);
1324
1325         if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
1326                 goto done;
1327
1328         ret = IRQ_HANDLED;
1329
1330         if (IS_GEN5(dev))
1331                 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
1332         else
1333                 snb_gt_irq_handler(dev, dev_priv, gt_iir);
1334
1335         if (de_iir & DE_AUX_CHANNEL_A)
1336                 dp_aux_irq_handler(dev);
1337
1338         if (de_iir & DE_GSE)
1339                 intel_opregion_asle_intr(dev);
1340
1341         if (de_iir & DE_PIPEA_VBLANK)
1342                 drm_handle_vblank(dev, 0);
1343
1344         if (de_iir & DE_PIPEB_VBLANK)
1345                 drm_handle_vblank(dev, 1);
1346
1347         if (de_iir & DE_POISON)
1348                 DRM_ERROR("Poison interrupt\n");
1349
1350         if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
1351                 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
1352                         DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
1353
1354         if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
1355                 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
1356                         DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
1357
1358         if (de_iir & DE_PLANEA_FLIP_DONE) {
1359                 intel_prepare_page_flip(dev, 0);
1360                 intel_finish_page_flip_plane(dev, 0);
1361         }
1362
1363         if (de_iir & DE_PLANEB_FLIP_DONE) {
1364                 intel_prepare_page_flip(dev, 1);
1365                 intel_finish_page_flip_plane(dev, 1);
1366         }
1367
1368         /* check event from PCH */
1369         if (de_iir & DE_PCH_EVENT) {
1370                 u32 pch_iir = I915_READ(SDEIIR);
1371
1372                 if (HAS_PCH_CPT(dev))
1373                         cpt_irq_handler(dev, pch_iir);
1374                 else
1375                         ibx_irq_handler(dev, pch_iir);
1376
1377                 /* should clear PCH hotplug event before clearing CPU irq */
1378                 I915_WRITE(SDEIIR, pch_iir);
1379         }
1380
1381         if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
1382                 ironlake_handle_rps_change(dev);
1383
1384         if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
1385                 gen6_queue_rps_work(dev_priv, pm_iir);
1386
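        /* Writing the handled bits back to the IIR registers acks (clears)
         * them so new interrupts can be latched.
         */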
1387         I915_WRITE(GTIIR, gt_iir);
1388         I915_WRITE(DEIIR, de_iir);
1389         I915_WRITE(GEN6_PMIIR, pm_iir);
1390
1391 done:
1392         I915_WRITE(DEIER, de_ier);
1393         POSTING_READ(DEIER);
1394         I915_WRITE(SDEIER, sde_ier);
1395         POSTING_READ(SDEIER);
1396
1397         return ret;
1398 }
1399
1400 /**
1401  * i915_error_work_func - do process context error handling work
1402  * @work: work struct
1403  *
1404  * Fire an error uevent so userspace can see that a hang or error
1405  * was detected.
1406  */
1407 static void i915_error_work_func(struct work_struct *work)
1408 {
1409         struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
1410                                                     work);
1411         drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
1412                                                     gpu_error);
1413         struct drm_device *dev = dev_priv->dev;
1414         struct intel_ring_buffer *ring;
1415         char *error_event[] = { "ERROR=1", NULL };
1416         char *reset_event[] = { "RESET=1", NULL };
1417         char *reset_done_event[] = { "ERROR=0", NULL };
1418         int i, ret;
1419
1420         kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
1421
1422         /*
1423          * Note that there's only one work item which does gpu resets, so we
1424          * need not worry about concurrent gpu resets potentially incrementing
1425          * error->reset_counter twice. We only need to take care of another
1426          * racing irq/hangcheck declaring the gpu dead for a second time. A
1427          * quick check for that is good enough: schedule_work ensures the
1428          * correct ordering between hang detection and this work item, and since
1429          * the reset in-progress bit is only ever set by code outside of this
1430          * work we don't need to worry about any other races.
1431          */
1432         if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
1433                 DRM_DEBUG_DRIVER("resetting chip\n");
1434                 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
1435                                    reset_event);
1436
1437                 ret = i915_reset(dev);
1438
1439                 if (ret == 0) {
1440                         /*
1441                          * After all the gem state is reset, increment the reset
1442                          * counter and wake up everyone waiting for the reset to
1443                          * complete.
1444                          *
1445                          * Since unlock operations are a one-sided barrier only,
1446                          * we need to insert a barrier here to order any seqno
1447                          * updates before
1448                          * the counter increment.
1449                          */
1450                         smp_mb__before_atomic_inc();
1451                         atomic_inc(&dev_priv->gpu_error.reset_counter);
1452
1453                         kobject_uevent_env(&dev->primary->kdev.kobj,
1454                                            KOBJ_CHANGE, reset_done_event);
1455                 } else {
1456                         atomic_set(&error->reset_counter, I915_WEDGED);
1457                 }
1458
1459                 for_each_ring(ring, dev_priv, i)
1460                         wake_up_all(&ring->irq_queue);
1461
1462                 intel_display_handle_reset(dev);
1463
1464                 wake_up_all(&dev_priv->gpu_error.reset_queue);
1465         }
1466 }
1467
1468 /* NB: please notice the memset */
1469 static void i915_get_extra_instdone(struct drm_device *dev,
1470                                     uint32_t *instdone)
1471 {
1472         struct drm_i915_private *dev_priv = dev->dev_private;
1473         memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
1474
1475         switch(INTEL_INFO(dev)->gen) {
1476         case 2:
1477         case 3:
1478                 instdone[0] = I915_READ(INSTDONE);
1479                 break;
1480         case 4:
1481         case 5:
1482         case 6:
1483                 instdone[0] = I915_READ(INSTDONE_I965);
1484                 instdone[1] = I915_READ(INSTDONE1);
1485                 break;
1486         default:
1487                 WARN_ONCE(1, "Unsupported platform\n");
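                /* fall through and read the gen7 layout as a best effort */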
1488         case 7:
1489                 instdone[0] = I915_READ(GEN7_INSTDONE_1);
1490                 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1491                 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
1492                 instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
1493                 break;
1494         }
1495 }
1496
1497 #ifdef CONFIG_DEBUG_FS
1498 static struct drm_i915_error_object *
1499 i915_error_object_create_sized(struct drm_i915_private *dev_priv,
1500                                struct drm_i915_gem_object *src,
1501                                const int num_pages)
1502 {
1503         struct drm_i915_error_object *dst;
1504         int i;
1505         u32 reloc_offset;
1506
1507         if (src == NULL || src->pages == NULL)
1508                 return NULL;
1509
1510         dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
1511         if (dst == NULL)
1512                 return NULL;
1513
1514         reloc_offset = src->gtt_offset;
1515         for (i = 0; i < num_pages; i++) {
1516                 unsigned long flags;
1517                 void *d;
1518
1519                 d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
1520                 if (d == NULL)
1521                         goto unwind;
1522
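                /* Copy the page via whichever path is currently valid: through
                 * the GTT aperture (WC) if the object has a global GTT mapping,
                 * straight out of stolen memory, or from the shmem backing page
                 * with clflushes around the memcpy.
                 */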
1523                 local_irq_save(flags);
1524                 if (reloc_offset < dev_priv->gtt.mappable_end &&
1525                     src->has_global_gtt_mapping) {
1526                         void __iomem *s;
1527
1528                         /* Simply ignore tiling or any overlapping fence.
1529                          * It's part of the error state, and this hopefully
1530                          * captures what the GPU read.
1531                          */
1532
1533                         s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
1534                                                      reloc_offset);
1535                         memcpy_fromio(d, s, PAGE_SIZE);
1536                         io_mapping_unmap_atomic(s);
1537                 } else if (src->stolen) {
1538                         unsigned long offset;
1539
1540                         offset = dev_priv->mm.stolen_base;
1541                         offset += src->stolen->start;
1542                         offset += i << PAGE_SHIFT;
1543
1544                         memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
1545                 } else {
1546                         struct page *page;
1547                         void *s;
1548
1549                         page = i915_gem_object_get_page(src, i);
1550
1551                         drm_clflush_pages(&page, 1);
1552
1553                         s = kmap_atomic(page);
1554                         memcpy(d, s, PAGE_SIZE);
1555                         kunmap_atomic(s);
1556
1557                         drm_clflush_pages(&page, 1);
1558                 }
1559                 local_irq_restore(flags);
1560
1561                 dst->pages[i] = d;
1562
1563                 reloc_offset += PAGE_SIZE;
1564         }
1565         dst->page_count = num_pages;
1566         dst->gtt_offset = src->gtt_offset;
1567
1568         return dst;
1569
1570 unwind:
1571         while (i--)
1572                 kfree(dst->pages[i]);
1573         kfree(dst);
1574         return NULL;
1575 }
1576 #define i915_error_object_create(dev_priv, src) \
1577         i915_error_object_create_sized((dev_priv), (src), \
1578                                        (src)->base.size>>PAGE_SHIFT)
1579
1580 static void
1581 i915_error_object_free(struct drm_i915_error_object *obj)
1582 {
1583         int page;
1584
1585         if (obj == NULL)
1586                 return;
1587
1588         for (page = 0; page < obj->page_count; page++)
1589                 kfree(obj->pages[page]);
1590
1591         kfree(obj);
1592 }
1593
1594 void
1595 i915_error_state_free(struct kref *error_ref)
1596 {
1597         struct drm_i915_error_state *error = container_of(error_ref,
1598                                                           typeof(*error), ref);
1599         int i;
1600
1601         for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
1602                 i915_error_object_free(error->ring[i].batchbuffer);
1603                 i915_error_object_free(error->ring[i].ringbuffer);
1604                 i915_error_object_free(error->ring[i].ctx);
1605                 kfree(error->ring[i].requests);
1606         }
1607
1608         kfree(error->active_bo);
1609         kfree(error->overlay);
1610         kfree(error->display);
1611         kfree(error);
1612 }
1613 static void capture_bo(struct drm_i915_error_buffer *err,
1614                        struct drm_i915_gem_object *obj)
1615 {
1616         err->size = obj->base.size;
1617         err->name = obj->base.name;
1618         err->rseqno = obj->last_read_seqno;
1619         err->wseqno = obj->last_write_seqno;
1620         err->gtt_offset = obj->gtt_offset;
1621         err->read_domains = obj->base.read_domains;
1622         err->write_domain = obj->base.write_domain;
1623         err->fence_reg = obj->fence_reg;
1624         err->pinned = 0;
1625         if (obj->pin_count > 0)
1626                 err->pinned = 1;
1627         if (obj->user_pin_count > 0)
1628                 err->pinned = -1;
1629         err->tiling = obj->tiling_mode;
1630         err->dirty = obj->dirty;
1631         err->purgeable = obj->madv != I915_MADV_WILLNEED;
1632         err->ring = obj->ring ? obj->ring->id : -1;
1633         err->cache_level = obj->cache_level;
1634 }
1635
1636 static u32 capture_active_bo(struct drm_i915_error_buffer *err,
1637                              int count, struct list_head *head)
1638 {
1639         struct drm_i915_gem_object *obj;
1640         int i = 0;
1641
1642         list_for_each_entry(obj, head, mm_list) {
1643                 capture_bo(err++, obj);
1644                 if (++i == count)
1645                         break;
1646         }
1647
1648         return i;
1649 }
1650
1651 static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
1652                              int count, struct list_head *head)
1653 {
1654         struct drm_i915_gem_object *obj;
1655         int i = 0;
1656
1657         list_for_each_entry(obj, head, global_list) {
1658                 if (obj->pin_count == 0)
1659                         continue;
1660
1661                 capture_bo(err++, obj);
1662                 if (++i == count)
1663                         break;
1664         }
1665
1666         return i;
1667 }
1668
1669 static void i915_gem_record_fences(struct drm_device *dev,
1670                                    struct drm_i915_error_state *error)
1671 {
1672         struct drm_i915_private *dev_priv = dev->dev_private;
1673         int i;
1674
1675         /* Fences */
1676         switch (INTEL_INFO(dev)->gen) {
1677         case 7:
1678         case 6:
1679                 for (i = 0; i < dev_priv->num_fence_regs; i++)
1680                         error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
1681                 break;
1682         case 5:
1683         case 4:
1684                 for (i = 0; i < 16; i++)
1685                         error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
1686                 break;
1687         case 3:
1688                 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
1689                         for (i = 0; i < 8; i++)
1690                                 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
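                /* fall through: fences 0-7 use the 830-style registers */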
1691         case 2:
1692                 for (i = 0; i < 8; i++)
1693                         error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
1694                 break;
1695
1696         default:
1697                 BUG();
1698         }
1699 }
1700
1701 static struct drm_i915_error_object *
1702 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1703                              struct intel_ring_buffer *ring)
1704 {
1705         struct drm_i915_gem_object *obj;
1706         u32 seqno;
1707
1708         if (!ring->get_seqno)
1709                 return NULL;
1710
1711         if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
1712                 u32 acthd = I915_READ(ACTHD);
1713
1714                 if (WARN_ON(ring->id != RCS))
1715                         return NULL;
1716
1717                 obj = ring->private;
1718                 if (acthd >= obj->gtt_offset &&
1719                     acthd < obj->gtt_offset + obj->base.size)
1720                         return i915_error_object_create(dev_priv, obj);
1721         }
1722
1723         seqno = ring->get_seqno(ring, false);
1724         list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1725                 if (obj->ring != ring)
1726                         continue;
1727
1728                 if (i915_seqno_passed(seqno, obj->last_read_seqno))
1729                         continue;
1730
1731                 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
1732                         continue;
1733
1734                 /* We need to copy these to an anonymous buffer as the simplest
1735                  * method to avoid being overwritten by userspace.
1736                  */
1737                 return i915_error_object_create(dev_priv, obj);
1738         }
1739
1740         return NULL;
1741 }
1742
1743 static void i915_record_ring_state(struct drm_device *dev,
1744                                    struct drm_i915_error_state *error,
1745                                    struct intel_ring_buffer *ring)
1746 {
1747         struct drm_i915_private *dev_priv = dev->dev_private;
1748
1749         if (INTEL_INFO(dev)->gen >= 6) {
1750                 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
1751                 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
1752                 error->semaphore_mboxes[ring->id][0]
1753                         = I915_READ(RING_SYNC_0(ring->mmio_base));
1754                 error->semaphore_mboxes[ring->id][1]
1755                         = I915_READ(RING_SYNC_1(ring->mmio_base));
1756                 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
1757                 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
1758         }
1759
1760         if (INTEL_INFO(dev)->gen >= 4) {
1761                 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
1762                 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
1763                 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
1764                 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
1765                 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
1766                 if (ring->id == RCS)
1767                         error->bbaddr = I915_READ64(BB_ADDR);
1768         } else {
1769                 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
1770                 error->ipeir[ring->id] = I915_READ(IPEIR);
1771                 error->ipehr[ring->id] = I915_READ(IPEHR);
1772                 error->instdone[ring->id] = I915_READ(INSTDONE);
1773         }
1774
1775         error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
1776         error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
1777         error->seqno[ring->id] = ring->get_seqno(ring, false);
1778         error->acthd[ring->id] = intel_ring_get_active_head(ring);
1779         error->head[ring->id] = I915_READ_HEAD(ring);
1780         error->tail[ring->id] = I915_READ_TAIL(ring);
1781         error->ctl[ring->id] = I915_READ_CTL(ring);
1782
1783         error->cpu_ring_head[ring->id] = ring->head;
1784         error->cpu_ring_tail[ring->id] = ring->tail;
1785 }
1786
1787
1788 static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
1789                                            struct drm_i915_error_state *error,
1790                                            struct drm_i915_error_ring *ering)
1791 {
1792         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1793         struct drm_i915_gem_object *obj;
1794
1795         /* Currently render ring is the only HW context user */
1796         if (ring->id != RCS || !error->ccid)
1797                 return;
1798
1799         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
1800                 if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
1801                         ering->ctx = i915_error_object_create_sized(dev_priv,
1802                                                                     obj, 1);
1803                 }
1804         }
1805 }
1806
1807 static void i915_gem_record_rings(struct drm_device *dev,
1808                                   struct drm_i915_error_state *error)
1809 {
1810         struct drm_i915_private *dev_priv = dev->dev_private;
1811         struct intel_ring_buffer *ring;
1812         struct drm_i915_gem_request *request;
1813         int i, count;
1814
1815         for_each_ring(ring, dev_priv, i) {
1816                 i915_record_ring_state(dev, error, ring);
1817
1818                 error->ring[i].batchbuffer =
1819                         i915_error_first_batchbuffer(dev_priv, ring);
1820
1821                 error->ring[i].ringbuffer =
1822                         i915_error_object_create(dev_priv, ring->obj);
1823
1824
1825                 i915_gem_record_active_context(ring, error, &error->ring[i]);
1826
1827                 count = 0;
1828                 list_for_each_entry(request, &ring->request_list, list)
1829                         count++;
1830
1831                 error->ring[i].num_requests = count;
1832                 error->ring[i].requests =
1833                         kmalloc(count*sizeof(struct drm_i915_error_request),
1834                                 GFP_ATOMIC);
1835                 if (error->ring[i].requests == NULL) {
1836                         error->ring[i].num_requests = 0;
1837                         continue;
1838                 }
1839
1840                 count = 0;
1841                 list_for_each_entry(request, &ring->request_list, list) {
1842                         struct drm_i915_error_request *erq;
1843
1844                         erq = &error->ring[i].requests[count++];
1845                         erq->seqno = request->seqno;
1846                         erq->jiffies = request->emitted_jiffies;
1847                         erq->tail = request->tail;
1848                 }
1849         }
1850 }
1851
1852 /**
1853  * i915_capture_error_state - capture an error record for later analysis
1854  * @dev: drm device
1855  *
1856  * Should be called when an error is detected (either a hang or an error
1857  * interrupt) to capture error state from the time of the error.  Fills
1858  * out a structure which becomes available in debugfs for user level tools
1859  * to pick up.
1860  */
1861 static void i915_capture_error_state(struct drm_device *dev)
1862 {
1863         struct drm_i915_private *dev_priv = dev->dev_private;
1864         struct drm_i915_gem_object *obj;
1865         struct drm_i915_error_state *error;
1866         unsigned long flags;
1867         int i, pipe;
1868
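        /* Only one error state is kept; if one has already been captured,
         * bail out and preserve it.
         */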
1869         spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1870         error = dev_priv->gpu_error.first_error;
1871         spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1872         if (error)
1873                 return;
1874
1875         /* Account for pipe specific data like PIPE*STAT */
1876         error = kzalloc(sizeof(*error), GFP_ATOMIC);
1877         if (!error) {
1878                 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1879                 return;
1880         }
1881
1882         DRM_INFO("capturing error event; look for more information in "
1883                  "/sys/kernel/debug/dri/%d/i915_error_state\n",
1884                  dev->primary->index);
1885
1886         kref_init(&error->ref);
1887         error->eir = I915_READ(EIR);
1888         error->pgtbl_er = I915_READ(PGTBL_ER);
1889         if (HAS_HW_CONTEXTS(dev))
1890                 error->ccid = I915_READ(CCID);
1891
1892         if (HAS_PCH_SPLIT(dev))
1893                 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1894         else if (IS_VALLEYVIEW(dev))
1895                 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1896         else if (IS_GEN2(dev))
1897                 error->ier = I915_READ16(IER);
1898         else
1899                 error->ier = I915_READ(IER);
1900
1901         if (INTEL_INFO(dev)->gen >= 6)
1902                 error->derrmr = I915_READ(DERRMR);
1903
1904         if (IS_VALLEYVIEW(dev))
1905                 error->forcewake = I915_READ(FORCEWAKE_VLV);
1906         else if (INTEL_INFO(dev)->gen >= 7)
1907                 error->forcewake = I915_READ(FORCEWAKE_MT);
1908         else if (INTEL_INFO(dev)->gen == 6)
1909                 error->forcewake = I915_READ(FORCEWAKE);
1910
1911         if (!HAS_PCH_SPLIT(dev))
1912                 for_each_pipe(pipe)
1913                         error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1914
1915         if (INTEL_INFO(dev)->gen >= 6) {
1916                 error->error = I915_READ(ERROR_GEN6);
1917                 error->done_reg = I915_READ(DONE_REG);
1918         }
1919
1920         if (INTEL_INFO(dev)->gen == 7)
1921                 error->err_int = I915_READ(GEN7_ERR_INT);
1922
1923         i915_get_extra_instdone(dev, error->extra_instdone);
1924
1925         i915_gem_record_fences(dev, error);
1926         i915_gem_record_rings(dev, error);
1927
1928         /* Record buffers on the active and pinned lists. */
1929         error->active_bo = NULL;
1930         error->pinned_bo = NULL;
1931
1932         i = 0;
1933         list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1934                 i++;
1935         error->active_bo_count = i;
1936         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1937                 if (obj->pin_count)
1938                         i++;
1939         error->pinned_bo_count = i - error->active_bo_count;
1940
1941         error->active_bo = NULL;
1942         error->pinned_bo = NULL;
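        /* active_bo and pinned_bo share a single allocation; the pinned
         * entries start right after the active ones.
         */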
1943         if (i) {
1944                 error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
1945                                            GFP_ATOMIC);
1946                 if (error->active_bo)
1947                         error->pinned_bo =
1948                                 error->active_bo + error->active_bo_count;
1949         }
1950
1951         if (error->active_bo)
1952                 error->active_bo_count =
1953                         capture_active_bo(error->active_bo,
1954                                           error->active_bo_count,
1955                                           &dev_priv->mm.active_list);
1956
1957         if (error->pinned_bo)
1958                 error->pinned_bo_count =
1959                         capture_pinned_bo(error->pinned_bo,
1960                                           error->pinned_bo_count,
1961                                           &dev_priv->mm.bound_list);
1962
1963         do_gettimeofday(&error->time);
1964
1965         error->overlay = intel_overlay_capture_error_state(dev);
1966         error->display = intel_display_capture_error_state(dev);
1967
1968         spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1969         if (dev_priv->gpu_error.first_error == NULL) {
1970                 dev_priv->gpu_error.first_error = error;
1971                 error = NULL;
1972         }
1973         spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1974
1975         if (error)
1976                 i915_error_state_free(&error->ref);
1977 }
1978
1979 void i915_destroy_error_state(struct drm_device *dev)
1980 {
1981         struct drm_i915_private *dev_priv = dev->dev_private;
1982         struct drm_i915_error_state *error;
1983         unsigned long flags;
1984
1985         spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1986         error = dev_priv->gpu_error.first_error;
1987         dev_priv->gpu_error.first_error = NULL;
1988         spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1989
1990         if (error)
1991                 kref_put(&error->ref, i915_error_state_free);
1992 }
1993 #else
1994 #define i915_capture_error_state(x)
1995 #endif
1996
1997 static void i915_report_and_clear_eir(struct drm_device *dev)
1998 {
1999         struct drm_i915_private *dev_priv = dev->dev_private;
2000         uint32_t instdone[I915_NUM_INSTDONE_REG];
2001         u32 eir = I915_READ(EIR);
2002         int pipe, i;
2003
2004         if (!eir)
2005                 return;
2006
2007         pr_err("render error detected, EIR: 0x%08x\n", eir);
2008
2009         i915_get_extra_instdone(dev, instdone);
2010
2011         if (IS_G4X(dev)) {
2012                 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2013                         u32 ipeir = I915_READ(IPEIR_I965);
2014
2015                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2016                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2017                         for (i = 0; i < ARRAY_SIZE(instdone); i++)
2018                                 pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2019                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2020                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2021                         I915_WRITE(IPEIR_I965, ipeir);
2022                         POSTING_READ(IPEIR_I965);
2023                 }
2024                 if (eir & GM45_ERROR_PAGE_TABLE) {
2025                         u32 pgtbl_err = I915_READ(PGTBL_ER);
2026                         pr_err("page table error\n");
2027                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2028                         I915_WRITE(PGTBL_ER, pgtbl_err);
2029                         POSTING_READ(PGTBL_ER);
2030                 }
2031         }
2032
2033         if (!IS_GEN2(dev)) {
2034                 if (eir & I915_ERROR_PAGE_TABLE) {
2035                         u32 pgtbl_err = I915_READ(PGTBL_ER);
2036                         pr_err("page table error\n");
2037                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2038                         I915_WRITE(PGTBL_ER, pgtbl_err);
2039                         POSTING_READ(PGTBL_ER);
2040                 }
2041         }
2042
2043         if (eir & I915_ERROR_MEMORY_REFRESH) {
2044                 pr_err("memory refresh error:\n");
2045                 for_each_pipe(pipe)
2046                         pr_err("pipe %c stat: 0x%08x\n",
2047                                pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2048                 /* pipestat has already been acked */
2049         }
2050         if (eir & I915_ERROR_INSTRUCTION) {
2051                 pr_err("instruction error\n");
2052                 pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2053                 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2054                         pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2055                 if (INTEL_INFO(dev)->gen < 4) {
2056                         u32 ipeir = I915_READ(IPEIR);
2057
2058                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
2059                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
2060                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
2061                         I915_WRITE(IPEIR, ipeir);
2062                         POSTING_READ(IPEIR);
2063                 } else {
2064                         u32 ipeir = I915_READ(IPEIR_I965);
2065
2066                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2067                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2068                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2069                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2070                         I915_WRITE(IPEIR_I965, ipeir);
2071                         POSTING_READ(IPEIR_I965);
2072                 }
2073         }
2074
2075         I915_WRITE(EIR, eir);
2076         POSTING_READ(EIR);
2077         eir = I915_READ(EIR);
2078         if (eir) {
2079                 /*
2080                  * some errors might have become stuck,
2081                  * mask them.
2082                  */
2083                 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2084                 I915_WRITE(EMR, I915_READ(EMR) | eir);
2085                 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2086         }
2087 }
2088
2089 /**
2090  * i915_handle_error - handle an error interrupt
2091  * @dev: drm device
2092  *
2093  * Do some basic checking of register state at error interrupt time and
2094  * dump it to the syslog.  Also call i915_capture_error_state() to make
2095  * sure we get a record and make it available in debugfs.  Fire a uevent
2096  * so userspace knows something bad happened (should trigger collection
2097  * of a ring dump etc.).
2098  */
2099 void i915_handle_error(struct drm_device *dev, bool wedged)
2100 {
2101         struct drm_i915_private *dev_priv = dev->dev_private;
2102         struct intel_ring_buffer *ring;
2103         int i;
2104
2105         i915_capture_error_state(dev);
2106         i915_report_and_clear_eir(dev);
2107
2108         if (wedged) {
2109                 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2110                                 &dev_priv->gpu_error.reset_counter);
2111
2112                 /*
2113                  * Wakeup waiting processes so that the reset work item
2114                  * doesn't deadlock trying to grab various locks.
2115                  */
2116                 for_each_ring(ring, dev_priv, i)
2117                         wake_up_all(&ring->irq_queue);
2118         }
2119
2120         queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
2121 }
2122
2123 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
2124 {
2125         drm_i915_private_t *dev_priv = dev->dev_private;
2126         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2127         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2128         struct drm_i915_gem_object *obj;
2129         struct intel_unpin_work *work;
2130         unsigned long flags;
2131         bool stall_detected;
2132
2133         /* Ignore early vblank irqs */
2134         if (intel_crtc == NULL)
2135                 return;
2136
2137         spin_lock_irqsave(&dev->event_lock, flags);
2138         work = intel_crtc->unpin_work;
2139
2140         if (work == NULL ||
2141             atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
2142             !work->enable_stall_check) {
2143                 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
2144                 spin_unlock_irqrestore(&dev->event_lock, flags);
2145                 return;
2146         }
2147
2148         /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
2149         obj = work->pending_flip_obj;
2150         if (INTEL_INFO(dev)->gen >= 4) {
2151                 int dspsurf = DSPSURF(intel_crtc->plane);
2152                 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
2153                                         obj->gtt_offset;
2154         } else {
2155                 int dspaddr = DSPADDR(intel_crtc->plane);
2156                 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
2157                                                         crtc->y * crtc->fb->pitches[0] +
2158                                                         crtc->x * crtc->fb->bits_per_pixel/8);
2159         }
2160
2161         spin_unlock_irqrestore(&dev->event_lock, flags);
2162
2163         if (stall_detected) {
2164                 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
2165                 intel_prepare_page_flip(dev, intel_crtc->plane);
2166         }
2167 }
2168
2169 /* Called from drm generic code, passed 'crtc' which
2170  * we use as a pipe index
2171  */
2172 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2173 {
2174         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2175         unsigned long irqflags;
2176
2177         if (!i915_pipe_enabled(dev, pipe))
2178                 return -EINVAL;
2179
2180         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2181         if (INTEL_INFO(dev)->gen >= 4)
2182                 i915_enable_pipestat(dev_priv, pipe,
2183                                      PIPE_START_VBLANK_INTERRUPT_ENABLE);
2184         else
2185                 i915_enable_pipestat(dev_priv, pipe,
2186                                      PIPE_VBLANK_INTERRUPT_ENABLE);
2187
2188         /* maintain vblank delivery even in deep C-states */
2189         if (dev_priv->info->gen == 3)
2190                 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
2191         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2192
2193         return 0;
2194 }
2195
2196 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2197 {
2198         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2199         unsigned long irqflags;
2200
2201         if (!i915_pipe_enabled(dev, pipe))
2202                 return -EINVAL;
2203
2204         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2205         ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
2206                                     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
2207         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2208
2209         return 0;
2210 }
2211
2212 static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
2213 {
2214         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2215         unsigned long irqflags;
2216
2217         if (!i915_pipe_enabled(dev, pipe))
2218                 return -EINVAL;
2219
2220         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
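        /* The per-pipe DE bits on IVB repeat every 5 bits, hence the
         * 5 * pipe shift from pipe A's vblank bit.
         */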
2221         ironlake_enable_display_irq(dev_priv,
2222                                     DE_PIPEA_VBLANK_IVB << (5 * pipe));
2223         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2224
2225         return 0;
2226 }
2227
2228 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2229 {
2230         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2231         unsigned long irqflags;
2232         u32 imr;
2233
2234         if (!i915_pipe_enabled(dev, pipe))
2235                 return -EINVAL;
2236
2237         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2238         imr = I915_READ(VLV_IMR);
2239         if (pipe == 0)
2240                 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
2241         else
2242                 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2243         I915_WRITE(VLV_IMR, imr);
2244         i915_enable_pipestat(dev_priv, pipe,
2245                              PIPE_START_VBLANK_INTERRUPT_ENABLE);
2246         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2247
2248         return 0;
2249 }
2250
2251 /* Called from drm generic code, passed 'crtc' which
2252  * we use as a pipe index
2253  */
2254 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2255 {
2256         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2257         unsigned long irqflags;
2258
2259         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
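        /* gen3: undo the AGPBUSY workaround used to keep vblanks alive in
         * deep C-states now that vblank irqs are no longer needed.
         */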
2260         if (dev_priv->info->gen == 3)
2261                 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
2262
2263         i915_disable_pipestat(dev_priv, pipe,
2264                               PIPE_VBLANK_INTERRUPT_ENABLE |
2265                               PIPE_START_VBLANK_INTERRUPT_ENABLE);
2266         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2267 }
2268
2269 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2270 {
2271         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2272         unsigned long irqflags;
2273
2274         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2275         ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
2276                                      DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
2277         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2278 }
2279
2280 static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
2281 {
2282         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2283         unsigned long irqflags;
2284
2285         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2286         ironlake_disable_display_irq(dev_priv,
2287                                      DE_PIPEA_VBLANK_IVB << (pipe * 5));
2288         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2289 }
2290
2291 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2292 {
2293         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2294         unsigned long irqflags;
2295         u32 imr;
2296
2297         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2298         i915_disable_pipestat(dev_priv, pipe,
2299                               PIPE_START_VBLANK_INTERRUPT_ENABLE);
2300         imr = I915_READ(VLV_IMR);
2301         if (pipe == 0)
2302                 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
2303         else
2304                 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2305         I915_WRITE(VLV_IMR, imr);
2306         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2307 }
2308
2309 static u32
2310 ring_last_seqno(struct intel_ring_buffer *ring)
2311 {
2312         return list_entry(ring->request_list.prev,
2313                           struct drm_i915_gem_request, list)->seqno;
2314 }
2315
2316 static bool
2317 ring_idle(struct intel_ring_buffer *ring, u32 seqno)
2318 {
2319         return (list_empty(&ring->request_list) ||
2320                 i915_seqno_passed(seqno, ring_last_seqno(ring)));
2321 }
2322
2323 static struct intel_ring_buffer *
2324 semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
2325 {
2326         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2327         u32 cmd, ipehr, acthd, acthd_min;
2328
2329         ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2330         if ((ipehr & ~(0x3 << 16)) !=
2331             (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
2332                 return NULL;
2333
2334         /* ACTHD is likely pointing to the dword after the actual command,
2335          * so scan backwards until we find the MBOX.
2336          */
2337         acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
2338         acthd_min = max((int)acthd - 3 * 4, 0);
2339         do {
2340                 cmd = ioread32(ring->virtual_start + acthd);
2341                 if (cmd == ipehr)
2342                         break;
2343
2344                 acthd -= 4;
2345                 if (acthd < acthd_min)
2346                         return NULL;
2347         } while (1);
2348
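        /* The dword after the MBOX command is the semaphore value being
         * waited for; the command itself encodes which of the other rings
         * owns the mailbox.
         */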
2349         *seqno = ioread32(ring->virtual_start+acthd+4)+1;
2350         return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
2351 }
2352
2353 static int semaphore_passed(struct intel_ring_buffer *ring)
2354 {
2355         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2356         struct intel_ring_buffer *signaller;
2357         u32 seqno, ctl;
2358
2359         ring->hangcheck.deadlock = true;
2360
2361         signaller = semaphore_waits_for(ring, &seqno);
2362         if (signaller == NULL || signaller->hangcheck.deadlock)
2363                 return -1;
2364
2365         /* cursory check for an unkickable deadlock */
2366         ctl = I915_READ_CTL(signaller);
2367         if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
2368                 return -1;
2369
2370         return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
2371 }
2372
2373 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2374 {
2375         struct intel_ring_buffer *ring;
2376         int i;
2377
2378         for_each_ring(ring, dev_priv, i)
2379                 ring->hangcheck.deadlock = false;
2380 }
2381
2382 static enum intel_ring_hangcheck_action
2383 ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
2384 {
2385         struct drm_device *dev = ring->dev;
2386         struct drm_i915_private *dev_priv = dev->dev_private;
2387         u32 tmp;
2388
2389         if (ring->hangcheck.acthd != acthd)
2390                 return active;
2391
2392         if (IS_GEN2(dev))
2393                 return hung;
2394
2395         /* Is the chip hanging on a WAIT_FOR_EVENT?
2396          * If so we can simply poke the RB_WAIT bit
2397          * and break the hang. This should work on
2398          * all but the second generation chipsets.
2399          */
2400         tmp = I915_READ_CTL(ring);
2401         if (tmp & RING_WAIT) {
2402                 DRM_ERROR("Kicking stuck wait on %s\n",
2403                           ring->name);
2404                 I915_WRITE_CTL(ring, tmp);
2405                 return kick;
2406         }
2407
2408         if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2409                 switch (semaphore_passed(ring)) {
2410                 default:
2411                         return hung;
2412                 case 1:
2413                         DRM_ERROR("Kicking stuck semaphore on %s\n",
2414                                   ring->name);
2415                         I915_WRITE_CTL(ring, tmp);
2416                         return kick;
2417                 case 0:
2418                         return wait;
2419                 }
2420         }
2421
2422         return hung;
2423 }
2424
2425 /**
2426  * This is called when the chip hasn't reported back with completed
2427  * batchbuffers in a long time. We keep track of seqno progress per ring, and
2428  * if there is no progress, the hangcheck score for that ring is increased.
2429  * Further, acthd is inspected to see if the ring is stuck. If it is, we kick
2430  * the ring. If we see no progress on three subsequent calls we assume the
2431  * chip is wedged and try to fix it by resetting the chip.
2432  */
2433 void i915_hangcheck_elapsed(unsigned long data)
2434 {
2435         struct drm_device *dev = (struct drm_device *)data;
2436         drm_i915_private_t *dev_priv = dev->dev_private;
2437         struct intel_ring_buffer *ring;
2438         int i;
2439         int busy_count = 0, rings_hung = 0;
2440         bool stuck[I915_NUM_RINGS] = { 0 };
2441 #define BUSY 1
2442 #define KICK 5
2443 #define HUNG 20
2444 #define FIRE 30
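/* A ring accumulates score while its seqno does not advance: an otherwise
 * active ring adds BUSY, a kicked ring adds KICK, a hung ring adds HUNG, and
 * any ring whose score exceeds FIRE triggers i915_handle_error() below.
 */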
2445
2446         if (!i915_enable_hangcheck)
2447                 return;
2448
2449         for_each_ring(ring, dev_priv, i) {
2450                 u32 seqno, acthd;
2451                 bool busy = true;
2452
2453                 semaphore_clear_deadlocks(dev_priv);
2454
2455                 seqno = ring->get_seqno(ring, false);
2456                 acthd = intel_ring_get_active_head(ring);
2457
2458                 if (ring->hangcheck.seqno == seqno) {
2459                         if (ring_idle(ring, seqno)) {
2460                                 if (waitqueue_active(&ring->irq_queue)) {
2461                                         /* Issue a wake-up to catch stuck h/w. */
2462                                         DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2463                                                   ring->name);
2464                                         wake_up_all(&ring->irq_queue);
2465                                         ring->hangcheck.score += HUNG;
2466                                 } else
2467                                         busy = false;
2468                         } else {
2469                                 int score;
2470
2471                                 /* We always increment the hangcheck score
2472                                  * if the ring is busy and still processing
2473                                  * the same request, so that no single request
2474                                  * can run indefinitely (such as a chain of
2475                                  * batches). The only time we do not increment
2476                                  * the hangcheck score on this ring is when this
2477                                  * ring is in a legitimate wait for another
2478                                  * ring. In that case the waiting ring is a
2479                                  * victim and we want to be sure we catch the
2480                                  * right culprit. Then every time we do kick
2481                                  * the ring, add a small increment to the
2482                                  * score so that we can catch a batch that is
2483                                  * being repeatedly kicked and so responsible
2484                                  * for stalling the machine.
2485                                  */
2486                                 ring->hangcheck.action = ring_stuck(ring,
2487                                                                     acthd);
2488
2489                                 switch (ring->hangcheck.action) {
2490                                 case wait:
2491                                         score = 0;
2492                                         break;
2493                                 case active:
2494                                         score = BUSY;
2495                                         break;
2496                                 case kick:
2497                                         score = KICK;
2498                                         break;
2499                                 case hung:
2500                                         score = HUNG;
2501                                         stuck[i] = true;
2502                                         break;
2503                                 }
2504                                 ring->hangcheck.score += score;
2505                         }
2506                 } else {
2507                         /* Gradually reduce the count so that we catch DoS
2508                          * attempts across multiple batches.
2509                          */
2510                         if (ring->hangcheck.score > 0)
2511                                 ring->hangcheck.score--;
2512                 }
2513
2514                 ring->hangcheck.seqno = seqno;
2515                 ring->hangcheck.acthd = acthd;
2516                 busy_count += busy;
2517         }
2518
2519         for_each_ring(ring, dev_priv, i) {
2520                 if (ring->hangcheck.score > FIRE) {
2521                         DRM_ERROR("%s on %s\n",
2522                                   stuck[i] ? "stuck" : "no progress",
2523                                   ring->name);
2524                         rings_hung++;
2525                 }
2526         }
2527
2528         if (rings_hung)
2529                 return i915_handle_error(dev, true);
2530
2531         if (busy_count)
2532                 /* Reset timer in case the chip hangs without another request
2533                  * being added */
2534                 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2535                           round_jiffies_up(jiffies +
2536                                            DRM_I915_HANGCHECK_JIFFIES));
2537 }
2538
2539 static void ibx_irq_preinstall(struct drm_device *dev)
2540 {
2541         struct drm_i915_private *dev_priv = dev->dev_private;
2542
2543         if (HAS_PCH_NOP(dev))
2544                 return;
2545
2546         /* south display irq */
2547         I915_WRITE(SDEIMR, 0xffffffff);
2548         /*
2549          * SDEIER is also touched by the interrupt handler to work around missed
2550          * PCH interrupts. Hence we can't update it after the interrupt handler
2551          * is enabled - instead we unconditionally enable all PCH interrupt
2552          * sources here, but then only unmask them as needed with SDEIMR.
2553          */
2554         I915_WRITE(SDEIER, 0xffffffff);
2555         POSTING_READ(SDEIER);
2556 }
2557
2558 /* drm_dma.h hooks
2559  */
2560 static void ironlake_irq_preinstall(struct drm_device *dev)
2561 {
2562         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2563
2564         atomic_set(&dev_priv->irq_received, 0);
2565
2566         I915_WRITE(HWSTAM, 0xeffe);
2567
2568         /* XXX hotplug from PCH */
2569
2570         I915_WRITE(DEIMR, 0xffffffff);
2571         I915_WRITE(DEIER, 0x0);
2572         POSTING_READ(DEIER);
2573
2574         /* and GT */
2575         I915_WRITE(GTIMR, 0xffffffff);
2576         I915_WRITE(GTIER, 0x0);
2577         POSTING_READ(GTIER);
2578
2579         ibx_irq_preinstall(dev);
2580 }
2581
2582 static void ivybridge_irq_preinstall(struct drm_device *dev)
2583 {
2584         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2585
2586         atomic_set(&dev_priv->irq_received, 0);
2587
2588         I915_WRITE(HWSTAM, 0xeffe);
2589
2590         /* XXX hotplug from PCH */
2591
2592         I915_WRITE(DEIMR, 0xffffffff);
2593         I915_WRITE(DEIER, 0x0);
2594         POSTING_READ(DEIER);
2595
2596         /* and GT */
2597         I915_WRITE(GTIMR, 0xffffffff);
2598         I915_WRITE(GTIER, 0x0);
2599         POSTING_READ(GTIER);
2600
2601         /* Power management */
2602         I915_WRITE(GEN6_PMIMR, 0xffffffff);
2603         I915_WRITE(GEN6_PMIER, 0x0);
2604         POSTING_READ(GEN6_PMIER);
2605
2606         ibx_irq_preinstall(dev);
2607 }
2608
2609 static void valleyview_irq_preinstall(struct drm_device *dev)
2610 {
2611         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2612         int pipe;
2613
2614         atomic_set(&dev_priv->irq_received, 0);
2615
2616         /* VLV magic */
2617         I915_WRITE(VLV_IMR, 0);
2618         I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2619         I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2620         I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2621
2622         /* and GT */
2623         I915_WRITE(GTIIR, I915_READ(GTIIR));
2624         I915_WRITE(GTIIR, I915_READ(GTIIR));
2625         I915_WRITE(GTIMR, 0xffffffff);
2626         I915_WRITE(GTIER, 0x0);
2627         POSTING_READ(GTIER);
2628
2629         I915_WRITE(DPINVGTT, 0xff);
2630
2631         I915_WRITE(PORT_HOTPLUG_EN, 0);
2632         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2633         for_each_pipe(pipe)
2634                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2635         I915_WRITE(VLV_IIR, 0xffffffff);
2636         I915_WRITE(VLV_IMR, 0xffffffff);
2637         I915_WRITE(VLV_IER, 0x0);
2638         POSTING_READ(VLV_IER);
2639 }
2640
2641 static void ibx_hpd_irq_setup(struct drm_device *dev)
2642 {
2643         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2644         struct drm_mode_config *mode_config = &dev->mode_config;
2645         struct intel_encoder *intel_encoder;
2646         u32 mask = ~I915_READ(SDEIMR);
2647         u32 hotplug;
2648
2649         if (HAS_PCH_IBX(dev)) {
2650                 mask &= ~SDE_HOTPLUG_MASK;
2651                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2652                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2653                                 mask |= hpd_ibx[intel_encoder->hpd_pin];
2654         } else {
2655                 mask &= ~SDE_HOTPLUG_MASK_CPT;
2656                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2657                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2658                                 mask |= hpd_cpt[intel_encoder->hpd_pin];
2659         }
2660
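        /* SDEIMR is a mask register: set bits are disabled, so write the
         * complement of the bits we want left unmasked.
         */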
2661         I915_WRITE(SDEIMR, ~mask);
2662
2663         /*
2664          * Enable digital hotplug on the PCH, and configure the DP short pulse
2665          * duration to 2ms (which is the minimum in the Display Port spec)
2666          *
2667          * This register is the same on all known PCH chips.
2668          */
2669         hotplug = I915_READ(PCH_PORT_HOTPLUG);
2670         hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2671         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2672         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2673         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2674         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2675 }
2676
2677 static void ibx_irq_postinstall(struct drm_device *dev)
2678 {
2679         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2680         u32 mask;
2681
2682         if (HAS_PCH_NOP(dev))
2683                 return;
2684
2685         if (HAS_PCH_IBX(dev)) {
2686                 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
2687                        SDE_TRANSA_FIFO_UNDER | SDE_POISON;
2688         } else {
2689                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
2690
2691                 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2692         }
2693
2694         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2695         I915_WRITE(SDEIMR, ~mask);
2696 }
2697
2698 static int ironlake_irq_postinstall(struct drm_device *dev)
2699 {
2700         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2701         /* enable the kinds of interrupts that are always enabled */
2702         u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2703                            DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2704                            DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
2705                            DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
2706         u32 gt_irqs;
2707
2708         dev_priv->irq_mask = ~display_mask;
2709
2710         /* should always be able to generate an irq */
2711         I915_WRITE(DEIIR, I915_READ(DEIIR));
2712         I915_WRITE(DEIMR, dev_priv->irq_mask);
2713         I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
2714         POSTING_READ(DEIER);
2715
2716         dev_priv->gt_irq_mask = ~0;
2717
2718         I915_WRITE(GTIIR, I915_READ(GTIIR));
2719         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2720
2721         gt_irqs = GT_RENDER_USER_INTERRUPT;
2722
2723         if (IS_GEN6(dev))
2724                 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
2725         else
2726                 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
2727                            ILK_BSD_USER_INTERRUPT;
2728
2729         I915_WRITE(GTIER, gt_irqs);
2730         POSTING_READ(GTIER);
2731
2732         ibx_irq_postinstall(dev);
2733
2734         if (IS_IRONLAKE_M(dev)) {
2735                 /* Clear & enable PCU event interrupts */
2736                 I915_WRITE(DEIIR, DE_PCU_EVENT);
2737                 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
2738                 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
2739         }
2740
2741         return 0;
2742 }
2743
2744 static int ivybridge_irq_postinstall(struct drm_device *dev)
2745 {
2746         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2747         /* enable the kinds of interrupts that are always enabled */
2748         u32 display_mask =
2749                 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
2750                 DE_PLANEC_FLIP_DONE_IVB |
2751                 DE_PLANEB_FLIP_DONE_IVB |
2752                 DE_PLANEA_FLIP_DONE_IVB |
2753                 DE_AUX_CHANNEL_A_IVB |
2754                 DE_ERR_INT_IVB;
2755         u32 pm_irqs = GEN6_PM_RPS_EVENTS;
2756         u32 gt_irqs;
2757
2758         dev_priv->irq_mask = ~display_mask;
2759
2760         /* should always be able to generate an irq */
2761         I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2762         I915_WRITE(DEIIR, I915_READ(DEIIR));
2763         I915_WRITE(DEIMR, dev_priv->irq_mask);
2764         I915_WRITE(DEIER,
2765                    display_mask |
2766                    DE_PIPEC_VBLANK_IVB |
2767                    DE_PIPEB_VBLANK_IVB |
2768                    DE_PIPEA_VBLANK_IVB);
2769         POSTING_READ(DEIER);
2770
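        /*
         * Keep the L3 parity error unmasked from the start so the parity
         * work can run; the per-ring user interrupts are unmasked on demand.
         */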
2771         dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2772
2773         I915_WRITE(GTIIR, I915_READ(GTIIR));
2774         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2775
2776         gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
2777                   GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2778         I915_WRITE(GTIER, gt_irqs);
2779         POSTING_READ(GTIER);
2780
2781         I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2782         if (HAS_VEBOX(dev))
2783                 pm_irqs |= PM_VEBOX_USER_INTERRUPT |
2784                         PM_VEBOX_CS_ERROR_INTERRUPT;
2785
2786         /* Our RPS enable/disable functions may touch these registers, so
2787          * make sure only the non-RPS bits are forced to a known state here.
2788          * The RMW is extra paranoia, since this should be called after the
2789          * registers were set to a known state in preinstall.
2790          */
2791         I915_WRITE(GEN6_PMIMR,
2792                    (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs);
2793         I915_WRITE(GEN6_PMIER,
2794                    (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs);
2795         POSTING_READ(GEN6_PMIER);
2796
2797         ibx_irq_postinstall(dev);
2798
2799         return 0;
2800 }
2801
2802 static int valleyview_irq_postinstall(struct drm_device *dev)
2803 {
2804         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2805         u32 gt_irqs;
2806         u32 enable_mask;
2807         u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
2808
2809         enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2810         enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2811                 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2812                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2813                 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2814
2815         /*
2816          * Leave vblank interrupts masked initially.  enable/disable will
2817          * toggle them based on usage.
2818          */
2819         dev_priv->irq_mask = (~enable_mask) |
2820                 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2821                 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2822
2823         I915_WRITE(PORT_HOTPLUG_EN, 0);
2824         POSTING_READ(PORT_HOTPLUG_EN);
2825
2826         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2827         I915_WRITE(VLV_IER, enable_mask);
2828         I915_WRITE(VLV_IIR, 0xffffffff);
2829         I915_WRITE(PIPESTAT(0), 0xffff);
2830         I915_WRITE(PIPESTAT(1), 0xffff);
2831         POSTING_READ(VLV_IER);
2832
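        /* On VLV the flip-done and GMBUS events are delivered through PIPESTAT */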
2833         i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2834         i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2835         i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2836
2837         I915_WRITE(VLV_IIR, 0xffffffff);
2838         I915_WRITE(VLV_IIR, 0xffffffff);
2839
2840         I915_WRITE(GTIIR, I915_READ(GTIIR));
2841         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2842
2843         gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
2844                 GT_BLT_USER_INTERRUPT;
2845         I915_WRITE(GTIER, gt_irqs);
2846         POSTING_READ(GTIER);
2847
2848         /* ack & enable invalid PTE error interrupts */
2849 #if 0 /* FIXME: add support to irq handler for checking these bits */
2850         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2851         I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2852 #endif
2853
2854         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2855
2856         return 0;
2857 }
2858
2859 static void valleyview_irq_uninstall(struct drm_device *dev)
2860 {
2861         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2862         int pipe;
2863
2864         if (!dev_priv)
2865                 return;
2866
2867         del_timer_sync(&dev_priv->hotplug_reenable_timer);
2868
2869         for_each_pipe(pipe)
2870                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2871
2872         I915_WRITE(HWSTAM, 0xffffffff);
2873         I915_WRITE(PORT_HOTPLUG_EN, 0);
2874         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2875         for_each_pipe(pipe)
2876                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2877         I915_WRITE(VLV_IIR, 0xffffffff);
2878         I915_WRITE(VLV_IMR, 0xffffffff);
2879         I915_WRITE(VLV_IER, 0x0);
2880         POSTING_READ(VLV_IER);
2881 }
2882
2883 static void ironlake_irq_uninstall(struct drm_device *dev)
2884 {
2885         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2886
2887         if (!dev_priv)
2888                 return;
2889
2890         del_timer_sync(&dev_priv->hotplug_reenable_timer);
2891
2892         I915_WRITE(HWSTAM, 0xffffffff);
2893
2894         I915_WRITE(DEIMR, 0xffffffff);
2895         I915_WRITE(DEIER, 0x0);
2896         I915_WRITE(DEIIR, I915_READ(DEIIR));
2897         if (IS_GEN7(dev))
2898                 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2899
2900         I915_WRITE(GTIMR, 0xffffffff);
2901         I915_WRITE(GTIER, 0x0);
2902         I915_WRITE(GTIIR, I915_READ(GTIIR));
2903
2904         if (HAS_PCH_NOP(dev))
2905                 return;
2906
2907         I915_WRITE(SDEIMR, 0xffffffff);
2908         I915_WRITE(SDEIER, 0x0);
2909         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2910         if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2911                 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2912 }
2913
2914 static void i8xx_irq_preinstall(struct drm_device * dev)
2915 {
2916         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2917         int pipe;
2918
2919         atomic_set(&dev_priv->irq_received, 0);
2920
2921         for_each_pipe(pipe)
2922                 I915_WRITE(PIPESTAT(pipe), 0);
2923         I915_WRITE16(IMR, 0xffff);
2924         I915_WRITE16(IER, 0x0);
2925         POSTING_READ16(IER);
2926 }
2927
2928 static int i8xx_irq_postinstall(struct drm_device *dev)
2929 {
2930         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2931
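        /* Report only page table and memory refresh errors through EMR */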
2932         I915_WRITE16(EMR,
2933                      ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2934
2935         /* Unmask the interrupts that we always want on. */
2936         dev_priv->irq_mask =
2937                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2938                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2939                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2940                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2941                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2942         I915_WRITE16(IMR, dev_priv->irq_mask);
2943
2944         I915_WRITE16(IER,
2945                      I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2946                      I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2947                      I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2948                      I915_USER_INTERRUPT);
2949         POSTING_READ16(IER);
2950
2951         return 0;
2952 }
2953
2954 /*
2955  * Returns true when a page flip has completed.
2956  */
2957 static bool i8xx_handle_vblank(struct drm_device *dev,
2958                                int pipe, u16 iir)
2959 {
2960         drm_i915_private_t *dev_priv = dev->dev_private;
2961         u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
2962
2963         if (!drm_handle_vblank(dev, pipe))
2964                 return false;
2965
2966         if ((iir & flip_pending) == 0)
2967                 return false;
2968
2969         intel_prepare_page_flip(dev, pipe);
2970
2971         /* We detect FlipDone by looking for the change in PendingFlip from '1'
2972          * to '0' on the following vblank, i.e. IIR has the PendingFlip
2973          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2974          * the flip is completed (no longer pending). Since this doesn't raise
2975          * an interrupt per se, we watch for the change at vblank.
2976          */
2977         if (I915_READ16(ISR) & flip_pending)
2978                 return false;
2979
2980         intel_finish_page_flip(dev, pipe);
2981
2982         return true;
2983 }
2984
2985 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
2986 {
2987         struct drm_device *dev = (struct drm_device *) arg;
2988         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2989         u16 iir, new_iir;
2990         u32 pipe_stats[2];
2991         unsigned long irqflags;
2992         int irq_received;
2993         int pipe;
2994         u16 flip_mask =
2995                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2996                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2997
2998         atomic_inc(&dev_priv->irq_received);
2999
3000         iir = I915_READ16(IIR);
3001         if (iir == 0)
3002                 return IRQ_NONE;
3003
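        /*
         * The flip-pending bits are left out of the loop condition and of the
         * IIR ack below; they are only cleared once the corresponding vblank
         * reports the flip as finished.
         */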
3004         while (iir & ~flip_mask) {
3005                 /* Can't rely on pipestat interrupt bit in iir as it might
3006                  * have been cleared after the pipestat interrupt was received.
3007                  * It doesn't set the bit in iir again, but it still produces
3008                  * interrupts (for non-MSI).
3009                  */
3010                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3011                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3012                         i915_handle_error(dev, false);
3013
3014                 for_each_pipe(pipe) {
3015                         int reg = PIPESTAT(pipe);
3016                         pipe_stats[pipe] = I915_READ(reg);
3017
3018                         /*
3019                          * Clear the PIPE*STAT regs before the IIR
3020                          */
3021                         if (pipe_stats[pipe] & 0x8000ffff) {
3022                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3023                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
3024                                                          pipe_name(pipe));
3025                                 I915_WRITE(reg, pipe_stats[pipe]);
3026                                 irq_received = 1;
3027                         }
3028                 }
3029                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3030
3031                 I915_WRITE16(IIR, iir & ~flip_mask);
3032                 new_iir = I915_READ16(IIR); /* Flush posted writes */
3033
3034                 i915_update_dri1_breadcrumb(dev);
3035
3036                 if (iir & I915_USER_INTERRUPT)
3037                         notify_ring(dev, &dev_priv->ring[RCS]);
3038
3039                 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
3040                     i8xx_handle_vblank(dev, 0, iir))
3041                         flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
3042
3043                 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
3044                     i8xx_handle_vblank(dev, 1, iir))
3045                         flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
3046
3047                 iir = new_iir;
3048         }
3049
3050         return IRQ_HANDLED;
3051 }
3052
3053 static void i8xx_irq_uninstall(struct drm_device * dev)
3054 {
3055         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3056         int pipe;
3057
3058         for_each_pipe(pipe) {
3059                 /* Clear enable bits; then clear status bits */
3060                 I915_WRITE(PIPESTAT(pipe), 0);
3061                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3062         }
3063         I915_WRITE16(IMR, 0xffff);
3064         I915_WRITE16(IER, 0x0);
3065         I915_WRITE16(IIR, I915_READ16(IIR));
3066 }
3067
3068 static void i915_irq_preinstall(struct drm_device * dev)
3069 {
3070         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3071         int pipe;
3072
3073         atomic_set(&dev_priv->irq_received, 0);
3074
3075         if (I915_HAS_HOTPLUG(dev)) {
3076                 I915_WRITE(PORT_HOTPLUG_EN, 0);
3077                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3078         }
3079
3080         I915_WRITE16(HWSTAM, 0xeffe);
3081         for_each_pipe(pipe)
3082                 I915_WRITE(PIPESTAT(pipe), 0);
3083         I915_WRITE(IMR, 0xffffffff);
3084         I915_WRITE(IER, 0x0);
3085         POSTING_READ(IER);
3086 }
3087
3088 static int i915_irq_postinstall(struct drm_device *dev)
3089 {
3090         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3091         u32 enable_mask;
3092
3093         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3094
3095         /* Unmask the interrupts that we always want on. */
3096         dev_priv->irq_mask =
3097                 ~(I915_ASLE_INTERRUPT |
3098                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3099                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3100                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3101                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3102                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3103
3104         enable_mask =
3105                 I915_ASLE_INTERRUPT |
3106                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3107                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3108                 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3109                 I915_USER_INTERRUPT;
3110
3111         if (I915_HAS_HOTPLUG(dev)) {
3112                 I915_WRITE(PORT_HOTPLUG_EN, 0);
3113                 POSTING_READ(PORT_HOTPLUG_EN);
3114
3115                 /* Enable in IER... */
3116                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3117                 /* and unmask in IMR */
3118                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3119         }
3120
3121         I915_WRITE(IMR, dev_priv->irq_mask);
3122         I915_WRITE(IER, enable_mask);
3123         POSTING_READ(IER);
3124
3125         i915_enable_asle_pipestat(dev);
3126
3127         return 0;
3128 }
3129
3130 /*
3131  * Returns true when a page flip has completed.
3132  */
3133 static bool i915_handle_vblank(struct drm_device *dev,
3134                                int plane, int pipe, u32 iir)
3135 {
3136         drm_i915_private_t *dev_priv = dev->dev_private;
3137         u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3138
3139         if (!drm_handle_vblank(dev, pipe))
3140                 return false;
3141
3142         if ((iir & flip_pending) == 0)
3143                 return false;
3144
3145         intel_prepare_page_flip(dev, plane);
3146
3147         /* We detect FlipDone by looking for the change in PendingFlip from '1'
3148          * to '0' on the following vblank, i.e. IIR has the PendingFlip
3149          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3150          * the flip is completed (no longer pending). Since this doesn't raise
3151          * an interrupt per se, we watch for the change at vblank.
3152          */
3153         if (I915_READ(ISR) & flip_pending)
3154                 return false;
3155
3156         intel_finish_page_flip(dev, pipe);
3157
3158         return true;
3159 }
3160
3161 static irqreturn_t i915_irq_handler(int irq, void *arg)
3162 {
3163         struct drm_device *dev = (struct drm_device *) arg;
3164         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3165         u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3166         unsigned long irqflags;
3167         u32 flip_mask =
3168                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3169                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3170         int pipe, ret = IRQ_NONE;
3171
3172         atomic_inc(&dev_priv->irq_received);
3173
3174         iir = I915_READ(IIR);
3175         do {
3176                 bool irq_received = (iir & ~flip_mask) != 0;
3177                 bool blc_event = false;
3178
3179                 /* Can't rely on pipestat interrupt bit in iir as it might
3180                  * have been cleared after the pipestat interrupt was received.
3181                  * It doesn't set the bit in iir again, but it still produces
3182                  * interrupts (for non-MSI).
3183                  */
3184                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3185                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3186                         i915_handle_error(dev, false);
3187
3188                 for_each_pipe(pipe) {
3189                         int reg = PIPESTAT(pipe);
3190                         pipe_stats[pipe] = I915_READ(reg);
3191
3192                         /* Clear the PIPE*STAT regs before the IIR */
3193                         if (pipe_stats[pipe] & 0x8000ffff) {
3194                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3195                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
3196                                                          pipe_name(pipe));
3197                                 I915_WRITE(reg, pipe_stats[pipe]);
3198                                 irq_received = true;
3199                         }
3200                 }
3201                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3202
3203                 if (!irq_received)
3204                         break;
3205
3206                 /* Consume port.  Then clear IIR or we'll miss events */
3207                 if ((I915_HAS_HOTPLUG(dev)) &&
3208                     (iir & I915_DISPLAY_PORT_INTERRUPT)) {
3209                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
3210                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
3211
3212                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3213                                   hotplug_status);
3214                         if (hotplug_trigger) {
3215                                 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
3216                                         i915_hpd_irq_setup(dev);
3217                                 queue_work(dev_priv->wq,
3218                                            &dev_priv->hotplug_work);
3219                         }
3220                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3221                         POSTING_READ(PORT_HOTPLUG_STAT);
3222                 }
3223
3224                 I915_WRITE(IIR, iir & ~flip_mask);
3225                 new_iir = I915_READ(IIR); /* Flush posted writes */
3226
3227                 if (iir & I915_USER_INTERRUPT)
3228                         notify_ring(dev, &dev_priv->ring[RCS]);
3229
3230                 for_each_pipe(pipe) {
3231                         int plane = pipe;
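                        /*
                         * Flip-pending is tracked per-plane; on mobile gen3
                         * parts the plane<->pipe mapping is typically swapped,
                         * hence the plane adjustment here.
                         */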
3232                         if (IS_MOBILE(dev))
3233                                 plane = !plane;
3234
3235                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3236                             i915_handle_vblank(dev, plane, pipe, iir))
3237                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3238
3239                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3240                                 blc_event = true;
3241                 }
3242
3243                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3244                         intel_opregion_asle_intr(dev);
3245
3246                 /* With MSI, interrupts are only generated when iir
3247                  * transitions from zero to nonzero.  If another bit got
3248                  * set while we were handling the existing iir bits, then
3249                  * we would never get another interrupt.
3250                  *
3251                  * This is fine on non-MSI as well, as if we hit this path
3252                  * we avoid exiting the interrupt handler only to generate
3253                  * another one.
3254                  *
3255                  * Note that for MSI this could cause a stray interrupt report
3256                  * if an interrupt landed in the time between writing IIR and
3257                  * the posting read.  This should be rare enough to never
3258                  * trigger the 99% of 100,000 interrupts test for disabling
3259                  * stray interrupts.
3260                  */
3261                 ret = IRQ_HANDLED;
3262                 iir = new_iir;
3263         } while (iir & ~flip_mask);
3264
3265         i915_update_dri1_breadcrumb(dev);
3266
3267         return ret;
3268 }
3269
3270 static void i915_irq_uninstall(struct drm_device * dev)
3271 {
3272         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3273         int pipe;
3274
3275         del_timer_sync(&dev_priv->hotplug_reenable_timer);
3276
3277         if (I915_HAS_HOTPLUG(dev)) {
3278                 I915_WRITE(PORT_HOTPLUG_EN, 0);
3279                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3280         }
3281
3282         I915_WRITE16(HWSTAM, 0xffff);
3283         for_each_pipe(pipe) {
3284                 /* Clear enable bits; then clear status bits */
3285                 I915_WRITE(PIPESTAT(pipe), 0);
3286                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3287         }
3288         I915_WRITE(IMR, 0xffffffff);
3289         I915_WRITE(IER, 0x0);
3290
3291         I915_WRITE(IIR, I915_READ(IIR));
3292 }
3293
3294 static void i965_irq_preinstall(struct drm_device * dev)
3295 {
3296         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3297         int pipe;
3298
3299         atomic_set(&dev_priv->irq_received, 0);
3300
3301         I915_WRITE(PORT_HOTPLUG_EN, 0);
3302         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3303
3304         I915_WRITE(HWSTAM, 0xeffe);
3305         for_each_pipe(pipe)
3306                 I915_WRITE(PIPESTAT(pipe), 0);
3307         I915_WRITE(IMR, 0xffffffff);
3308         I915_WRITE(IER, 0x0);
3309         POSTING_READ(IER);
3310 }
3311
3312 static int i965_irq_postinstall(struct drm_device *dev)
3313 {
3314         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3315         u32 enable_mask;
3316         u32 error_mask;
3317
3318         /* Unmask the interrupts that we always want on. */
3319         dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
3320                                I915_DISPLAY_PORT_INTERRUPT |
3321                                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3322                                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3323                                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3324                                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3325                                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3326
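        /*
         * The flip-pending bits are needed in IIR for flip completion
         * tracking, but should not raise interrupts on their own, so they
         * are unmasked in IMR and left out of IER.
         */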
3327         enable_mask = ~dev_priv->irq_mask;
3328         enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3329                          I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3330         enable_mask |= I915_USER_INTERRUPT;
3331
3332         if (IS_G4X(dev))
3333                 enable_mask |= I915_BSD_USER_INTERRUPT;
3334
3335         i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
3336
3337         /*
3338          * Enable some error detection, note the instruction error mask
3339          * bit is reserved, so we leave it masked.
3340          */
3341         if (IS_G4X(dev)) {
3342                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
3343                                GM45_ERROR_MEM_PRIV |
3344                                GM45_ERROR_CP_PRIV |
3345                                I915_ERROR_MEMORY_REFRESH);
3346         } else {
3347                 error_mask = ~(I915_ERROR_PAGE_TABLE |
3348                                I915_ERROR_MEMORY_REFRESH);
3349         }
3350         I915_WRITE(EMR, error_mask);
3351
3352         I915_WRITE(IMR, dev_priv->irq_mask);
3353         I915_WRITE(IER, enable_mask);
3354         POSTING_READ(IER);
3355
3356         I915_WRITE(PORT_HOTPLUG_EN, 0);
3357         POSTING_READ(PORT_HOTPLUG_EN);
3358
3359         i915_enable_asle_pipestat(dev);
3360
3361         return 0;
3362 }
3363
3364 static void i915_hpd_irq_setup(struct drm_device *dev)
3365 {
3366         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3367         struct drm_mode_config *mode_config = &dev->mode_config;
3368         struct intel_encoder *intel_encoder;
3369         u32 hotplug_en;
3370
3371         if (I915_HAS_HOTPLUG(dev)) {
3372                 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
3373                 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
3374                 /* Note HDMI and DP share hotplug bits */
3375                 /* enable bits are the same for all generations */
3376                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
3377                         if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3378                                 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
3379                 /* Programming the CRT detection parameters tends to
3380                  * generate a spurious hotplug event about three seconds
3381                  * later, so just do it once.
3382                  */
3383                 if (IS_G4X(dev))
3384                         hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
3385                 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
3386                 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3387
3388                 /* Ignore TV since it's buggy */
3389                 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
3390         }
3391 }
3392
3393 static irqreturn_t i965_irq_handler(int irq, void *arg)
3394 {
3395         struct drm_device *dev = (struct drm_device *) arg;
3396         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3397         u32 iir, new_iir;
3398         u32 pipe_stats[I915_MAX_PIPES];
3399         unsigned long irqflags;
3400         int irq_received;
3401         int ret = IRQ_NONE, pipe;
3402         u32 flip_mask =
3403                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3404                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3405
3406         atomic_inc(&dev_priv->irq_received);
3407
3408         iir = I915_READ(IIR);
3409
3410         for (;;) {
3411                 bool blc_event = false;
3412
3413                 irq_received = (iir & ~flip_mask) != 0;
3414
3415                 /* Can't rely on pipestat interrupt bit in iir as it might
3416                  * have been cleared after the pipestat interrupt was received.
3417                  * It doesn't set the bit in iir again, but it still produces
3418                  * interrupts (for non-MSI).
3419                  */
3420                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3421                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3422                         i915_handle_error(dev, false);
3423
3424                 for_each_pipe(pipe) {
3425                         int reg = PIPESTAT(pipe);
3426                         pipe_stats[pipe] = I915_READ(reg);
3427
3428                         /*
3429                          * Clear the PIPE*STAT regs before the IIR
3430                          */
3431                         if (pipe_stats[pipe] & 0x8000ffff) {
3432                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3433                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
3434                                                          pipe_name(pipe));
3435                                 I915_WRITE(reg, pipe_stats[pipe]);
3436                                 irq_received = 1;
3437                         }
3438                 }
3439                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3440
3441                 if (!irq_received)
3442                         break;
3443
3444                 ret = IRQ_HANDLED;
3445
3446                 /* Consume port.  Then clear IIR or we'll miss events */
3447                 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
3448                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
3449                         u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
3450                                                                   HOTPLUG_INT_STATUS_G4X :
3451                                                                   HOTPLUG_INT_STATUS_I915);
3452
3453                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3454                                   hotplug_status);
3455                         if (hotplug_trigger) {
3456                                 if (hotplug_irq_storm_detect(dev, hotplug_trigger,
3457                                                             IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915))
3458                                         i915_hpd_irq_setup(dev);
3459                                 queue_work(dev_priv->wq,
3460                                            &dev_priv->hotplug_work);
3461                         }
3462                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3463                         I915_READ(PORT_HOTPLUG_STAT);
3464                 }
3465
3466                 I915_WRITE(IIR, iir & ~flip_mask);
3467                 new_iir = I915_READ(IIR); /* Flush posted writes */
3468
3469                 if (iir & I915_USER_INTERRUPT)
3470                         notify_ring(dev, &dev_priv->ring[RCS]);
3471                 if (iir & I915_BSD_USER_INTERRUPT)
3472                         notify_ring(dev, &dev_priv->ring[VCS]);
3473
3474                 for_each_pipe(pipe) {
3475                         if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
3476                             i915_handle_vblank(dev, pipe, pipe, iir))
3477                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
3478
3479                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3480                                 blc_event = true;
3481                 }
3482
3483
3484                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3485                         intel_opregion_asle_intr(dev);
3486
3487                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
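                /* GMBUS events show up in pipe A's PIPESTAT */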
3488                         gmbus_irq_handler(dev);
3489
3490                 /* With MSI, interrupts are only generated when iir
3491                  * transitions from zero to nonzero.  If another bit got
3492                  * set while we were handling the existing iir bits, then
3493                  * we would never get another interrupt.
3494                  *
3495                  * This is fine on non-MSI as well, as if we hit this path
3496                  * we avoid exiting the interrupt handler only to generate
3497                  * another one.
3498                  *
3499                  * Note that for MSI this could cause a stray interrupt report
3500                  * if an interrupt landed in the time between writing IIR and
3501                  * the posting read.  This should be rare enough to never
3502                  * trigger the 99% of 100,000 interrupts test for disabling
3503                  * stray interrupts.
3504                  */
3505                 iir = new_iir;
3506         }
3507
3508         i915_update_dri1_breadcrumb(dev);
3509
3510         return ret;
3511 }
3512
3513 static void i965_irq_uninstall(struct drm_device * dev)
3514 {
3515         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3516         int pipe;
3517
3518         if (!dev_priv)
3519                 return;
3520
3521         del_timer_sync(&dev_priv->hotplug_reenable_timer);
3522
3523         I915_WRITE(PORT_HOTPLUG_EN, 0);
3524         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3525
3526         I915_WRITE(HWSTAM, 0xffffffff);
3527         for_each_pipe(pipe)
3528                 I915_WRITE(PIPESTAT(pipe), 0);
3529         I915_WRITE(IMR, 0xffffffff);
3530         I915_WRITE(IER, 0x0);
3531
3532         for_each_pipe(pipe)
3533                 I915_WRITE(PIPESTAT(pipe),
3534                            I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
3535         I915_WRITE(IIR, I915_READ(IIR));
3536 }
3537
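/*
 * Timer callback: re-enable HPD pins that were shut off after an interrupt
 * storm and restore the affected connectors' polling mode.
 */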
3538 static void i915_reenable_hotplug_timer_func(unsigned long data)
3539 {
3540         drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
3541         struct drm_device *dev = dev_priv->dev;
3542         struct drm_mode_config *mode_config = &dev->mode_config;
3543         unsigned long irqflags;
3544         int i;
3545
3546         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3547         for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
3548                 struct drm_connector *connector;
3549
3550                 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
3551                         continue;
3552
3553                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3554
3555                 list_for_each_entry(connector, &mode_config->connector_list, head) {
3556                         struct intel_connector *intel_connector = to_intel_connector(connector);
3557
3558                         if (intel_connector->encoder->hpd_pin == i) {
3559                                 if (connector->polled != intel_connector->polled)
3560                                         DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
3561                                                          drm_get_connector_name(connector));
3562                                 connector->polled = intel_connector->polled;
3563                                 if (!connector->polled)
3564                                         connector->polled = DRM_CONNECTOR_POLL_HPD;
3565                         }
3566                 }
3567         }
3568         if (dev_priv->display.hpd_irq_setup)
3569                 dev_priv->display.hpd_irq_setup(dev);
3570         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3571 }
3572
3573 void intel_irq_init(struct drm_device *dev)
3574 {
3575         struct drm_i915_private *dev_priv = dev->dev_private;
3576
3577         INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
3578         INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
3579         INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
3580         INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
3581
3582         setup_timer(&dev_priv->gpu_error.hangcheck_timer,
3583                     i915_hangcheck_elapsed,
3584                     (unsigned long) dev);
3585         setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
3586                     (unsigned long) dev_priv);
3587
3588         pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3589
3590         dev->driver->get_vblank_counter = i915_get_vblank_counter;
3591         dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
3592         if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
3593                 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3594                 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
3595         }
3596
3597         if (drm_core_check_feature(dev, DRIVER_MODESET))
3598                 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
3599         else
3600                 dev->driver->get_vblank_timestamp = NULL;
3601         dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3602
3603         if (IS_VALLEYVIEW(dev)) {
3604                 dev->driver->irq_handler = valleyview_irq_handler;
3605                 dev->driver->irq_preinstall = valleyview_irq_preinstall;
3606                 dev->driver->irq_postinstall = valleyview_irq_postinstall;
3607                 dev->driver->irq_uninstall = valleyview_irq_uninstall;
3608                 dev->driver->enable_vblank = valleyview_enable_vblank;
3609                 dev->driver->disable_vblank = valleyview_disable_vblank;
3610                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3611         } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
3612                 /* Share uninstall handlers with ILK/SNB */
3613                 dev->driver->irq_handler = ivybridge_irq_handler;
3614                 dev->driver->irq_preinstall = ivybridge_irq_preinstall;
3615                 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
3616                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3617                 dev->driver->enable_vblank = ivybridge_enable_vblank;
3618                 dev->driver->disable_vblank = ivybridge_disable_vblank;
3619                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3620         } else if (HAS_PCH_SPLIT(dev)) {
3621                 dev->driver->irq_handler = ironlake_irq_handler;
3622                 dev->driver->irq_preinstall = ironlake_irq_preinstall;
3623                 dev->driver->irq_postinstall = ironlake_irq_postinstall;
3624                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3625                 dev->driver->enable_vblank = ironlake_enable_vblank;
3626                 dev->driver->disable_vblank = ironlake_disable_vblank;
3627                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3628         } else {
3629                 if (INTEL_INFO(dev)->gen == 2) {
3630                         dev->driver->irq_preinstall = i8xx_irq_preinstall;
3631                         dev->driver->irq_postinstall = i8xx_irq_postinstall;
3632                         dev->driver->irq_handler = i8xx_irq_handler;
3633                         dev->driver->irq_uninstall = i8xx_irq_uninstall;
3634                 } else if (INTEL_INFO(dev)->gen == 3) {
3635                         dev->driver->irq_preinstall = i915_irq_preinstall;
3636                         dev->driver->irq_postinstall = i915_irq_postinstall;
3637                         dev->driver->irq_uninstall = i915_irq_uninstall;
3638                         dev->driver->irq_handler = i915_irq_handler;
3639                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3640                 } else {
3641                         dev->driver->irq_preinstall = i965_irq_preinstall;
3642                         dev->driver->irq_postinstall = i965_irq_postinstall;
3643                         dev->driver->irq_uninstall = i965_irq_uninstall;
3644                         dev->driver->irq_handler = i965_irq_handler;
3645                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3646                 }
3647                 dev->driver->enable_vblank = i915_enable_vblank;
3648                 dev->driver->disable_vblank = i915_disable_vblank;
3649         }
3650 }
3651
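/*
 * Reset all hotplug pins to the enabled state, mark connectors with a
 * working HPD pin for interrupt-driven detection, and reprogram the
 * hotplug hardware through the platform hook.
 */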
3652 void intel_hpd_init(struct drm_device *dev)
3653 {
3654         struct drm_i915_private *dev_priv = dev->dev_private;
3655         struct drm_mode_config *mode_config = &dev->mode_config;
3656         struct drm_connector *connector;
3657         int i;
3658
3659         for (i = 1; i < HPD_NUM_PINS; i++) {
3660                 dev_priv->hpd_stats[i].hpd_cnt = 0;
3661                 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3662         }
3663         list_for_each_entry(connector, &mode_config->connector_list, head) {
3664                 struct intel_connector *intel_connector = to_intel_connector(connector);
3665                 connector->polled = intel_connector->polled;
3666                 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3667                         connector->polled = DRM_CONNECTOR_POLL_HPD;
3668         }
3669         if (dev_priv->display.hpd_irq_setup)
3670                 dev_priv->display.hpd_irq_setup(dev);
3671 }