1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31 #include <linux/sysrq.h>
32 #include <linux/slab.h>
33 #include <drm/drmP.h>
34 #include <drm/i915_drm.h>
35 #include "i915_drv.h"
36 #include "i915_trace.h"
37 #include "intel_drv.h"
38
39 static const u32 hpd_ibx[] = {
40         [HPD_CRT] = SDE_CRT_HOTPLUG,
41         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
42         [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
43         [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
44         [HPD_PORT_D] = SDE_PORTD_HOTPLUG
45 };
46
47 static const u32 hpd_cpt[] = {
48         [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
49         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
50         [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
51         [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
52         [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
53 };
54
55 static const u32 hpd_mask_i915[] = {
56         [HPD_CRT] = CRT_HOTPLUG_INT_EN,
57         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
58         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
59         [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
60         [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
61         [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
62 };
63
64 static const u32 hpd_status_gen4[] = {
65         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
66         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
67         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
68         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
69         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
70         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
71 };
72
73 static const u32 hpd_status_i965[] = {
74          [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
75          [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
76          [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
77          [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
78          [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
79          [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
80 };
81
82 static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
83         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
84         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
85         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
86         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
87         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
88         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
89 };
90
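/*
 * Editor's note: the tables above are indexed by enum hpd_pin and give,
 * per platform variant, the register bit that reports a hotplug event on
 * that pin; hotplug_irq_storm_detect() below walks such a table to map a
 * trigger bitmask back to individual pins.
 */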
91
92
93 /* For display hotplug interrupt */
94 static void
95 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
96 {
97         if ((dev_priv->irq_mask & mask) != 0) {
98                 dev_priv->irq_mask &= ~mask;
99                 I915_WRITE(DEIMR, dev_priv->irq_mask);
100                 POSTING_READ(DEIMR);
101         }
102 }
103
104 static void
105 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
106 {
107         if ((dev_priv->irq_mask & mask) != mask) {
108                 dev_priv->irq_mask |= mask;
109                 I915_WRITE(DEIMR, dev_priv->irq_mask);
110                 POSTING_READ(DEIMR);
111         }
112 }
113
114 void
115 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
116 {
117         u32 reg = PIPESTAT(pipe);
118         u32 pipestat = I915_READ(reg) & 0x7fff0000;
119
120         if ((pipestat & mask) == mask)
121                 return;
122
123         /* Enable the interrupt, clear any pending status */
124         pipestat |= mask | (mask >> 16);
125         I915_WRITE(reg, pipestat);
126         POSTING_READ(reg);
127 }
128
129 void
130 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
131 {
132         u32 reg = PIPESTAT(pipe);
133         u32 pipestat = I915_READ(reg) & 0x7fff0000;
134
135         if ((pipestat & mask) == 0)
136                 return;
137
138         pipestat &= ~mask;
139         I915_WRITE(reg, pipestat);
140         POSTING_READ(reg);
141 }
142
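/*
 * Editor's sketch (illustrative, not used by the driver): PIPESTAT packs
 * the interrupt-enable bits in the upper 16 bits and the matching status
 * bits exactly 16 bits below, which is why i915_enable_pipestat() writes
 * "mask | (mask >> 16)" to enable an event and acknowledge any pending
 * status in a single write. A hypothetical helper making that explicit:
 */
static inline u32 __maybe_unused pipestat_enable_with_ack(u32 enable_mask)
{
	/* enable bits live in [31:16]; their status bits sit in [15:0] */
	return enable_mask | (enable_mask >> 16);
}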
143 /**
144  * intel_enable_asle - enable ASLE interrupt for OpRegion
145  */
146 void intel_enable_asle(struct drm_device *dev)
147 {
148         drm_i915_private_t *dev_priv = dev->dev_private;
149         unsigned long irqflags;
150
151         /* FIXME: opregion/asle for VLV */
152         if (IS_VALLEYVIEW(dev))
153                 return;
154
155         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
156
157         if (HAS_PCH_SPLIT(dev))
158                 ironlake_enable_display_irq(dev_priv, DE_GSE);
159         else {
160                 i915_enable_pipestat(dev_priv, 1,
161                                      PIPE_LEGACY_BLC_EVENT_ENABLE);
162                 if (INTEL_INFO(dev)->gen >= 4)
163                         i915_enable_pipestat(dev_priv, 0,
164                                              PIPE_LEGACY_BLC_EVENT_ENABLE);
165         }
166
167         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
168 }
169
170 /**
171  * i915_pipe_enabled - check if a pipe is enabled
172  * @dev: DRM device
173  * @pipe: pipe to check
174  *
175  * Reading certain registers when the pipe is disabled can hang the chip.
176  * Use this routine to make sure the PLL is running and the pipe is active
177  * before reading such registers if unsure.
178  */
179 static int
180 i915_pipe_enabled(struct drm_device *dev, int pipe)
181 {
182         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
183         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
184                                                                       pipe);
185
186         return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
187 }
188
189 /* Called from drm generic code, passed a 'crtc', which
190  * we use as a pipe index
191  */
192 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
193 {
194         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
195         unsigned long high_frame;
196         unsigned long low_frame;
197         u32 high1, high2, low;
198
199         if (!i915_pipe_enabled(dev, pipe)) {
200                 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
201                                 "pipe %c\n", pipe_name(pipe));
202                 return 0;
203         }
204
205         high_frame = PIPEFRAME(pipe);
206         low_frame = PIPEFRAMEPIXEL(pipe);
207
208         /*
209          * High & low register fields aren't synchronized, so make sure
210          * we get a low value that's stable across two reads of the high
211          * register.
212          */
213         do {
214                 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
215                 low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
216                 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
217         } while (high1 != high2);
218
219         high1 >>= PIPE_FRAME_HIGH_SHIFT;
220         low >>= PIPE_FRAME_LOW_SHIFT;
221         return (high1 << 8) | low;
222 }
223
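/*
 * Editor's sketch: the high1/high2 loop above is the usual torn-read
 * guard for a counter split across two registers; re-reading the high
 * half until it is stable guarantees the low half was sampled within a
 * single high-half period. The same pattern generically, with
 * hypothetical rd_hi()/rd_lo() accessors:
 */
static u32 __maybe_unused read_split_frame_counter(u32 (*rd_hi)(void),
						   u32 (*rd_lo)(void))
{
	u32 hi, lo;

	do {
		hi = rd_hi();
		lo = rd_lo();
	} while (hi != rd_hi());	/* high half changed: retry */

	return (hi << 8) | lo;		/* low register holds 8 frame bits */
}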
224 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
225 {
226         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
227         int reg = PIPE_FRMCOUNT_GM45(pipe);
228
229         if (!i915_pipe_enabled(dev, pipe)) {
230                 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
231                                  "pipe %c\n", pipe_name(pipe));
232                 return 0;
233         }
234
235         return I915_READ(reg);
236 }
237
238 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
239                              int *vpos, int *hpos)
240 {
241         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
242         u32 vbl = 0, position = 0;
243         int vbl_start, vbl_end, htotal, vtotal;
244         bool in_vbl = true;
245         int ret = 0;
246         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
247                                                                       pipe);
248
249         if (!i915_pipe_enabled(dev, pipe)) {
250                 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
251                                  "pipe %c\n", pipe_name(pipe));
252                 return 0;
253         }
254
255         /* Get vtotal. */
256         vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
257
258         if (INTEL_INFO(dev)->gen >= 4) {
259                 /* No obvious pixelcount register. Only query vertical
260                  * scanout position from Display scan line register.
261                  */
262                 position = I915_READ(PIPEDSL(pipe));
263
264                 /* Decode into vertical scanout position. Don't have
265                  * horizontal scanout position.
266                  */
267                 *vpos = position & 0x1fff;
268                 *hpos = 0;
269         } else {
270                 /* Have access to pixelcount since start of frame.
271                  * We can split this into vertical and horizontal
272                  * scanout position.
273                  */
274                 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
275
276                 htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
277                 *vpos = position / htotal;
278                 *hpos = position - (*vpos * htotal);
279         }
280
281         /* Query vblank area. */
282         vbl = I915_READ(VBLANK(cpu_transcoder));
283
284         /* Test position against vblank region. */
285         vbl_start = vbl & 0x1fff;
286         vbl_end = (vbl >> 16) & 0x1fff;
287
288         if ((*vpos < vbl_start) || (*vpos > vbl_end))
289                 in_vbl = false;
290
291         /* Inside "upper part" of vblank area? Apply corrective offset: */
292         if (in_vbl && (*vpos >= vbl_start))
293                 *vpos = *vpos - vtotal;
294
295         /* Readouts valid? */
296         if (vbl > 0)
297                 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
298
299         /* In vblank? */
300         if (in_vbl)
301                 ret |= DRM_SCANOUTPOS_INVBL;
302
303         return ret;
304 }
305
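/*
 * Editor's example, with hypothetical numbers: on a pre-gen4 pipe with
 * htotal = 2200 pixels and a pixel count of 5500 since start of frame,
 * the split above yields *vpos = 5500 / 2200 = 2 and
 * *hpos = 5500 - 2 * 2200 = 1100.
 */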
306 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
307                               int *max_error,
308                               struct timeval *vblank_time,
309                               unsigned flags)
310 {
311         struct drm_crtc *crtc;
312
313         if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
314                 DRM_ERROR("Invalid crtc %d\n", pipe);
315                 return -EINVAL;
316         }
317
318         /* Get drm_crtc to timestamp: */
319         crtc = intel_get_crtc_for_pipe(dev, pipe);
320         if (crtc == NULL) {
321                 DRM_ERROR("Invalid crtc %d\n", pipe);
322                 return -EINVAL;
323         }
324
325         if (!crtc->enabled) {
326                 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
327                 return -EBUSY;
328         }
329
330         /* Helper routine in DRM core does all the work: */
331         return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
332                                                      vblank_time, flags,
333                                                      crtc);
334 }
335
336 /*
337  * Handle hotplug events outside the interrupt handler proper.
338  */
339 static void i915_hotplug_work_func(struct work_struct *work)
340 {
341         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
342                                                     hotplug_work);
343         struct drm_device *dev = dev_priv->dev;
344         struct drm_mode_config *mode_config = &dev->mode_config;
345         struct intel_encoder *encoder;
346
347         /* Ignore HPD irqs that arrive before everything is fully set up. */
348         if (!dev_priv->enable_hotplug_processing)
349                 return;
350
351         mutex_lock(&mode_config->mutex);
352         DRM_DEBUG_KMS("running encoder hotplug functions\n");
353
354         list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
355                 if (encoder->hot_plug)
356                         encoder->hot_plug(encoder);
357
358         mutex_unlock(&mode_config->mutex);
359
360         /* Just fire off a uevent and let userspace tell us what to do */
361         drm_helper_hpd_irq_event(dev);
362 }
363
364 static void ironlake_handle_rps_change(struct drm_device *dev)
365 {
366         drm_i915_private_t *dev_priv = dev->dev_private;
367         u32 busy_up, busy_down, max_avg, min_avg;
368         u8 new_delay;
369         unsigned long flags;
370
371         spin_lock_irqsave(&mchdev_lock, flags);
372
373         I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
374
375         new_delay = dev_priv->ips.cur_delay;
376
377         I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
378         busy_up = I915_READ(RCPREVBSYTUPAVG);
379         busy_down = I915_READ(RCPREVBSYTDNAVG);
380         max_avg = I915_READ(RCBMAXAVG);
381         min_avg = I915_READ(RCBMINAVG);
382
383         /* Handle RCS change request from hw */
384         if (busy_up > max_avg) {
385                 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
386                         new_delay = dev_priv->ips.cur_delay - 1;
387                 if (new_delay < dev_priv->ips.max_delay)
388                         new_delay = dev_priv->ips.max_delay;
389         } else if (busy_down < min_avg) {
390                 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
391                         new_delay = dev_priv->ips.cur_delay + 1;
392                 if (new_delay > dev_priv->ips.min_delay)
393                         new_delay = dev_priv->ips.min_delay;
394         }
395
396         if (ironlake_set_drps(dev, new_delay))
397                 dev_priv->ips.cur_delay = new_delay;
398
399         spin_unlock_irqrestore(&mchdev_lock, flags);
400
401         return;
402 }
403
404 static void notify_ring(struct drm_device *dev,
405                         struct intel_ring_buffer *ring)
406 {
407         struct drm_i915_private *dev_priv = dev->dev_private;
408
409         if (ring->obj == NULL)
410                 return;
411
412         trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
413
414         wake_up_all(&ring->irq_queue);
415         if (i915_enable_hangcheck) {
416                 dev_priv->gpu_error.hangcheck_count = 0;
417                 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
418                           round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
419         }
420 }
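/*
 * Editor's note: every completed request resets the hangcheck counter and
 * pushes the timer DRM_I915_HANGCHECK_JIFFIES into the future, so hangcheck
 * only fires once the GPU has been silent for that interval.
 */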
421
422 static void gen6_pm_rps_work(struct work_struct *work)
423 {
424         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
425                                                     rps.work);
426         u32 pm_iir, pm_imr;
427         u8 new_delay;
428
429         spin_lock_irq(&dev_priv->rps.lock);
430         pm_iir = dev_priv->rps.pm_iir;
431         dev_priv->rps.pm_iir = 0;
432         pm_imr = I915_READ(GEN6_PMIMR);
433         I915_WRITE(GEN6_PMIMR, 0);
434         spin_unlock_irq(&dev_priv->rps.lock);
435
436         if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
437                 return;
438
439         mutex_lock(&dev_priv->rps.hw_lock);
440
441         if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
442                 new_delay = dev_priv->rps.cur_delay + 1;
443         else
444                 new_delay = dev_priv->rps.cur_delay - 1;
445
446         /* sysfs frequency interfaces may have snuck in while servicing the
447          * interrupt
448          */
449         if (!(new_delay > dev_priv->rps.max_delay ||
450               new_delay < dev_priv->rps.min_delay)) {
451                 gen6_set_rps(dev_priv->dev, new_delay);
452         }
453
454         mutex_unlock(&dev_priv->rps.hw_lock);
455 }
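/*
 * Editor's note: the negated range check above reads as
 * "min_delay <= new_delay <= max_delay"; a step that would leave the
 * limits (possibly just updated via sysfs) is simply dropped.
 */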
456
457
458 /**
459  * ivybridge_parity_work - Workqueue called when a parity error interrupt
460  * occurred.
461  * @work: workqueue struct
462  *
463  * Doesn't actually do anything except notify userspace. As a consequence of
464  * this event, userspace should try to remap the bad rows since,
465  * statistically, the same row is more likely to go bad again.
466  */
467 static void ivybridge_parity_work(struct work_struct *work)
468 {
469         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
470                                                     l3_parity.error_work);
471         u32 error_status, row, bank, subbank;
472         char *parity_event[5];
473         uint32_t misccpctl;
474         unsigned long flags;
475
476         /* We must turn off DOP level clock gating to access the L3 registers.
477          * In order to prevent a get/put style interface, acquire struct mutex
478          * any time we access those registers.
479          */
480         mutex_lock(&dev_priv->dev->struct_mutex);
481
482         misccpctl = I915_READ(GEN7_MISCCPCTL);
483         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
484         POSTING_READ(GEN7_MISCCPCTL);
485
486         error_status = I915_READ(GEN7_L3CDERRST1);
487         row = GEN7_PARITY_ERROR_ROW(error_status);
488         bank = GEN7_PARITY_ERROR_BANK(error_status);
489         subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
490
491         I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
492                                     GEN7_L3CDERRST1_ENABLE);
493         POSTING_READ(GEN7_L3CDERRST1);
494
495         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
496
497         spin_lock_irqsave(&dev_priv->irq_lock, flags);
498         dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
499         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
500         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
501
502         mutex_unlock(&dev_priv->dev->struct_mutex);
503
504         parity_event[0] = "L3_PARITY_ERROR=1";
505         parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
506         parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
507         parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
508         parity_event[4] = NULL;
509
510         kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
511                            KOBJ_CHANGE, parity_event);
512
513         DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
514                   row, bank, subbank);
515
516         kfree(parity_event[3]);
517         kfree(parity_event[2]);
518         kfree(parity_event[1]);
519 }
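/*
 * Editor's note: only the kasprintf()-allocated strings are freed above;
 * parity_event[0] is a string literal and must not be passed to kfree().
 * kfree() also tolerates NULL, so a failed kasprintf() is safe to free.
 */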
520
521 static void ivybridge_handle_parity_error(struct drm_device *dev)
522 {
523         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
524         unsigned long flags;
525
526         if (!HAS_L3_GPU_CACHE(dev))
527                 return;
528
529         spin_lock_irqsave(&dev_priv->irq_lock, flags);
530         dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
531         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
532         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
533
534         queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
535 }
536
537 static void snb_gt_irq_handler(struct drm_device *dev,
538                                struct drm_i915_private *dev_priv,
539                                u32 gt_iir)
540 {
541
542         if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
543                       GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
544                 notify_ring(dev, &dev_priv->ring[RCS]);
545         if (gt_iir & GEN6_BSD_USER_INTERRUPT)
546                 notify_ring(dev, &dev_priv->ring[VCS]);
547         if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
548                 notify_ring(dev, &dev_priv->ring[BCS]);
549
550         if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
551                       GT_GEN6_BSD_CS_ERROR_INTERRUPT |
552                       GT_RENDER_CS_ERROR_INTERRUPT)) {
553                 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
554                 i915_handle_error(dev, false);
555         }
556
557         if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
558                 ivybridge_handle_parity_error(dev);
559 }
560
561 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
562                                 u32 pm_iir)
563 {
564         unsigned long flags;
565
566         /*
567          * IIR bits should never already be set because IMR should
568          * prevent an interrupt from being shown in IIR. If they are, it
569          * means we have unsafely cleared dev_priv->rps.pm_iir somewhere.
570          * Although missing an interrupt of the same type is not itself a
571          * problem, it indicates a bug in the masking logic.
572          *
573          * The mask bit in IMR is cleared by dev_priv->rps.work.
574          */
575
576         spin_lock_irqsave(&dev_priv->rps.lock, flags);
577         dev_priv->rps.pm_iir |= pm_iir;
578         I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
579         POSTING_READ(GEN6_PMIMR);
580         spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
581
582         queue_work(dev_priv->wq, &dev_priv->rps.work);
583 }
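/*
 * Editor's note on the handshake above: the interrupt handler masks the
 * event in GEN6_PMIMR and accumulates the IIR bits under rps.lock; the
 * work item (gen6_pm_rps_work) later snapshots and clears rps.pm_iir and
 * writes GEN6_PMIMR back to 0, re-enabling delivery.
 */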
584
585 #define HPD_STORM_DETECT_PERIOD 1000 /* ms */
586 #define HPD_STORM_THRESHOLD 5
587
588 static inline void hotplug_irq_storm_detect(struct drm_device *dev,
589                                             u32 hotplug_trigger,
590                                             const u32 *hpd)
591 {
592         drm_i915_private_t *dev_priv = dev->dev_private;
593         unsigned long irqflags;
594         int i;
595
596         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
597
598         for (i = 1; i < HPD_NUM_PINS; i++) {
599
600                 if (!(hpd[i] & hotplug_trigger) ||
601                     dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
602                         continue;
603
604                 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
605                                    dev_priv->hpd_stats[i].hpd_last_jiffies
606                                    + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
607                         dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
608                         dev_priv->hpd_stats[i].hpd_cnt = 0;
609                 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
610                         dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
611                         DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
612                 } else {
613                         dev_priv->hpd_stats[i].hpd_cnt++;
614                 }
615         }
616
617         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
618 }
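/*
 * Editor's sketch of the storm rule above: triggers on a pin are counted
 * inside a HPD_STORM_DETECT_PERIOD window; a trigger arriving outside the
 * window restarts it, and exceeding HPD_STORM_THRESHOLD within one window
 * marks the pin disabled. A condensed, hypothetical version:
 */
static bool __maybe_unused hpd_pin_storming(unsigned long now,
					    unsigned long *window_start,
					    int *cnt)
{
	if (!time_in_range(now, *window_start, *window_start +
			   msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
		*window_start = now;	/* stale window: restart and reset */
		*cnt = 0;
		return false;
	}
	return (*cnt)++ > HPD_STORM_THRESHOLD;	/* count within the window */
}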
619
620 static void gmbus_irq_handler(struct drm_device *dev)
621 {
622         struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
623
624         wake_up_all(&dev_priv->gmbus_wait_queue);
625 }
626
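/* Editor's note: on these kernels DP AUX completion is waited for on the
 * same dev_priv->gmbus_wait_queue, hence the shared wake-up below.
 */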
627 static void dp_aux_irq_handler(struct drm_device *dev)
628 {
629         struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
630
631         wake_up_all(&dev_priv->gmbus_wait_queue);
632 }
633
634 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
635 {
636         struct drm_device *dev = (struct drm_device *) arg;
637         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
638         u32 iir, gt_iir, pm_iir;
639         irqreturn_t ret = IRQ_NONE;
640         unsigned long irqflags;
641         int pipe;
642         u32 pipe_stats[I915_MAX_PIPES];
643
644         atomic_inc(&dev_priv->irq_received);
645
646         while (true) {
647                 iir = I915_READ(VLV_IIR);
648                 gt_iir = I915_READ(GTIIR);
649                 pm_iir = I915_READ(GEN6_PMIIR);
650
651                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
652                         goto out;
653
654                 ret = IRQ_HANDLED;
655
656                 snb_gt_irq_handler(dev, dev_priv, gt_iir);
657
658                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
659                 for_each_pipe(pipe) {
660                         int reg = PIPESTAT(pipe);
661                         pipe_stats[pipe] = I915_READ(reg);
662
663                         /*
664                          * Clear the PIPE*STAT regs before the IIR
665                          */
666                         if (pipe_stats[pipe] & 0x8000ffff) {
667                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
668                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
669                                                          pipe_name(pipe));
670                                 I915_WRITE(reg, pipe_stats[pipe]);
671                         }
672                 }
673                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
674
675                 for_each_pipe(pipe) {
676                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
677                                 drm_handle_vblank(dev, pipe);
678
679                         if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
680                                 intel_prepare_page_flip(dev, pipe);
681                                 intel_finish_page_flip(dev, pipe);
682                         }
683                 }
684
685                 /* Consume port.  Then clear IIR or we'll miss events */
686                 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
687                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
688                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
689
690                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
691                                          hotplug_status);
692                         if (hotplug_trigger) {
693                                 hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915);
694                                 queue_work(dev_priv->wq,
695                                            &dev_priv->hotplug_work);
696                         }
697                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
698                         I915_READ(PORT_HOTPLUG_STAT);
699                 }
700
701                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
702                         gmbus_irq_handler(dev);
703
704                 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
705                         gen6_queue_rps_work(dev_priv, pm_iir);
706
707                 I915_WRITE(GTIIR, gt_iir);
708                 I915_WRITE(GEN6_PMIIR, pm_iir);
709                 I915_WRITE(VLV_IIR, iir);
710         }
711
712 out:
713         return ret;
714 }
715
716 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
717 {
718         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
719         int pipe;
720         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
721
722         if (hotplug_trigger) {
723                 hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx);
724                 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
725         }
726         if (pch_iir & SDE_AUDIO_POWER_MASK)
727                 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
728                                  (pch_iir & SDE_AUDIO_POWER_MASK) >>
729                                  SDE_AUDIO_POWER_SHIFT);
730
731         if (pch_iir & SDE_AUX_MASK)
732                 dp_aux_irq_handler(dev);
733
734         if (pch_iir & SDE_GMBUS)
735                 gmbus_irq_handler(dev);
736
737         if (pch_iir & SDE_AUDIO_HDCP_MASK)
738                 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
739
740         if (pch_iir & SDE_AUDIO_TRANS_MASK)
741                 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
742
743         if (pch_iir & SDE_POISON)
744                 DRM_ERROR("PCH poison interrupt\n");
745
746         if (pch_iir & SDE_FDI_MASK)
747                 for_each_pipe(pipe)
748                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
749                                          pipe_name(pipe),
750                                          I915_READ(FDI_RX_IIR(pipe)));
751
752         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
753                 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
754
755         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
756                 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
757
758         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
759                 DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
760         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
761                 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
762 }
763
764 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
765 {
766         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
767         int pipe;
768         u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
769
770         if (hotplug_trigger) {
771                 hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt);
772                 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
773         }
774         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
775                 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
776                                  (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
777                                  SDE_AUDIO_POWER_SHIFT_CPT);
778
779         if (pch_iir & SDE_AUX_MASK_CPT)
780                 dp_aux_irq_handler(dev);
781
782         if (pch_iir & SDE_GMBUS_CPT)
783                 gmbus_irq_handler(dev);
784
785         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
786                 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
787
788         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
789                 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
790
791         if (pch_iir & SDE_FDI_MASK_CPT)
792                 for_each_pipe(pipe)
793                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
794                                          pipe_name(pipe),
795                                          I915_READ(FDI_RX_IIR(pipe)));
796 }
797
798 static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
799 {
800         struct drm_device *dev = (struct drm_device *) arg;
801         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
802         u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
803         irqreturn_t ret = IRQ_NONE;
804         int i;
805
806         atomic_inc(&dev_priv->irq_received);
807
808         /* disable master interrupt before clearing iir  */
809         de_ier = I915_READ(DEIER);
810         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
811
812         /* Disable south interrupts. We'll only write to SDEIIR once, so further
813          * interrupts will be stored on its back queue, and then we'll be
814          * able to process them after we restore SDEIER (as soon as we restore
815          * it, we'll get an interrupt if SDEIIR still has something to process
816          * due to its back queue). */
817         if (!HAS_PCH_NOP(dev)) {
818                 sde_ier = I915_READ(SDEIER);
819                 I915_WRITE(SDEIER, 0);
820                 POSTING_READ(SDEIER);
821         }
822
823         gt_iir = I915_READ(GTIIR);
824         if (gt_iir) {
825                 snb_gt_irq_handler(dev, dev_priv, gt_iir);
826                 I915_WRITE(GTIIR, gt_iir);
827                 ret = IRQ_HANDLED;
828         }
829
830         de_iir = I915_READ(DEIIR);
831         if (de_iir) {
832                 if (de_iir & DE_AUX_CHANNEL_A_IVB)
833                         dp_aux_irq_handler(dev);
834
835                 if (de_iir & DE_GSE_IVB)
836                         intel_opregion_gse_intr(dev);
837
838                 for (i = 0; i < 3; i++) {
839                         if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
840                                 drm_handle_vblank(dev, i);
841                         if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
842                                 intel_prepare_page_flip(dev, i);
843                                 intel_finish_page_flip_plane(dev, i);
844                         }
845                 }
846
847                 /* check event from PCH */
848                 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
849                         u32 pch_iir = I915_READ(SDEIIR);
850
851                         cpt_irq_handler(dev, pch_iir);
852
853                         /* clear PCH hotplug event before clearing the CPU irq */
854                         I915_WRITE(SDEIIR, pch_iir);
855                 }
856
857                 I915_WRITE(DEIIR, de_iir);
858                 ret = IRQ_HANDLED;
859         }
860
861         pm_iir = I915_READ(GEN6_PMIIR);
862         if (pm_iir) {
863                 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
864                         gen6_queue_rps_work(dev_priv, pm_iir);
865                 I915_WRITE(GEN6_PMIIR, pm_iir);
866                 ret = IRQ_HANDLED;
867         }
868
869         I915_WRITE(DEIER, de_ier);
870         POSTING_READ(DEIER);
871         if (!HAS_PCH_NOP(dev)) {
872                 I915_WRITE(SDEIER, sde_ier);
873                 POSTING_READ(SDEIER);
874         }
875
876         return ret;
877 }
878
879 static void ilk_gt_irq_handler(struct drm_device *dev,
880                                struct drm_i915_private *dev_priv,
881                                u32 gt_iir)
882 {
883         if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
884                 notify_ring(dev, &dev_priv->ring[RCS]);
885         if (gt_iir & GT_BSD_USER_INTERRUPT)
886                 notify_ring(dev, &dev_priv->ring[VCS]);
887 }
888
889 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
890 {
891         struct drm_device *dev = (struct drm_device *) arg;
892         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
893         int ret = IRQ_NONE;
894         u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
895
896         atomic_inc(&dev_priv->irq_received);
897
898         /* disable master interrupt before clearing iir  */
899         de_ier = I915_READ(DEIER);
900         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
901         POSTING_READ(DEIER);
902
903         /* Disable south interrupts. We'll only write to SDEIIR once, so further
904          * interrupts will be stored on its back queue, and then we'll be
905          * able to process them after we restore SDEIER (as soon as we restore
906          * it, we'll get an interrupt if SDEIIR still has something to process
907          * due to its back queue). */
908         sde_ier = I915_READ(SDEIER);
909         I915_WRITE(SDEIER, 0);
910         POSTING_READ(SDEIER);
911
912         de_iir = I915_READ(DEIIR);
913         gt_iir = I915_READ(GTIIR);
914         pm_iir = I915_READ(GEN6_PMIIR);
915
916         if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
917                 goto done;
918
919         ret = IRQ_HANDLED;
920
921         if (IS_GEN5(dev))
922                 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
923         else
924                 snb_gt_irq_handler(dev, dev_priv, gt_iir);
925
926         if (de_iir & DE_AUX_CHANNEL_A)
927                 dp_aux_irq_handler(dev);
928
929         if (de_iir & DE_GSE)
930                 intel_opregion_gse_intr(dev);
931
932         if (de_iir & DE_PIPEA_VBLANK)
933                 drm_handle_vblank(dev, 0);
934
935         if (de_iir & DE_PIPEB_VBLANK)
936                 drm_handle_vblank(dev, 1);
937
938         if (de_iir & DE_PLANEA_FLIP_DONE) {
939                 intel_prepare_page_flip(dev, 0);
940                 intel_finish_page_flip_plane(dev, 0);
941         }
942
943         if (de_iir & DE_PLANEB_FLIP_DONE) {
944                 intel_prepare_page_flip(dev, 1);
945                 intel_finish_page_flip_plane(dev, 1);
946         }
947
948         /* check event from PCH */
949         if (de_iir & DE_PCH_EVENT) {
950                 u32 pch_iir = I915_READ(SDEIIR);
951
952                 if (HAS_PCH_CPT(dev))
953                         cpt_irq_handler(dev, pch_iir);
954                 else
955                         ibx_irq_handler(dev, pch_iir);
956
957                 /* should clear PCH hotplug event before clearing the CPU irq */
958                 I915_WRITE(SDEIIR, pch_iir);
959         }
960
961         if (IS_GEN5(dev) && (de_iir & DE_PCU_EVENT))
962                 ironlake_handle_rps_change(dev);
963
964         if (IS_GEN6(dev) && (pm_iir & GEN6_PM_DEFERRED_EVENTS))
965                 gen6_queue_rps_work(dev_priv, pm_iir);
966
967         I915_WRITE(GTIIR, gt_iir);
968         I915_WRITE(DEIIR, de_iir);
969         I915_WRITE(GEN6_PMIIR, pm_iir);
970
971 done:
972         I915_WRITE(DEIER, de_ier);
973         POSTING_READ(DEIER);
974         I915_WRITE(SDEIER, sde_ier);
975         POSTING_READ(SDEIER);
976
977         return ret;
978 }
979
980 /**
981  * i915_error_work_func - do process context error handling work
982  * @work: work struct
983  *
984  * Fire an error uevent so userspace can see that a hang or error
985  * was detected.
986  */
987 static void i915_error_work_func(struct work_struct *work)
988 {
989         struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
990                                                     work);
991         drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
992                                                     gpu_error);
993         struct drm_device *dev = dev_priv->dev;
994         struct intel_ring_buffer *ring;
995         char *error_event[] = { "ERROR=1", NULL };
996         char *reset_event[] = { "RESET=1", NULL };
997         char *reset_done_event[] = { "ERROR=0", NULL };
998         int i, ret;
999
1000         kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
1001
1002         /*
1003          * Note that there's only one work item which does gpu resets, so we
1004          * need not worry about concurrent gpu resets potentially incrementing
1005          * error->reset_counter twice. We only need to take care of another
1006          * racing irq/hangcheck declaring the gpu dead for a second time. A
1007          * quick check for that is good enough: schedule_work ensures the
1008          * correct ordering between hang detection and this work item, and since
1009          * the reset in-progress bit is only ever set by code outside of this
1010          * work we don't need to worry about any other races.
1011          */
1012         if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
1013                 DRM_DEBUG_DRIVER("resetting chip\n");
1014                 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
1015                                    reset_event);
1016
1017                 ret = i915_reset(dev);
1018
1019                 if (ret == 0) {
1020                         /*
1021                          * After all the gem state is reset, increment the reset
1022                          * counter and wake up everyone waiting for the reset to
1023                          * complete.
1024                          *
1025                          * Since unlock operations are a one-sided barrier only,
1026          * we need to insert a barrier here to order any seqno
1027          * updates before the counter increment.
1029                          */
1030                         smp_mb__before_atomic_inc();
1031                         atomic_inc(&dev_priv->gpu_error.reset_counter);
1032
1033                         kobject_uevent_env(&dev->primary->kdev.kobj,
1034                                            KOBJ_CHANGE, reset_done_event);
1035                 } else {
1036                         atomic_set(&error->reset_counter, I915_WEDGED);
1037                 }
1038
1039                 for_each_ring(ring, dev_priv, i)
1040                         wake_up_all(&ring->irq_queue);
1041
1042                 intel_display_handle_reset(dev);
1043
1044                 wake_up_all(&dev_priv->gpu_error.reset_queue);
1045         }
1046 }
1047
1048 /* NB: the memset below zeroes the whole instdone array for the caller */
1049 static void i915_get_extra_instdone(struct drm_device *dev,
1050                                     uint32_t *instdone)
1051 {
1052         struct drm_i915_private *dev_priv = dev->dev_private;
1053         memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
1054
1055         switch (INTEL_INFO(dev)->gen) {
1056         case 2:
1057         case 3:
1058                 instdone[0] = I915_READ(INSTDONE);
1059                 break;
1060         case 4:
1061         case 5:
1062         case 6:
1063                 instdone[0] = I915_READ(INSTDONE_I965);
1064                 instdone[1] = I915_READ(INSTDONE1);
1065                 break;
1066         default:
1067                 WARN_ONCE(1, "Unsupported platform\n");
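                /* fall through - unknown newer platforms are treated like gen7 */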
1068         case 7:
1069                 instdone[0] = I915_READ(GEN7_INSTDONE_1);
1070                 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1071                 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
1072                 instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
1073                 break;
1074         }
1075 }
1076
1077 #ifdef CONFIG_DEBUG_FS
1078 static struct drm_i915_error_object *
1079 i915_error_object_create_sized(struct drm_i915_private *dev_priv,
1080                                struct drm_i915_gem_object *src,
1081                                const int num_pages)
1082 {
1083         struct drm_i915_error_object *dst;
1084         int i;
1085         u32 reloc_offset;
1086
1087         if (src == NULL || src->pages == NULL)
1088                 return NULL;
1089
1090         dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
1091         if (dst == NULL)
1092                 return NULL;
1093
1094         reloc_offset = src->gtt_offset;
1095         for (i = 0; i < num_pages; i++) {
1096                 unsigned long flags;
1097                 void *d;
1098
1099                 d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
1100                 if (d == NULL)
1101                         goto unwind;
1102
1103                 local_irq_save(flags);
1104                 if (reloc_offset < dev_priv->gtt.mappable_end &&
1105                     src->has_global_gtt_mapping) {
1106                         void __iomem *s;
1107
1108                         /* Simply ignore tiling or any overlapping fence.
1109                          * It's part of the error state, and this hopefully
1110                          * captures what the GPU read.
1111                          */
1112
1113                         s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
1114                                                      reloc_offset);
1115                         memcpy_fromio(d, s, PAGE_SIZE);
1116                         io_mapping_unmap_atomic(s);
1117                 } else if (src->stolen) {
1118                         unsigned long offset;
1119
1120                         offset = dev_priv->mm.stolen_base;
1121                         offset += src->stolen->start;
1122                         offset += i << PAGE_SHIFT;
1123
1124                         memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
1125                 } else {
1126                         struct page *page;
1127                         void *s;
1128
1129                         page = i915_gem_object_get_page(src, i);
1130
1131                         drm_clflush_pages(&page, 1);
1132
1133                         s = kmap_atomic(page);
1134                         memcpy(d, s, PAGE_SIZE);
1135                         kunmap_atomic(s);
1136
1137                         drm_clflush_pages(&page, 1);
1138                 }
1139                 local_irq_restore(flags);
1140
1141                 dst->pages[i] = d;
1142
1143                 reloc_offset += PAGE_SIZE;
1144         }
1145         dst->page_count = num_pages;
1146         dst->gtt_offset = src->gtt_offset;
1147
1148         return dst;
1149
1150 unwind:
1151         while (i--)
1152                 kfree(dst->pages[i]);
1153         kfree(dst);
1154         return NULL;
1155 }
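/*
 * Editor's note on the three copy paths above: objects with a global GTT
 * mapping inside the mappable aperture are snapshotted through an atomic
 * write-combining io-mapping; objects in stolen memory are read via their
 * physical offset; everything else goes through kmap_atomic() of the
 * backing page, with clflush before and after so the copy reflects what
 * the GPU actually saw.
 */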
1156 #define i915_error_object_create(dev_priv, src) \
1157         i915_error_object_create_sized((dev_priv), (src), \
1158                                        (src)->base.size>>PAGE_SHIFT)
1159
1160 static void
1161 i915_error_object_free(struct drm_i915_error_object *obj)
1162 {
1163         int page;
1164
1165         if (obj == NULL)
1166                 return;
1167
1168         for (page = 0; page < obj->page_count; page++)
1169                 kfree(obj->pages[page]);
1170
1171         kfree(obj);
1172 }
1173
1174 void
1175 i915_error_state_free(struct kref *error_ref)
1176 {
1177         struct drm_i915_error_state *error = container_of(error_ref,
1178                                                           typeof(*error), ref);
1179         int i;
1180
1181         for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
1182                 i915_error_object_free(error->ring[i].batchbuffer);
1183                 i915_error_object_free(error->ring[i].ringbuffer);
1184                 kfree(error->ring[i].requests);
1185         }
1186
1187         kfree(error->active_bo);
1188         kfree(error->overlay);
1189         kfree(error);
1190 }
1191 static void capture_bo(struct drm_i915_error_buffer *err,
1192                        struct drm_i915_gem_object *obj)
1193 {
1194         err->size = obj->base.size;
1195         err->name = obj->base.name;
1196         err->rseqno = obj->last_read_seqno;
1197         err->wseqno = obj->last_write_seqno;
1198         err->gtt_offset = obj->gtt_offset;
1199         err->read_domains = obj->base.read_domains;
1200         err->write_domain = obj->base.write_domain;
1201         err->fence_reg = obj->fence_reg;
1202         err->pinned = 0;
1203         if (obj->pin_count > 0)
1204                 err->pinned = 1;
1205         if (obj->user_pin_count > 0)
1206                 err->pinned = -1;
1207         err->tiling = obj->tiling_mode;
1208         err->dirty = obj->dirty;
1209         err->purgeable = obj->madv != I915_MADV_WILLNEED;
1210         err->ring = obj->ring ? obj->ring->id : -1;
1211         err->cache_level = obj->cache_level;
1212 }
1213
1214 static u32 capture_active_bo(struct drm_i915_error_buffer *err,
1215                              int count, struct list_head *head)
1216 {
1217         struct drm_i915_gem_object *obj;
1218         int i = 0;
1219
1220         list_for_each_entry(obj, head, mm_list) {
1221                 capture_bo(err++, obj);
1222                 if (++i == count)
1223                         break;
1224         }
1225
1226         return i;
1227 }
1228
1229 static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
1230                              int count, struct list_head *head)
1231 {
1232         struct drm_i915_gem_object *obj;
1233         int i = 0;
1234
1235         list_for_each_entry(obj, head, gtt_list) {
1236                 if (obj->pin_count == 0)
1237                         continue;
1238
1239                 capture_bo(err++, obj);
1240                 if (++i == count)
1241                         break;
1242         }
1243
1244         return i;
1245 }
1246
1247 static void i915_gem_record_fences(struct drm_device *dev,
1248                                    struct drm_i915_error_state *error)
1249 {
1250         struct drm_i915_private *dev_priv = dev->dev_private;
1251         int i;
1252
1253         /* Fences */
1254         switch (INTEL_INFO(dev)->gen) {
1255         case 7:
1256         case 6:
1257                 for (i = 0; i < dev_priv->num_fence_regs; i++)
1258                         error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
1259                 break;
1260         case 5:
1261         case 4:
1262                 for (i = 0; i < 16; i++)
1263                         error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
1264                 break;
1265         case 3:
1266                 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
1267                         for (i = 0; i < 8; i++)
1268                                 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
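                /* fall through - fences 0-7 use the gen2 register layout */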
1269         case 2:
1270                 for (i = 0; i < 8; i++)
1271                         error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
1272                 break;
1273
1274         default:
1275                 BUG();
1276         }
1277 }
1278
1279 static struct drm_i915_error_object *
1280 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1281                              struct intel_ring_buffer *ring)
1282 {
1283         struct drm_i915_gem_object *obj;
1284         u32 seqno;
1285
1286         if (!ring->get_seqno)
1287                 return NULL;
1288
1289         if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
1290                 u32 acthd = I915_READ(ACTHD);
1291
1292                 if (WARN_ON(ring->id != RCS))
1293                         return NULL;
1294
1295                 obj = ring->private;
1296                 if (acthd >= obj->gtt_offset &&
1297                     acthd < obj->gtt_offset + obj->base.size)
1298                         return i915_error_object_create(dev_priv, obj);
1299         }
1300
1301         seqno = ring->get_seqno(ring, false);
1302         list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1303                 if (obj->ring != ring)
1304                         continue;
1305
1306                 if (i915_seqno_passed(seqno, obj->last_read_seqno))
1307                         continue;
1308
1309                 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
1310                         continue;
1311
1312                 /* We need to copy these to an anonymous buffer as the simplest
1313                  * method to avoid being overwritten by userspace.
1314                  */
1315                 return i915_error_object_create(dev_priv, obj);
1316         }
1317
1318         return NULL;
1319 }
1320
1321 static void i915_record_ring_state(struct drm_device *dev,
1322                                    struct drm_i915_error_state *error,
1323                                    struct intel_ring_buffer *ring)
1324 {
1325         struct drm_i915_private *dev_priv = dev->dev_private;
1326
1327         if (INTEL_INFO(dev)->gen >= 6) {
1328                 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
1329                 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
1330                 error->semaphore_mboxes[ring->id][0]
1331                         = I915_READ(RING_SYNC_0(ring->mmio_base));
1332                 error->semaphore_mboxes[ring->id][1]
1333                         = I915_READ(RING_SYNC_1(ring->mmio_base));
1334                 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
1335                 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
1336         }
1337
1338         if (INTEL_INFO(dev)->gen >= 4) {
1339                 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
1340                 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
1341                 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
1342                 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
1343                 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
1344                 if (ring->id == RCS)
1345                         error->bbaddr = I915_READ64(BB_ADDR);
1346         } else {
1347                 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
1348                 error->ipeir[ring->id] = I915_READ(IPEIR);
1349                 error->ipehr[ring->id] = I915_READ(IPEHR);
1350                 error->instdone[ring->id] = I915_READ(INSTDONE);
1351         }
1352
1353         error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
1354         error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
1355         error->seqno[ring->id] = ring->get_seqno(ring, false);
1356         error->acthd[ring->id] = intel_ring_get_active_head(ring);
1357         error->head[ring->id] = I915_READ_HEAD(ring);
1358         error->tail[ring->id] = I915_READ_TAIL(ring);
1359         error->ctl[ring->id] = I915_READ_CTL(ring);
1360
1361         error->cpu_ring_head[ring->id] = ring->head;
1362         error->cpu_ring_tail[ring->id] = ring->tail;
1363 }
1364
1365
1366 static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
1367                                            struct drm_i915_error_state *error,
1368                                            struct drm_i915_error_ring *ering)
1369 {
1370         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1371         struct drm_i915_gem_object *obj;
1372
1373         /* Currently render ring is the only HW context user */
1374         if (ring->id != RCS || !error->ccid)
1375                 return;
1376
1377         list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
1378                 if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
1379                         ering->ctx = i915_error_object_create_sized(dev_priv,
1380                                                                     obj, 1);
1381                 }
1382         }
1383 }
1384
1385 static void i915_gem_record_rings(struct drm_device *dev,
1386                                   struct drm_i915_error_state *error)
1387 {
1388         struct drm_i915_private *dev_priv = dev->dev_private;
1389         struct intel_ring_buffer *ring;
1390         struct drm_i915_gem_request *request;
1391         int i, count;
1392
1393         for_each_ring(ring, dev_priv, i) {
1394                 i915_record_ring_state(dev, error, ring);
1395
1396                 error->ring[i].batchbuffer =
1397                         i915_error_first_batchbuffer(dev_priv, ring);
1398
1399                 error->ring[i].ringbuffer =
1400                         i915_error_object_create(dev_priv, ring->obj);
1401
1402
1403                 i915_gem_record_active_context(ring, error, &error->ring[i]);
1404
1405                 count = 0;
1406                 list_for_each_entry(request, &ring->request_list, list)
1407                         count++;
1408
1409                 error->ring[i].num_requests = count;
1410                 error->ring[i].requests =
1411                         kmalloc(count*sizeof(struct drm_i915_error_request),
1412                                 GFP_ATOMIC);
1413                 if (error->ring[i].requests == NULL) {
1414                         error->ring[i].num_requests = 0;
1415                         continue;
1416                 }
1417
1418                 count = 0;
1419                 list_for_each_entry(request, &ring->request_list, list) {
1420                         struct drm_i915_error_request *erq;
1421
1422                         erq = &error->ring[i].requests[count++];
1423                         erq->seqno = request->seqno;
1424                         erq->jiffies = request->emitted_jiffies;
1425                         erq->tail = request->tail;
1426                 }
1427         }
1428 }
1429
1430 /**
1431  * i915_capture_error_state - capture an error record for later analysis
1432  * @dev: drm device
1433  *
1434  * Should be called when an error is detected (either a hang or an error
1435  * interrupt) to capture error state from the time of the error.  Fills
1436  * out a structure which becomes available in debugfs for user level tools
1437  * to pick up.
1438  */
1439 static void i915_capture_error_state(struct drm_device *dev)
1440 {
1441         struct drm_i915_private *dev_priv = dev->dev_private;
1442         struct drm_i915_gem_object *obj;
1443         struct drm_i915_error_state *error;
1444         unsigned long flags;
1445         int i, pipe;
1446
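        /* Bail if an error record is already pending consumption.  This
         * check and the assignment at the bottom are not one atomic
         * section, so two racing captures are resolved there: only the
         * first record is kept and the loser frees its copy.
         */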
1447         spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1448         error = dev_priv->gpu_error.first_error;
1449         spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1450         if (error)
1451                 return;
1452
1453         /* Account for pipe-specific data like PIPE*STAT */
1454         error = kzalloc(sizeof(*error), GFP_ATOMIC);
1455         if (!error) {
1456                 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1457                 return;
1458         }
1459
1460         DRM_INFO("capturing error event; look for more information in "
1461                  "/sys/kernel/debug/dri/%d/i915_error_state\n",
1462                  dev->primary->index);
1463
1464         kref_init(&error->ref);
1465         error->eir = I915_READ(EIR);
1466         error->pgtbl_er = I915_READ(PGTBL_ER);
1467         if (HAS_HW_CONTEXTS(dev))
1468                 error->ccid = I915_READ(CCID);
1469
1470         if (HAS_PCH_SPLIT(dev))
1471                 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1472         else if (IS_VALLEYVIEW(dev))
1473                 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1474         else if (IS_GEN2(dev))
1475                 error->ier = I915_READ16(IER);
1476         else
1477                 error->ier = I915_READ(IER);
1478
1479         if (INTEL_INFO(dev)->gen >= 6)
1480                 error->derrmr = I915_READ(DERRMR);
1481
1482         if (IS_VALLEYVIEW(dev))
1483                 error->forcewake = I915_READ(FORCEWAKE_VLV);
1484         else if (INTEL_INFO(dev)->gen >= 7)
1485                 error->forcewake = I915_READ(FORCEWAKE_MT);
1486         else if (INTEL_INFO(dev)->gen == 6)
1487                 error->forcewake = I915_READ(FORCEWAKE);
1488
1489         if (!HAS_PCH_SPLIT(dev))
1490                 for_each_pipe(pipe)
1491                         error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1492
1493         if (INTEL_INFO(dev)->gen >= 6) {
1494                 error->error = I915_READ(ERROR_GEN6);
1495                 error->done_reg = I915_READ(DONE_REG);
1496         }
1497
1498         if (INTEL_INFO(dev)->gen == 7)
1499                 error->err_int = I915_READ(GEN7_ERR_INT);
1500
1501         i915_get_extra_instdone(dev, error->extra_instdone);
1502
1503         i915_gem_record_fences(dev, error);
1504         i915_gem_record_rings(dev, error);
1505
1506         /* Record buffers on the active and pinned lists. */
1507         error->active_bo = NULL;
1508         error->pinned_bo = NULL;
1509
1510         i = 0;
1511         list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1512                 i++;
1513         error->active_bo_count = i;
1514         list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
1515                 if (obj->pin_count)
1516                         i++;
1517         error->pinned_bo_count = i - error->active_bo_count;
1518
1519         error->active_bo = NULL;
1520         error->pinned_bo = NULL;
1521         if (i) {
1522                 error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
1523                                            GFP_ATOMIC);
1524                 if (error->active_bo)
1525                         error->pinned_bo =
1526                                 error->active_bo + error->active_bo_count;
1527         }
1528
1529         if (error->active_bo)
1530                 error->active_bo_count =
1531                         capture_active_bo(error->active_bo,
1532                                           error->active_bo_count,
1533                                           &dev_priv->mm.active_list);
1534
1535         if (error->pinned_bo)
1536                 error->pinned_bo_count =
1537                         capture_pinned_bo(error->pinned_bo,
1538                                           error->pinned_bo_count,
1539                                           &dev_priv->mm.bound_list);
1540
1541         do_gettimeofday(&error->time);
1542
1543         error->overlay = intel_overlay_capture_error_state(dev);
1544         error->display = intel_display_capture_error_state(dev);
1545
1546         spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1547         if (dev_priv->gpu_error.first_error == NULL) {
1548                 dev_priv->gpu_error.first_error = error;
1549                 error = NULL;
1550         }
1551         spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1552
1553         if (error)
1554                 i915_error_state_free(&error->ref);
1555 }
1556
1557 void i915_destroy_error_state(struct drm_device *dev)
1558 {
1559         struct drm_i915_private *dev_priv = dev->dev_private;
1560         struct drm_i915_error_state *error;
1561         unsigned long flags;
1562
1563         spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1564         error = dev_priv->gpu_error.first_error;
1565         dev_priv->gpu_error.first_error = NULL;
1566         spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1567
1568         if (error)
1569                 kref_put(&error->ref, i915_error_state_free);
1570 }
1571 #else
1572 #define i915_capture_error_state(x)
1573 #endif
1574
1575 static void i915_report_and_clear_eir(struct drm_device *dev)
1576 {
1577         struct drm_i915_private *dev_priv = dev->dev_private;
1578         uint32_t instdone[I915_NUM_INSTDONE_REG];
1579         u32 eir = I915_READ(EIR);
1580         int pipe, i;
1581
1582         if (!eir)
1583                 return;
1584
1585         pr_err("render error detected, EIR: 0x%08x\n", eir);
1586
1587         i915_get_extra_instdone(dev, instdone);
1588
1589         if (IS_G4X(dev)) {
1590                 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1591                         u32 ipeir = I915_READ(IPEIR_I965);
1592
1593                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1594                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1595                         for (i = 0; i < ARRAY_SIZE(instdone); i++)
1596                                 pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1597                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
1598                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1599                         I915_WRITE(IPEIR_I965, ipeir);
1600                         POSTING_READ(IPEIR_I965);
1601                 }
1602                 if (eir & GM45_ERROR_PAGE_TABLE) {
1603                         u32 pgtbl_err = I915_READ(PGTBL_ER);
1604                         pr_err("page table error\n");
1605                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
1606                         I915_WRITE(PGTBL_ER, pgtbl_err);
1607                         POSTING_READ(PGTBL_ER);
1608                 }
1609         }
1610
1611         if (!IS_GEN2(dev)) {
1612                 if (eir & I915_ERROR_PAGE_TABLE) {
1613                         u32 pgtbl_err = I915_READ(PGTBL_ER);
1614                         pr_err("page table error\n");
1615                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
1616                         I915_WRITE(PGTBL_ER, pgtbl_err);
1617                         POSTING_READ(PGTBL_ER);
1618                 }
1619         }
1620
1621         if (eir & I915_ERROR_MEMORY_REFRESH) {
1622                 pr_err("memory refresh error:\n");
1623                 for_each_pipe(pipe)
1624                         pr_err("pipe %c stat: 0x%08x\n",
1625                                pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
1626                 /* pipestat has already been acked */
1627         }
1628         if (eir & I915_ERROR_INSTRUCTION) {
1629                 pr_err("instruction error\n");
1630                 pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
1631                 for (i = 0; i < ARRAY_SIZE(instdone); i++)
1632                         pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1633                 if (INTEL_INFO(dev)->gen < 4) {
1634                         u32 ipeir = I915_READ(IPEIR);
1635
1636                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
1637                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
1638                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
1639                         I915_WRITE(IPEIR, ipeir);
1640                         POSTING_READ(IPEIR);
1641                 } else {
1642                         u32 ipeir = I915_READ(IPEIR_I965);
1643
1644                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1645                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1646                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
1647                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1648                         I915_WRITE(IPEIR_I965, ipeir);
1649                         POSTING_READ(IPEIR_I965);
1650                 }
1651         }
1652
1653         I915_WRITE(EIR, eir);
1654         POSTING_READ(EIR);
1655         eir = I915_READ(EIR);
1656         if (eir) {
1657                 /*
1658                  * some errors might have become stuck,
1659                  * mask them.
1660                  */
1661                 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
1662                 I915_WRITE(EMR, I915_READ(EMR) | eir);
1663                 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1664         }
1665 }
1666
1667 /**
1668  * i915_handle_error - handle an error interrupt
1669  * @dev: drm device
1670  *
1671  * Do some basic checking of register state at error interrupt time and
1672  * dump it to the syslog.  Also call i915_capture_error_state() to make
1673  * sure we get a record and make it available in debugfs.  Fire a uevent
1674  * so userspace knows something bad happened (should trigger collection
1675  * of a ring dump etc.).
1676  */
1677 void i915_handle_error(struct drm_device *dev, bool wedged)
1678 {
1679         struct drm_i915_private *dev_priv = dev->dev_private;
1680         struct intel_ring_buffer *ring;
1681         int i;
1682
1683         i915_capture_error_state(dev);
1684         i915_report_and_clear_eir(dev);
1685
1686         if (wedged) {
1687                 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
1688                                 &dev_priv->gpu_error.reset_counter);
1689
1690                 /*
1691                  * Wakeup waiting processes so that the reset work item
1692                  * doesn't deadlock trying to grab various locks.
1693                  */
1694                 for_each_ring(ring, dev_priv, i)
1695                         wake_up_all(&ring->irq_queue);
1696         }
1697
1698         queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
1699 }
1700
1701 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1702 {
1703         drm_i915_private_t *dev_priv = dev->dev_private;
1704         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1705         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1706         struct drm_i915_gem_object *obj;
1707         struct intel_unpin_work *work;
1708         unsigned long flags;
1709         bool stall_detected;
1710
1711         /* Ignore early vblank irqs */
1712         if (intel_crtc == NULL)
1713                 return;
1714
1715         spin_lock_irqsave(&dev->event_lock, flags);
1716         work = intel_crtc->unpin_work;
1717
1718         if (work == NULL ||
1719             atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1720             !work->enable_stall_check) {
1721                 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
1722                 spin_unlock_irqrestore(&dev->event_lock, flags);
1723                 return;
1724         }
1725
1726         /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
1727         obj = work->pending_flip_obj;
1728         if (INTEL_INFO(dev)->gen >= 4) {
1729                 int dspsurf = DSPSURF(intel_crtc->plane);
1730                 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
1731                                         obj->gtt_offset;
1732         } else {
1733                 int dspaddr = DSPADDR(intel_crtc->plane);
1734                 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
1735                                                         crtc->y * crtc->fb->pitches[0] +
1736                                                         crtc->x * crtc->fb->bits_per_pixel/8);
1737         }
1738
1739         spin_unlock_irqrestore(&dev->event_lock, flags);
1740
1741         if (stall_detected) {
1742                 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
1743                 intel_prepare_page_flip(dev, intel_crtc->plane);
1744         }
1745 }
1746
1747 /* Called from drm generic code, passed 'crtc' which
1748  * we use as a pipe index
1749  */
1750 static int i915_enable_vblank(struct drm_device *dev, int pipe)
1751 {
1752         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1753         unsigned long irqflags;
1754
1755         if (!i915_pipe_enabled(dev, pipe))
1756                 return -EINVAL;
1757
1758         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1759         if (INTEL_INFO(dev)->gen >= 4)
1760                 i915_enable_pipestat(dev_priv, pipe,
1761                                      PIPE_START_VBLANK_INTERRUPT_ENABLE);
1762         else
1763                 i915_enable_pipestat(dev_priv, pipe,
1764                                      PIPE_VBLANK_INTERRUPT_ENABLE);
1765
1766         /* maintain vblank delivery even in deep C-states */
1767         if (dev_priv->info->gen == 3)
1768                 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
1769         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1770
1771         return 0;
1772 }
1773
1774 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1775 {
1776         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1777         unsigned long irqflags;
1778
1779         if (!i915_pipe_enabled(dev, pipe))
1780                 return -EINVAL;
1781
1782         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1783         ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1784                                     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1785         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1786
1787         return 0;
1788 }
1789
1790 static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
1791 {
1792         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1793         unsigned long irqflags;
1794
1795         if (!i915_pipe_enabled(dev, pipe))
1796                 return -EINVAL;
1797
1798         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
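        /* DE interrupt bits on IVB are grouped in blocks of five per pipe,
         * so shift the pipe A vblank bit into place for the requested pipe.
         */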
1799         ironlake_enable_display_irq(dev_priv,
1800                                     DE_PIPEA_VBLANK_IVB << (5 * pipe));
1801         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1802
1803         return 0;
1804 }
1805
1806 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1807 {
1808         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1809         unsigned long irqflags;
1810         u32 imr;
1811
1812         if (!i915_pipe_enabled(dev, pipe))
1813                 return -EINVAL;
1814
1815         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1816         imr = I915_READ(VLV_IMR);
1817         if (pipe == 0)
1818                 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1819         else
1820                 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1821         I915_WRITE(VLV_IMR, imr);
1822         i915_enable_pipestat(dev_priv, pipe,
1823                              PIPE_START_VBLANK_INTERRUPT_ENABLE);
1824         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1825
1826         return 0;
1827 }
1828
1829 /* Called from drm generic code, passed 'crtc' which
1830  * we use as a pipe index
1831  */
1832 static void i915_disable_vblank(struct drm_device *dev, int pipe)
1833 {
1834         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1835         unsigned long irqflags;
1836
1837         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1838         if (dev_priv->info->gen == 3)
1839                 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
1840
1841         i915_disable_pipestat(dev_priv, pipe,
1842                               PIPE_VBLANK_INTERRUPT_ENABLE |
1843                               PIPE_START_VBLANK_INTERRUPT_ENABLE);
1844         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1845 }
1846
1847 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1848 {
1849         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1850         unsigned long irqflags;
1851
1852         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1853         ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1854                                      DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1855         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1856 }
1857
1858 static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
1859 {
1860         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1861         unsigned long irqflags;
1862
1863         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1864         ironlake_disable_display_irq(dev_priv,
1865                                      DE_PIPEA_VBLANK_IVB << (pipe * 5));
1866         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1867 }
1868
1869 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1870 {
1871         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1872         unsigned long irqflags;
1873         u32 imr;
1874
1875         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1876         i915_disable_pipestat(dev_priv, pipe,
1877                               PIPE_START_VBLANK_INTERRUPT_ENABLE);
1878         imr = I915_READ(VLV_IMR);
1879         if (pipe == 0)
1880                 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1881         else
1882                 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1883         I915_WRITE(VLV_IMR, imr);
1884         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1885 }
1886
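/* request_list is kept in submission order, so the entry at the tail is the
 * most recently emitted request on this ring.
 */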
1887 static u32
1888 ring_last_seqno(struct intel_ring_buffer *ring)
1889 {
1890         return list_entry(ring->request_list.prev,
1891                           struct drm_i915_gem_request, list)->seqno;
1892 }
1893
1894 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1895 {
1896         if (list_empty(&ring->request_list) ||
1897             i915_seqno_passed(ring->get_seqno(ring, false),
1898                               ring_last_seqno(ring))) {
1899                 /* Issue a wake-up to catch stuck h/w. */
1900                 if (waitqueue_active(&ring->irq_queue)) {
1901                         DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
1902                                   ring->name);
1903                         wake_up_all(&ring->irq_queue);
1904                         *err = true;
1905                 }
1906                 return true;
1907         }
1908         return false;
1909 }
1910
1911 static bool semaphore_passed(struct intel_ring_buffer *ring)
1912 {
1913         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1914         u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
1915         struct intel_ring_buffer *signaller;
1916         u32 cmd, ipehr, acthd_min;
1917
1918         ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
1919         if ((ipehr & ~(0x3 << 16)) !=
1920             (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
1921                 return false;
1922
1923         /* ACTHD is likely pointing to the dword after the actual command,
1924          * so scan backwards until we find the MBOX.
1925          */
1926         acthd_min = max((int)acthd - 3 * 4, 0);
1927         do {
1928                 cmd = ioread32(ring->virtual_start + acthd);
1929                 if (cmd == ipehr)
1930                         break;
1931
1932                 acthd -= 4;
1933                 if (acthd < acthd_min)
1934                         return false;
1935         } while (1);
1936
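        /* Bits 16:17 of the command (masked off above) select the mailbox:
         * the code below uses bit 17 to map back to the signalling ring and
         * reads the seqno operand from the dword following the command.
         */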
1937         signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
1938         return i915_seqno_passed(signaller->get_seqno(signaller, false),
1939                                  ioread32(ring->virtual_start+acthd+4)+1);
1940 }
1941
1942 static bool kick_ring(struct intel_ring_buffer *ring)
1943 {
1944         struct drm_device *dev = ring->dev;
1945         struct drm_i915_private *dev_priv = dev->dev_private;
1946         u32 tmp = I915_READ_CTL(ring);
1947         if (tmp & RING_WAIT) {
1948                 DRM_ERROR("Kicking stuck wait on %s\n",
1949                           ring->name);
1950                 I915_WRITE_CTL(ring, tmp);
1951                 return true;
1952         }
1953
1954         if (INTEL_INFO(dev)->gen >= 6 &&
1955             tmp & RING_WAIT_SEMAPHORE &&
1956             semaphore_passed(ring)) {
1957                 DRM_ERROR("Kicking stuck semaphore on %s\n",
1958                           ring->name);
1959                 I915_WRITE_CTL(ring, tmp);
1960                 return true;
1961         }
1962         return false;
1963 }
1964
1965 static bool i915_hangcheck_hung(struct drm_device *dev)
1966 {
1967         drm_i915_private_t *dev_priv = dev->dev_private;
1968
1969         if (dev_priv->gpu_error.hangcheck_count++ > 1) {
1970                 bool hung = true;
1971
1972                 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
1973                 i915_handle_error(dev, true);
1974
1975                 if (!IS_GEN2(dev)) {
1976                         struct intel_ring_buffer *ring;
1977                         int i;
1978
1979                         /* Is the chip hanging on a WAIT_FOR_EVENT?
1980                          * If so we can simply poke the RB_WAIT bit
1981                          * and break the hang. This should work on
1982                          * all but the second generation chipsets.
1983                          */
1984                         for_each_ring(ring, dev_priv, i)
1985                                 hung &= !kick_ring(ring);
1986                 }
1987
1988                 return hung;
1989         }
1990
1991         return false;
1992 }
1993
1994 /**
1995  * This is called when the chip hasn't reported back with completed
1996  * batchbuffers in a long time. The first time this is called we simply record
1997  * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
1998  * again, we assume the chip is wedged and try to fix it.
1999  */
2000 void i915_hangcheck_elapsed(unsigned long data)
2001 {
2002         struct drm_device *dev = (struct drm_device *)data;
2003         drm_i915_private_t *dev_priv = dev->dev_private;
2004         uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
2005         struct intel_ring_buffer *ring;
2006         bool err = false, idle;
2007         int i;
2008
2009         if (!i915_enable_hangcheck)
2010                 return;
2011
2012         memset(acthd, 0, sizeof(acthd));
2013         idle = true;
2014         for_each_ring(ring, dev_priv, i) {
2015             idle &= i915_hangcheck_ring_idle(ring, &err);
2016             acthd[i] = intel_ring_get_active_head(ring);
2017         }
2018
2019         /* If all work is done then ACTHD clearly hasn't advanced. */
2020         if (idle) {
2021                 if (err) {
2022                         if (i915_hangcheck_hung(dev))
2023                                 return;
2024
2025                         goto repeat;
2026                 }
2027
2028                 dev_priv->gpu_error.hangcheck_count = 0;
2029                 return;
2030         }
2031
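        /* Compare this pass's ACTHD/INSTDONE snapshot against the previous
         * one; only if neither has moved do we escalate towards declaring
         * the GPU hung.
         */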
2032         i915_get_extra_instdone(dev, instdone);
2033         if (memcmp(dev_priv->gpu_error.last_acthd, acthd,
2034                    sizeof(acthd)) == 0 &&
2035             memcmp(dev_priv->gpu_error.prev_instdone, instdone,
2036                    sizeof(instdone)) == 0) {
2037                 if (i915_hangcheck_hung(dev))
2038                         return;
2039         } else {
2040                 dev_priv->gpu_error.hangcheck_count = 0;
2041
2042                 memcpy(dev_priv->gpu_error.last_acthd, acthd,
2043                        sizeof(acthd));
2044                 memcpy(dev_priv->gpu_error.prev_instdone, instdone,
2045                        sizeof(instdone));
2046         }
2047
2048 repeat:
2049         /* Reset timer in case the chip hangs without another request being added */
2050         mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2051                   round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
2052 }
2053
2054 /* drm_dma.h hooks */
2055
2056 static void ironlake_irq_preinstall(struct drm_device *dev)
2057 {
2058         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2059
2060         atomic_set(&dev_priv->irq_received, 0);
2061
2062         I915_WRITE(HWSTAM, 0xeffe);
2063
2064         /* XXX hotplug from PCH */
2065
2066         I915_WRITE(DEIMR, 0xffffffff);
2067         I915_WRITE(DEIER, 0x0);
2068         POSTING_READ(DEIER);
2069
2070         /* and GT */
2071         I915_WRITE(GTIMR, 0xffffffff);
2072         I915_WRITE(GTIER, 0x0);
2073         POSTING_READ(GTIER);
2074
2075         if (HAS_PCH_NOP(dev))
2076                 return;
2077
2078         /* south display irq */
2079         I915_WRITE(SDEIMR, 0xffffffff);
2080         /*
2081          * SDEIER is also touched by the interrupt handler to work around missed
2082          * PCH interrupts. Hence we can't update it after the interrupt handler
2083          * is enabled - instead we unconditionally enable all PCH interrupt
2084          * sources here, but then only unmask them as needed with SDEIMR.
2085          */
2086         I915_WRITE(SDEIER, 0xffffffff);
2087         POSTING_READ(SDEIER);
2088 }
2089
2090 static void valleyview_irq_preinstall(struct drm_device *dev)
2091 {
2092         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2093         int pipe;
2094
2095         atomic_set(&dev_priv->irq_received, 0);
2096
2097         /* VLV magic */
2098         I915_WRITE(VLV_IMR, 0);
2099         I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2100         I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2101         I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2102
2103         /* and GT */
2104         I915_WRITE(GTIIR, I915_READ(GTIIR));
2105         I915_WRITE(GTIIR, I915_READ(GTIIR));
2106         I915_WRITE(GTIMR, 0xffffffff);
2107         I915_WRITE(GTIER, 0x0);
2108         POSTING_READ(GTIER);
2109
2110         I915_WRITE(DPINVGTT, 0xff);
2111
2112         I915_WRITE(PORT_HOTPLUG_EN, 0);
2113         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2114         for_each_pipe(pipe)
2115                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2116         I915_WRITE(VLV_IIR, 0xffffffff);
2117         I915_WRITE(VLV_IMR, 0xffffffff);
2118         I915_WRITE(VLV_IER, 0x0);
2119         POSTING_READ(VLV_IER);
2120 }
2121
2122 static void ibx_hpd_irq_setup(struct drm_device *dev)
2123 {
2124         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2125         struct drm_mode_config *mode_config = &dev->mode_config;
2126         struct intel_encoder *intel_encoder;
2127         u32 mask = ~I915_READ(SDEIMR);
2128         u32 hotplug;
2129
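        /* Unmask the hotplug bit of every encoder that has an HPD pin
         * assigned; IBX and CPT PCHs use different bit layouts (see the
         * hpd_ibx[] and hpd_cpt[] tables).
         */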
2130         if (HAS_PCH_IBX(dev)) {
2131                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2132                         mask |= hpd_ibx[intel_encoder->hpd_pin];
2133         } else {
2134                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2135                         mask |= hpd_cpt[intel_encoder->hpd_pin];
2136         }
2137
2138         I915_WRITE(SDEIMR, ~mask);
2139
2140         /*
2141          * Enable digital hotplug on the PCH, and configure the DP short pulse
2142          * duration to 2ms (which is the minimum in the Display Port spec)
2143          *
2144          * This register is the same on all known PCH chips.
2145          */
2146         hotplug = I915_READ(PCH_PORT_HOTPLUG);
2147         hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2148         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2149         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2150         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2151         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2152 }
2153
2154 static void ibx_irq_postinstall(struct drm_device *dev)
2155 {
2156         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2157         u32 mask;
2158
2159         if (HAS_PCH_IBX(dev))
2160                 mask = SDE_GMBUS | SDE_AUX_MASK;
2161         else
2162                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
2163
2164         if (HAS_PCH_NOP(dev))
2165                 return;
2166
2167         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2168         I915_WRITE(SDEIMR, ~mask);
2169 }
2170
2171 static int ironlake_irq_postinstall(struct drm_device *dev)
2172 {
2173         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2174         /* enable the interrupts we always want on */
2175         u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2176                            DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2177                            DE_AUX_CHANNEL_A;
2178         u32 render_irqs;
2179
2180         dev_priv->irq_mask = ~display_mask;
2181
2182         /* should always be able to generate irqs */
2183         I915_WRITE(DEIIR, I915_READ(DEIIR));
2184         I915_WRITE(DEIMR, dev_priv->irq_mask);
2185         I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
2186         POSTING_READ(DEIER);
2187
2188         dev_priv->gt_irq_mask = ~0;
2189
2190         I915_WRITE(GTIIR, I915_READ(GTIIR));
2191         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2192
2193         if (IS_GEN6(dev))
2194                 render_irqs =
2195                         GT_USER_INTERRUPT |
2196                         GEN6_BSD_USER_INTERRUPT |
2197                         GEN6_BLITTER_USER_INTERRUPT;
2198         else
2199                 render_irqs =
2200                         GT_USER_INTERRUPT |
2201                         GT_PIPE_NOTIFY |
2202                         GT_BSD_USER_INTERRUPT;
2203         I915_WRITE(GTIER, render_irqs);
2204         POSTING_READ(GTIER);
2205
2206         ibx_irq_postinstall(dev);
2207
2208         if (IS_IRONLAKE_M(dev)) {
2209                 /* Clear & enable PCU event interrupts */
2210                 I915_WRITE(DEIIR, DE_PCU_EVENT);
2211                 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
2212                 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
2213         }
2214
2215         return 0;
2216 }
2217
2218 static int ivybridge_irq_postinstall(struct drm_device *dev)
2219 {
2220         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2221         /* enable the interrupts we always want on */
2222         u32 display_mask =
2223                 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
2224                 DE_PLANEC_FLIP_DONE_IVB |
2225                 DE_PLANEB_FLIP_DONE_IVB |
2226                 DE_PLANEA_FLIP_DONE_IVB |
2227                 DE_AUX_CHANNEL_A_IVB;
2228         u32 render_irqs;
2229
2230         dev_priv->irq_mask = ~display_mask;
2231
2232         /* should always be able to generate irqs */
2233         I915_WRITE(DEIIR, I915_READ(DEIIR));
2234         I915_WRITE(DEIMR, dev_priv->irq_mask);
2235         I915_WRITE(DEIER,
2236                    display_mask |
2237                    DE_PIPEC_VBLANK_IVB |
2238                    DE_PIPEB_VBLANK_IVB |
2239                    DE_PIPEA_VBLANK_IVB);
2240         POSTING_READ(DEIER);
2241
2242         dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
2243
2244         I915_WRITE(GTIIR, I915_READ(GTIIR));
2245         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2246
2247         render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
2248                 GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
2249         I915_WRITE(GTIER, render_irqs);
2250         POSTING_READ(GTIER);
2251
2252         ibx_irq_postinstall(dev);
2253
2254         return 0;
2255 }
2256
2257 static int valleyview_irq_postinstall(struct drm_device *dev)
2258 {
2259         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2260         u32 enable_mask;
2261         u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
2262         u32 render_irqs;
2263         u16 msid;
2264
2265         enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2266         enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2267                 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2268                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2269                 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2270
2271         /*
2272          * Leave vblank interrupts masked initially; enable/disable will
2273          * toggle them based on usage.
2274          */
2275         dev_priv->irq_mask = (~enable_mask) |
2276                 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2277                 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2278
2279         /* Hack for broken MSIs on VLV */
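        /* (The config writes below appear to point the MSI address register
         * at offset 0x94 to the standard 0xfee00000 APIC range and to fix up
         * the message data word at 0x98 -- offsets inferred from the code,
         * not from documentation.)
         */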
2280         pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
2281         pci_read_config_word(dev->pdev, 0x98, &msid);
2282         msid &= 0xff; /* mask out delivery bits */
2283         msid |= (1<<14);
2284         pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
2285
2286         I915_WRITE(PORT_HOTPLUG_EN, 0);
2287         POSTING_READ(PORT_HOTPLUG_EN);
2288
2289         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2290         I915_WRITE(VLV_IER, enable_mask);
2291         I915_WRITE(VLV_IIR, 0xffffffff);
2292         I915_WRITE(PIPESTAT(0), 0xffff);
2293         I915_WRITE(PIPESTAT(1), 0xffff);
2294         POSTING_READ(VLV_IER);
2295
2296         i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2297         i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2298         i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2299
2300         I915_WRITE(VLV_IIR, 0xffffffff);
2301         I915_WRITE(VLV_IIR, 0xffffffff);
2302
2303         I915_WRITE(GTIIR, I915_READ(GTIIR));
2304         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2305
2306         render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
2307                 GEN6_BLITTER_USER_INTERRUPT;
2308         I915_WRITE(GTIER, render_irqs);
2309         POSTING_READ(GTIER);
2310
2311         /* ack & enable invalid PTE error interrupts */
2312 #if 0 /* FIXME: add support to irq handler for checking these bits */
2313         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2314         I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2315 #endif
2316
2317         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2318
2319         return 0;
2320 }
2321
2322 static void valleyview_irq_uninstall(struct drm_device *dev)
2323 {
2324         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2325         int pipe;
2326
2327         if (!dev_priv)
2328                 return;
2329
2330         for_each_pipe(pipe)
2331                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2332
2333         I915_WRITE(HWSTAM, 0xffffffff);
2334         I915_WRITE(PORT_HOTPLUG_EN, 0);
2335         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2336         for_each_pipe(pipe)
2337                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2338         I915_WRITE(VLV_IIR, 0xffffffff);
2339         I915_WRITE(VLV_IMR, 0xffffffff);
2340         I915_WRITE(VLV_IER, 0x0);
2341         POSTING_READ(VLV_IER);
2342 }
2343
2344 static void ironlake_irq_uninstall(struct drm_device *dev)
2345 {
2346         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2347
2348         if (!dev_priv)
2349                 return;
2350
2351         I915_WRITE(HWSTAM, 0xffffffff);
2352
2353         I915_WRITE(DEIMR, 0xffffffff);
2354         I915_WRITE(DEIER, 0x0);
2355         I915_WRITE(DEIIR, I915_READ(DEIIR));
2356
2357         I915_WRITE(GTIMR, 0xffffffff);
2358         I915_WRITE(GTIER, 0x0);
2359         I915_WRITE(GTIIR, I915_READ(GTIIR));
2360
2361         if (HAS_PCH_NOP(dev))
2362                 return;
2363
2364         I915_WRITE(SDEIMR, 0xffffffff);
2365         I915_WRITE(SDEIER, 0x0);
2366         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2367 }
2368
2369 static void i8xx_irq_preinstall(struct drm_device * dev)
2370 {
2371         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2372         int pipe;
2373
2374         atomic_set(&dev_priv->irq_received, 0);
2375
2376         for_each_pipe(pipe)
2377                 I915_WRITE(PIPESTAT(pipe), 0);
2378         I915_WRITE16(IMR, 0xffff);
2379         I915_WRITE16(IER, 0x0);
2380         POSTING_READ16(IER);
2381 }
2382
2383 static int i8xx_irq_postinstall(struct drm_device *dev)
2384 {
2385         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2386
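        /* Unmask only the error sources we actually handle (page table and
         * memory refresh errors); everything else stays masked in EMR.
         */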
2387         I915_WRITE16(EMR,
2388                      ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2389
2390         /* Unmask the interrupts that we always want on. */
2391         dev_priv->irq_mask =
2392                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2393                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2394                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2395                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2396                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2397         I915_WRITE16(IMR, dev_priv->irq_mask);
2398
2399         I915_WRITE16(IER,
2400                      I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2401                      I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2402                      I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2403                      I915_USER_INTERRUPT);
2404         POSTING_READ16(IER);
2405
2406         return 0;
2407 }
2408
2409 /*
2410  * Returns true when a page flip has completed.
2411  */
2412 static bool i8xx_handle_vblank(struct drm_device *dev,
2413                                int pipe, u16 iir)
2414 {
2415         drm_i915_private_t *dev_priv = dev->dev_private;
2416         u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
2417
2418         if (!drm_handle_vblank(dev, pipe))
2419                 return false;
2420
2421         if ((iir & flip_pending) == 0)
2422                 return false;
2423
2424         intel_prepare_page_flip(dev, pipe);
2425
2426         /* We detect FlipDone by looking for the change in PendingFlip from '1'
2427          * to '0' on the following vblank, i.e. IIR has the PendingFlip
2428          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2429          * the flip is completed (no longer pending). Since this doesn't raise
2430          * an interrupt per se, we watch for the change at vblank.
2431          */
2432         if (I915_READ16(ISR) & flip_pending)
2433                 return false;
2434
2435         intel_finish_page_flip(dev, pipe);
2436
2437         return true;
2438 }
2439
2440 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
2441 {
2442         struct drm_device *dev = (struct drm_device *) arg;
2443         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2444         u16 iir, new_iir;
2445         u32 pipe_stats[2];
2446         unsigned long irqflags;
2447         int irq_received;
2448         int pipe;
2449         u16 flip_mask =
2450                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2451                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2452
2453         atomic_inc(&dev_priv->irq_received);
2454
2455         iir = I915_READ16(IIR);
2456         if (iir == 0)
2457                 return IRQ_NONE;
2458
2459         while (iir & ~flip_mask) {
2460                 /* Can't rely on pipestat interrupt bit in iir as it might
2461                  * have been cleared after the pipestat interrupt was received.
2462                  * It doesn't set the bit in iir again, but it still produces
2463                  * interrupts (for non-MSI).
2464                  */
2465                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2466                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2467                         i915_handle_error(dev, false);
2468
2469                 for_each_pipe(pipe) {
2470                         int reg = PIPESTAT(pipe);
2471                         pipe_stats[pipe] = I915_READ(reg);
2472
2473                         /*
2474                          * Clear the PIPE*STAT regs before the IIR
2475                          */
2476                         if (pipe_stats[pipe] & 0x8000ffff) {
2477                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2478                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
2479                                                          pipe_name(pipe));
2480                                 I915_WRITE(reg, pipe_stats[pipe]);
2481                                 irq_received = 1;
2482                         }
2483                 }
2484                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2485
2486                 I915_WRITE16(IIR, iir & ~flip_mask);
2487                 new_iir = I915_READ16(IIR); /* Flush posted writes */
2488
2489                 i915_update_dri1_breadcrumb(dev);
2490
2491                 if (iir & I915_USER_INTERRUPT)
2492                         notify_ring(dev, &dev_priv->ring[RCS]);
2493
2494                 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
2495                     i8xx_handle_vblank(dev, 0, iir))
2496                         flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
2497
2498                 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
2499                     i8xx_handle_vblank(dev, 1, iir))
2500                         flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
2501
2502                 iir = new_iir;
2503         }
2504
2505         return IRQ_HANDLED;
2506 }
2507
2508 static void i8xx_irq_uninstall(struct drm_device * dev)
2509 {
2510         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2511         int pipe;
2512
2513         for_each_pipe(pipe) {
2514                 /* Clear enable bits; then clear status bits */
2515                 I915_WRITE(PIPESTAT(pipe), 0);
2516                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2517         }
2518         I915_WRITE16(IMR, 0xffff);
2519         I915_WRITE16(IER, 0x0);
2520         I915_WRITE16(IIR, I915_READ16(IIR));
2521 }
2522
2523 static void i915_irq_preinstall(struct drm_device * dev)
2524 {
2525         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2526         int pipe;
2527
2528         atomic_set(&dev_priv->irq_received, 0);
2529
2530         if (I915_HAS_HOTPLUG(dev)) {
2531                 I915_WRITE(PORT_HOTPLUG_EN, 0);
2532                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2533         }
2534
2535         I915_WRITE16(HWSTAM, 0xeffe);
2536         for_each_pipe(pipe)
2537                 I915_WRITE(PIPESTAT(pipe), 0);
2538         I915_WRITE(IMR, 0xffffffff);
2539         I915_WRITE(IER, 0x0);
2540         POSTING_READ(IER);
2541 }
2542
2543 static int i915_irq_postinstall(struct drm_device *dev)
2544 {
2545         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2546         u32 enable_mask;
2547
2548         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2549
2550         /* Unmask the interrupts that we always want on. */
2551         dev_priv->irq_mask =
2552                 ~(I915_ASLE_INTERRUPT |
2553                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2554                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2555                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2556                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2557                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2558
2559         enable_mask =
2560                 I915_ASLE_INTERRUPT |
2561                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2562                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2563                 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2564                 I915_USER_INTERRUPT;
2565
2566         if (I915_HAS_HOTPLUG(dev)) {
2567                 I915_WRITE(PORT_HOTPLUG_EN, 0);
2568                 POSTING_READ(PORT_HOTPLUG_EN);
2569
2570                 /* Enable in IER... */
2571                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2572                 /* and unmask in IMR */
2573                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2574         }
2575
2576         I915_WRITE(IMR, dev_priv->irq_mask);
2577         I915_WRITE(IER, enable_mask);
2578         POSTING_READ(IER);
2579
2580         intel_opregion_enable_asle(dev);
2581
2582         return 0;
2583 }
2584
2585 /*
2586  * Returns true when a page flip has completed.
2587  */
2588 static bool i915_handle_vblank(struct drm_device *dev,
2589                                int plane, int pipe, u32 iir)
2590 {
2591         drm_i915_private_t *dev_priv = dev->dev_private;
2592         u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
2593
2594         if (!drm_handle_vblank(dev, pipe))
2595                 return false;
2596
2597         if ((iir & flip_pending) == 0)
2598                 return false;
2599
2600         intel_prepare_page_flip(dev, plane);
2601
2602         /* We detect FlipDone by looking for the change in PendingFlip from '1'
2603          * to '0' on the following vblank, i.e. IIR has the PendingFlip
2604          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2605          * the flip is completed (no longer pending). Since this doesn't raise
2606          * an interrupt per se, we watch for the change at vblank.
2607          */
2608         if (I915_READ(ISR) & flip_pending)
2609                 return false;
2610
2611         intel_finish_page_flip(dev, pipe);
2612
2613         return true;
2614 }
2615
2616 static irqreturn_t i915_irq_handler(int irq, void *arg)
2617 {
2618         struct drm_device *dev = (struct drm_device *) arg;
2619         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2620         u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
2621         unsigned long irqflags;
2622         u32 flip_mask =
2623                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2624                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2625         int pipe, ret = IRQ_NONE;
2626
2627         atomic_inc(&dev_priv->irq_received);
2628
2629         iir = I915_READ(IIR);
2630         do {
2631                 bool irq_received = (iir & ~flip_mask) != 0;
2632                 bool blc_event = false;
2633
2634                 /* Can't rely on pipestat interrupt bit in iir as it might
2635                  * have been cleared after the pipestat interrupt was received.
2636                  * It doesn't set the bit in iir again, but it still produces
2637                  * interrupts (for non-MSI).
2638                  */
2639                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2640                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2641                         i915_handle_error(dev, false);
2642
2643                 for_each_pipe(pipe) {
2644                         int reg = PIPESTAT(pipe);
2645                         pipe_stats[pipe] = I915_READ(reg);
2646
2647                         /* Clear the PIPE*STAT regs before the IIR */
2648                         if (pipe_stats[pipe] & 0x8000ffff) {
2649                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2650                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
2651                                                          pipe_name(pipe));
2652                                 I915_WRITE(reg, pipe_stats[pipe]);
2653                                 irq_received = true;
2654                         }
2655                 }
2656                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2657
2658                 if (!irq_received)
2659                         break;
2660
2661                 /* Consume port.  Then clear IIR or we'll miss events */
2662                 if ((I915_HAS_HOTPLUG(dev)) &&
2663                     (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2664                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2665                         u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2666
2667                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2668                                   hotplug_status);
2669                         if (hotplug_trigger) {
2670                                 hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915);
2671                                 queue_work(dev_priv->wq,
2672                                            &dev_priv->hotplug_work);
2673                         }
2674                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2675                         POSTING_READ(PORT_HOTPLUG_STAT);
2676                 }
2677
2678                 I915_WRITE(IIR, iir & ~flip_mask);
2679                 new_iir = I915_READ(IIR); /* Flush posted writes */
2680
2681                 if (iir & I915_USER_INTERRUPT)
2682                         notify_ring(dev, &dev_priv->ring[RCS]);
2683
2684                 for_each_pipe(pipe) {
2685                         int plane = pipe;
2686                         if (IS_MOBILE(dev))
2687                                 plane = !plane;
2688
2689                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2690                             i915_handle_vblank(dev, plane, pipe, iir))
2691                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
2692
2693                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2694                                 blc_event = true;
2695                 }
2696
2697                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2698                         intel_opregion_asle_intr(dev);
2699
2700                 /* With MSI, interrupts are only generated when iir
2701                  * transitions from zero to nonzero.  If another bit got
2702                  * set while we were handling the existing iir bits, then
2703                  * we would never get another interrupt.
2704                  *
2705                  * This is fine on non-MSI as well, as if we hit this path
2706                  * we avoid exiting the interrupt handler only to generate
2707                  * another one.
2708                  *
2709                  * Note that for MSI this could cause a stray interrupt report
2710                  * if an interrupt landed in the time between writing IIR and
2711                  * the posting read.  This should be rare enough to never
2712                  * trigger the 99% of 100,000 interrupts test for disabling
2713                  * stray interrupts.
2714                  */
2715                 ret = IRQ_HANDLED;
2716                 iir = new_iir;
2717         } while (iir & ~flip_mask);
2718
2719         i915_update_dri1_breadcrumb(dev);
2720
2721         return ret;
2722 }
2723
2724 static void i915_irq_uninstall(struct drm_device * dev)
2725 {
2726         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2727         int pipe;
2728
2729         if (I915_HAS_HOTPLUG(dev)) {
2730                 I915_WRITE(PORT_HOTPLUG_EN, 0);
2731                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2732         }
2733
2734         I915_WRITE16(HWSTAM, 0xffff);
2735         for_each_pipe(pipe) {
2736                 /* Clear enable bits; then clear status bits */
2737                 I915_WRITE(PIPESTAT(pipe), 0);
2738                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2739         }
2740         I915_WRITE(IMR, 0xffffffff);
2741         I915_WRITE(IER, 0x0);
2742
2743         I915_WRITE(IIR, I915_READ(IIR));
2744 }
2745
2746 static void i965_irq_preinstall(struct drm_device * dev)
2747 {
2748         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2749         int pipe;
2750
2751         atomic_set(&dev_priv->irq_received, 0);
2752
2753         I915_WRITE(PORT_HOTPLUG_EN, 0);
2754         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2755
2756         I915_WRITE(HWSTAM, 0xeffe);
2757         for_each_pipe(pipe)
2758                 I915_WRITE(PIPESTAT(pipe), 0);
2759         I915_WRITE(IMR, 0xffffffff);
2760         I915_WRITE(IER, 0x0);
2761         POSTING_READ(IER);
2762 }
2763
2764 static int i965_irq_postinstall(struct drm_device *dev)
2765 {
2766         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2767         u32 enable_mask;
2768         u32 error_mask;
2769
2770         /* Unmask the interrupts that we always want on. */
2771         dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2772                                I915_DISPLAY_PORT_INTERRUPT |
2773                                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2774                                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2775                                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2776                                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2777                                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2778
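        /* IER mirrors the bits unmasked in IMR, minus the flip-pending
         * bits (those are watched via IIR/ISR instead of being enabled
         * outright) and plus the user interrupt, whose IMR bit is managed
         * at runtime by the ring code.
         */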
2779         enable_mask = ~dev_priv->irq_mask;
2780         enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2781                          I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
2782         enable_mask |= I915_USER_INTERRUPT;
2783
2784         if (IS_G4X(dev))
2785                 enable_mask |= I915_BSD_USER_INTERRUPT;
2786
        i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);

        /*
         * Enable some error detection, note the instruction error mask
         * bit is reserved, so we leave it masked.
         */
        if (IS_G4X(dev)) {
                error_mask = ~(GM45_ERROR_PAGE_TABLE |
                               GM45_ERROR_MEM_PRIV |
                               GM45_ERROR_CP_PRIV |
                               I915_ERROR_MEMORY_REFRESH);
        } else {
                error_mask = ~(I915_ERROR_PAGE_TABLE |
                               I915_ERROR_MEMORY_REFRESH);
        }
        I915_WRITE(EMR, error_mask);

        I915_WRITE(IMR, dev_priv->irq_mask);
        I915_WRITE(IER, enable_mask);
        POSTING_READ(IER);

        I915_WRITE(PORT_HOTPLUG_EN, 0);
        POSTING_READ(PORT_HOTPLUG_EN);

        intel_opregion_enable_asle(dev);

        return 0;
}

static void i915_hpd_irq_setup(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct intel_encoder *encoder;
        u32 hotplug_en;

        if (I915_HAS_HOTPLUG(dev)) {
                hotplug_en = I915_READ(PORT_HOTPLUG_EN);
                hotplug_en &= ~HOTPLUG_INT_EN_MASK;
                /* Note HDMI and DP share hotplug bits */
                /* enable bits are the same for all generations */
                list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
                        hotplug_en |= hpd_mask_i915[encoder->hpd_pin];
                /*
                 * Programming the CRT detection parameters tends to
                 * generate a spurious hotplug event about three seconds
                 * later.  So just do it once.
                 */
                if (IS_G4X(dev))
                        hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
                hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
                hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

                /* Ignore TV since it's buggy */
                I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
        }
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 iir, new_iir;
        u32 pipe_stats[I915_MAX_PIPES];
        unsigned long irqflags;
        bool irq_received;
        int ret = IRQ_NONE, pipe;
        u32 flip_mask =
                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

        atomic_inc(&dev_priv->irq_received);

        iir = I915_READ(IIR);

        for (;;) {
                bool blc_event = false;

                irq_received = (iir & ~flip_mask) != 0;

                /* Can't rely on pipestat interrupt bit in iir as it might
                 * have been cleared after the pipestat interrupt was received.
                 * It doesn't set the bit in iir again, but it still produces
                 * interrupts (for non-MSI).
                 */
                spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
                if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
                        i915_handle_error(dev, false);

                for_each_pipe(pipe) {
                        int reg = PIPESTAT(pipe);
                        pipe_stats[pipe] = I915_READ(reg);

                        /*
                         * Clear the PIPE*STAT regs before the IIR
                         */
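                        /* 0x8000ffff covers the write-one-to-clear status
                         * bits: FIFO underrun in bit 31 plus the low
                         * sixteen event status bits. */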
                        if (pipe_stats[pipe] & 0x8000ffff) {
                                if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
                                        DRM_DEBUG_DRIVER("pipe %c underrun\n",
                                                         pipe_name(pipe));
                                I915_WRITE(reg, pipe_stats[pipe]);
                                irq_received = true;
                        }
                }
                spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

                if (!irq_received)
                        break;

                ret = IRQ_HANDLED;

                /* Consume port.  Then clear IIR or we'll miss events */
                if (iir & I915_DISPLAY_PORT_INTERRUPT) {
                        u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
                        u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
                                                                HOTPLUG_INT_STATUS_G4X :
                                                                HOTPLUG_INT_STATUS_I965);

                        DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
                                         hotplug_status);
                        if (hotplug_trigger) {
                                hotplug_irq_storm_detect(dev, hotplug_trigger,
                                                         IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i965);
                                queue_work(dev_priv->wq,
                                           &dev_priv->hotplug_work);
                        }
                        I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
                        I915_READ(PORT_HOTPLUG_STAT);
                }

                I915_WRITE(IIR, iir & ~flip_mask);
                new_iir = I915_READ(IIR); /* Flush posted writes */

                if (iir & I915_USER_INTERRUPT)
                        notify_ring(dev, &dev_priv->ring[RCS]);
                if (iir & I915_BSD_USER_INTERRUPT)
                        notify_ring(dev, &dev_priv->ring[VCS]);

                for_each_pipe(pipe) {
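                        /* i915_handle_vblank() returns true once it has
                         * completed a pending flip on this pipe; dropping
                         * the plane from flip_mask lets the next IIR write
                         * finally clear that flip-pending bit. */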
                        if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
                            i915_handle_vblank(dev, pipe, pipe, iir))
                                flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

                        if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
                                blc_event = true;
                }

                if (blc_event || (iir & I915_ASLE_INTERRUPT))
                        intel_opregion_asle_intr(dev);

                if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
                        gmbus_irq_handler(dev);

                /* With MSI, interrupts are only generated when iir
                 * transitions from zero to nonzero.  If another bit got
                 * set while we were handling the existing iir bits, then
                 * we would never get another interrupt.
                 *
                 * This is fine on non-MSI as well, as if we hit this path
                 * we avoid exiting the interrupt handler only to generate
                 * another one.
                 *
                 * Note that for MSI this could cause a stray interrupt report
                 * if an interrupt landed in the time between writing IIR and
                 * the posting read.  This should be rare enough to never
                 * trigger the 99% of 100,000 interrupts test for disabling
                 * stray interrupts.
                 */
                iir = new_iir;
        }

        i915_update_dri1_breadcrumb(dev);

        return ret;
}

static void i965_irq_uninstall(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int pipe;

        if (!dev_priv)
                return;

        I915_WRITE(PORT_HOTPLUG_EN, 0);
        I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

        I915_WRITE(HWSTAM, 0xffffffff);
        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe), 0);
        I915_WRITE(IMR, 0xffffffff);
        I915_WRITE(IER, 0x0);

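        /* The enable bits are already clear, so acking the remaining
         * status bits (low sixteen plus FIFO underrun in bit 31) cannot
         * latch any new events. */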
        for_each_pipe(pipe)
                I915_WRITE(PIPESTAT(pipe),
                           I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
        I915_WRITE(IIR, I915_READ(IIR));
}

void intel_irq_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
        INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
        INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
        INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

        setup_timer(&dev_priv->gpu_error.hangcheck_timer,
                    i915_hangcheck_elapsed,
                    (unsigned long) dev);

        pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
                           PM_QOS_DEFAULT_VALUE);

        dev->driver->get_vblank_counter = i915_get_vblank_counter;
        dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
        if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
                dev->driver->get_vblank_counter = gm45_get_vblank_counter;
        }

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
        else
                dev->driver->get_vblank_timestamp = NULL;
        dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

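        /* Pick the IRQ flavour for this platform: Valleyview, then
         * Ivybridge/Haswell, then the remaining PCH-split parts
         * (Ironlake/Sandybridge), and finally legacy gen2/3/4. */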
        if (IS_VALLEYVIEW(dev)) {
                dev->driver->irq_handler = valleyview_irq_handler;
                dev->driver->irq_preinstall = valleyview_irq_preinstall;
                dev->driver->irq_postinstall = valleyview_irq_postinstall;
                dev->driver->irq_uninstall = valleyview_irq_uninstall;
                dev->driver->enable_vblank = valleyview_enable_vblank;
                dev->driver->disable_vblank = valleyview_disable_vblank;
                dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
        } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
                /* Share pre & uninstall handlers with ILK/SNB */
                dev->driver->irq_handler = ivybridge_irq_handler;
                dev->driver->irq_preinstall = ironlake_irq_preinstall;
                dev->driver->irq_postinstall = ivybridge_irq_postinstall;
                dev->driver->irq_uninstall = ironlake_irq_uninstall;
                dev->driver->enable_vblank = ivybridge_enable_vblank;
                dev->driver->disable_vblank = ivybridge_disable_vblank;
                dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev->driver->irq_handler = ironlake_irq_handler;
                dev->driver->irq_preinstall = ironlake_irq_preinstall;
                dev->driver->irq_postinstall = ironlake_irq_postinstall;
                dev->driver->irq_uninstall = ironlake_irq_uninstall;
                dev->driver->enable_vblank = ironlake_enable_vblank;
                dev->driver->disable_vblank = ironlake_disable_vblank;
                dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
        } else {
                if (INTEL_INFO(dev)->gen == 2) {
                        dev->driver->irq_preinstall = i8xx_irq_preinstall;
                        dev->driver->irq_postinstall = i8xx_irq_postinstall;
                        dev->driver->irq_handler = i8xx_irq_handler;
                        dev->driver->irq_uninstall = i8xx_irq_uninstall;
                } else if (INTEL_INFO(dev)->gen == 3) {
                        dev->driver->irq_preinstall = i915_irq_preinstall;
                        dev->driver->irq_postinstall = i915_irq_postinstall;
                        dev->driver->irq_uninstall = i915_irq_uninstall;
                        dev->driver->irq_handler = i915_irq_handler;
                        dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
                } else {
                        dev->driver->irq_preinstall = i965_irq_preinstall;
                        dev->driver->irq_postinstall = i965_irq_postinstall;
                        dev->driver->irq_uninstall = i965_irq_uninstall;
                        dev->driver->irq_handler = i965_irq_handler;
                        dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
                }
                dev->driver->enable_vblank = i915_enable_vblank;
                dev->driver->disable_vblank = i915_disable_vblank;
        }
}

void intel_hpd_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_connector *connector;
        int i;

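        /* Start at 1: pin 0 is HPD_NONE and takes no part in storm
         * detection.  Zeroing hpd_cnt and re-marking every pin
         * HPD_ENABLED re-arms pins that an earlier interrupt storm
         * had demoted to polling. */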
        for (i = 1; i < HPD_NUM_PINS; i++) {
                dev_priv->hpd_stats[i].hpd_cnt = 0;
                dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
        }
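        /* intel_connector->polled caches the connector's default poll
         * flags; restore those first, then prefer HPD wherever the
         * hardware and the pin assignment support it. */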
        list_for_each_entry(connector, &mode_config->connector_list, head) {
                struct intel_connector *intel_connector = to_intel_connector(connector);
                connector->polled = intel_connector->polled;
                if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
                    intel_connector->encoder->hpd_pin > HPD_NONE)
                        connector->polled = DRM_CONNECTOR_POLL_HPD;
        }
        if (dev_priv->display.hpd_irq_setup)
                dev_priv->display.hpd_irq_setup(dev);
}