]> rtime.felk.cvut.cz Git - linux-imx.git/blob - drivers/gpu/drm/i915/i915_irq.c
drm/i915: remove "inline" keyword from ironlake_disable_display_irq
[linux-imx.git] / drivers / gpu / drm / i915 / i915_irq.c
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31 #include <linux/sysrq.h>
32 #include <linux/slab.h>
33 #include <drm/drmP.h>
34 #include <drm/i915_drm.h>
35 #include "i915_drv.h"
36 #include "i915_trace.h"
37 #include "intel_drv.h"
38
39 static const u32 hpd_ibx[] = {
40         [HPD_CRT] = SDE_CRT_HOTPLUG,
41         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
42         [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
43         [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
44         [HPD_PORT_D] = SDE_PORTD_HOTPLUG
45 };
46
47 static const u32 hpd_cpt[] = {
48         [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
49         [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
50         [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
51         [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
52         [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
53 };
54
55 static const u32 hpd_mask_i915[] = {
56         [HPD_CRT] = CRT_HOTPLUG_INT_EN,
57         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
58         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
59         [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
60         [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
61         [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
62 };
63
64 static const u32 hpd_status_gen4[] = {
65         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
66         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
67         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
68         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
69         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
70         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
71 };
72
73 static const u32 hpd_status_i965[] = {
74          [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
75          [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I965,
76          [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I965,
77          [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
78          [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
79          [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
80 };
81
82 static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
83         [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
84         [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
85         [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
86         [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
87         [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
88         [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
89 };
90
91
92
93 /* For display hotplug interrupt */
94 static void
95 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
96 {
97         if ((dev_priv->irq_mask & mask) != 0) {
98                 dev_priv->irq_mask &= ~mask;
99                 I915_WRITE(DEIMR, dev_priv->irq_mask);
100                 POSTING_READ(DEIMR);
101         }
102 }
103
104 static void
105 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
106 {
107         if ((dev_priv->irq_mask & mask) != mask) {
108                 dev_priv->irq_mask |= mask;
109                 I915_WRITE(DEIMR, dev_priv->irq_mask);
110                 POSTING_READ(DEIMR);
111         }
112 }
113
114 void
115 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
116 {
117         u32 reg = PIPESTAT(pipe);
118         u32 pipestat = I915_READ(reg) & 0x7fff0000;
119
120         if ((pipestat & mask) == mask)
121                 return;
122
123         /* Enable the interrupt, clear any pending status */
124         pipestat |= mask | (mask >> 16);
125         I915_WRITE(reg, pipestat);
126         POSTING_READ(reg);
127 }
128
129 void
130 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
131 {
132         u32 reg = PIPESTAT(pipe);
133         u32 pipestat = I915_READ(reg) & 0x7fff0000;
134
135         if ((pipestat & mask) == 0)
136                 return;
137
138         pipestat &= ~mask;
139         I915_WRITE(reg, pipestat);
140         POSTING_READ(reg);
141 }
142
143 /**
144  * intel_enable_asle - enable ASLE interrupt for OpRegion
145  */
146 void intel_enable_asle(struct drm_device *dev)
147 {
148         drm_i915_private_t *dev_priv = dev->dev_private;
149         unsigned long irqflags;
150
151         /* FIXME: opregion/asle for VLV */
152         if (IS_VALLEYVIEW(dev))
153                 return;
154
155         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
156
157         if (HAS_PCH_SPLIT(dev))
158                 ironlake_enable_display_irq(dev_priv, DE_GSE);
159         else {
160                 i915_enable_pipestat(dev_priv, 1,
161                                      PIPE_LEGACY_BLC_EVENT_ENABLE);
162                 if (INTEL_INFO(dev)->gen >= 4)
163                         i915_enable_pipestat(dev_priv, 0,
164                                              PIPE_LEGACY_BLC_EVENT_ENABLE);
165         }
166
167         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
168 }
169
170 /**
171  * i915_pipe_enabled - check if a pipe is enabled
172  * @dev: DRM device
173  * @pipe: pipe to check
174  *
175  * Reading certain registers when the pipe is disabled can hang the chip.
176  * Use this routine to make sure the PLL is running and the pipe is active
177  * before reading such registers if unsure.
178  */
179 static int
180 i915_pipe_enabled(struct drm_device *dev, int pipe)
181 {
182         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
183         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
184                                                                       pipe);
185
186         return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
187 }
188
189 /* Called from drm generic code, passed a 'crtc', which
190  * we use as a pipe index
191  */
192 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
193 {
194         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
195         unsigned long high_frame;
196         unsigned long low_frame;
197         u32 high1, high2, low;
198
199         if (!i915_pipe_enabled(dev, pipe)) {
200                 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
201                                 "pipe %c\n", pipe_name(pipe));
202                 return 0;
203         }
204
205         high_frame = PIPEFRAME(pipe);
206         low_frame = PIPEFRAMEPIXEL(pipe);
207
208         /*
209          * High & low register fields aren't synchronized, so make sure
210          * we get a low value that's stable across two reads of the high
211          * register.
212          */
213         do {
214                 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
215                 low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
216                 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
217         } while (high1 != high2);
218
219         high1 >>= PIPE_FRAME_HIGH_SHIFT;
220         low >>= PIPE_FRAME_LOW_SHIFT;
221         return (high1 << 8) | low;
222 }
223
224 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
225 {
226         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
227         int reg = PIPE_FRMCOUNT_GM45(pipe);
228
229         if (!i915_pipe_enabled(dev, pipe)) {
230                 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
231                                  "pipe %c\n", pipe_name(pipe));
232                 return 0;
233         }
234
235         return I915_READ(reg);
236 }
237
238 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
239                              int *vpos, int *hpos)
240 {
241         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
242         u32 vbl = 0, position = 0;
243         int vbl_start, vbl_end, htotal, vtotal;
244         bool in_vbl = true;
245         int ret = 0;
246         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
247                                                                       pipe);
248
249         if (!i915_pipe_enabled(dev, pipe)) {
250                 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
251                                  "pipe %c\n", pipe_name(pipe));
252                 return 0;
253         }
254
255         /* Get vtotal. */
256         vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
257
258         if (INTEL_INFO(dev)->gen >= 4) {
259                 /* No obvious pixelcount register. Only query vertical
260                  * scanout position from Display scan line register.
261                  */
262                 position = I915_READ(PIPEDSL(pipe));
263
264                 /* Decode into vertical scanout position. Don't have
265                  * horizontal scanout position.
266                  */
267                 *vpos = position & 0x1fff;
268                 *hpos = 0;
269         } else {
270                 /* Have access to pixelcount since start of frame.
271                  * We can split this into vertical and horizontal
272                  * scanout position.
273                  */
274                 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
275
276                 htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
277                 *vpos = position / htotal;
278                 *hpos = position - (*vpos * htotal);
279         }
280
281         /* Query vblank area. */
282         vbl = I915_READ(VBLANK(cpu_transcoder));
283
284         /* Test position against vblank region. */
285         vbl_start = vbl & 0x1fff;
286         vbl_end = (vbl >> 16) & 0x1fff;
287
288         if ((*vpos < vbl_start) || (*vpos > vbl_end))
289                 in_vbl = false;
290
291         /* Inside "upper part" of vblank area? Apply corrective offset: */
292         if (in_vbl && (*vpos >= vbl_start))
293                 *vpos = *vpos - vtotal;
294
295         /* Readouts valid? */
296         if (vbl > 0)
297                 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
298
299         /* In vblank? */
300         if (in_vbl)
301                 ret |= DRM_SCANOUTPOS_INVBL;
302
303         return ret;
304 }
305
306 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
307                               int *max_error,
308                               struct timeval *vblank_time,
309                               unsigned flags)
310 {
311         struct drm_crtc *crtc;
312
313         if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
314                 DRM_ERROR("Invalid crtc %d\n", pipe);
315                 return -EINVAL;
316         }
317
318         /* Get drm_crtc to timestamp: */
319         crtc = intel_get_crtc_for_pipe(dev, pipe);
320         if (crtc == NULL) {
321                 DRM_ERROR("Invalid crtc %d\n", pipe);
322                 return -EINVAL;
323         }
324
325         if (!crtc->enabled) {
326                 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
327                 return -EBUSY;
328         }
329
330         /* Helper routine in DRM core does all the work: */
331         return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
332                                                      vblank_time, flags,
333                                                      crtc);
334 }
335
336 /*
337  * Handle hotplug events outside the interrupt handler proper.
338  */
339 static void i915_hotplug_work_func(struct work_struct *work)
340 {
341         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
342                                                     hotplug_work);
343         struct drm_device *dev = dev_priv->dev;
344         struct drm_mode_config *mode_config = &dev->mode_config;
345         struct intel_encoder *encoder;
346
347         /* HPD irq before everything is fully set up. */
348         if (!dev_priv->enable_hotplug_processing)
349                 return;
350
351         mutex_lock(&mode_config->mutex);
352         DRM_DEBUG_KMS("running encoder hotplug functions\n");
353
354         list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
355                 if (encoder->hot_plug)
356                         encoder->hot_plug(encoder);
357
358         mutex_unlock(&mode_config->mutex);
359
360         /* Just fire off a uevent and let userspace tell us what to do */
361         drm_helper_hpd_irq_event(dev);
362 }
363
364 static void ironlake_handle_rps_change(struct drm_device *dev)
365 {
366         drm_i915_private_t *dev_priv = dev->dev_private;
367         u32 busy_up, busy_down, max_avg, min_avg;
368         u8 new_delay;
369         unsigned long flags;
370
371         spin_lock_irqsave(&mchdev_lock, flags);
372
373         I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
374
375         new_delay = dev_priv->ips.cur_delay;
376
377         I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
378         busy_up = I915_READ(RCPREVBSYTUPAVG);
379         busy_down = I915_READ(RCPREVBSYTDNAVG);
380         max_avg = I915_READ(RCBMAXAVG);
381         min_avg = I915_READ(RCBMINAVG);
382
383         /* Handle RCS change request from hw */
384         if (busy_up > max_avg) {
385                 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
386                         new_delay = dev_priv->ips.cur_delay - 1;
387                 if (new_delay < dev_priv->ips.max_delay)
388                         new_delay = dev_priv->ips.max_delay;
389         } else if (busy_down < min_avg) {
390                 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
391                         new_delay = dev_priv->ips.cur_delay + 1;
392                 if (new_delay > dev_priv->ips.min_delay)
393                         new_delay = dev_priv->ips.min_delay;
394         }
395
396         if (ironlake_set_drps(dev, new_delay))
397                 dev_priv->ips.cur_delay = new_delay;
398
399         spin_unlock_irqrestore(&mchdev_lock, flags);
400
401         return;
402 }
403
404 static void notify_ring(struct drm_device *dev,
405                         struct intel_ring_buffer *ring)
406 {
407         struct drm_i915_private *dev_priv = dev->dev_private;
408
409         if (ring->obj == NULL)
410                 return;
411
412         trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
413
414         wake_up_all(&ring->irq_queue);
415         if (i915_enable_hangcheck) {
416                 dev_priv->gpu_error.hangcheck_count = 0;
417                 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
418                           round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
419         }
420 }
421
422 static void gen6_pm_rps_work(struct work_struct *work)
423 {
424         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
425                                                     rps.work);
426         u32 pm_iir, pm_imr;
427         u8 new_delay;
428
429         spin_lock_irq(&dev_priv->rps.lock);
430         pm_iir = dev_priv->rps.pm_iir;
431         dev_priv->rps.pm_iir = 0;
432         pm_imr = I915_READ(GEN6_PMIMR);
433         I915_WRITE(GEN6_PMIMR, 0);
434         spin_unlock_irq(&dev_priv->rps.lock);
435
436         if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
437                 return;
438
439         mutex_lock(&dev_priv->rps.hw_lock);
440
441         if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
442                 new_delay = dev_priv->rps.cur_delay + 1;
443         else
444                 new_delay = dev_priv->rps.cur_delay - 1;
445
446         /* sysfs frequency interfaces may have snuck in while servicing the
447          * interrupt
448          */
449         if (!(new_delay > dev_priv->rps.max_delay ||
450               new_delay < dev_priv->rps.min_delay)) {
451                 gen6_set_rps(dev_priv->dev, new_delay);
452         }
453
454         mutex_unlock(&dev_priv->rps.hw_lock);
455 }
456
457
458 /**
459  * ivybridge_parity_work - Workqueue called when a parity error interrupt
460  * occurred.
461  * @work: workqueue struct
462  *
463  * Doesn't actually do anything except notify userspace. As a consequence of
464  * this event, userspace should try to remap the bad rows since statistically
465  * it is likely the same row is more likely to go bad again.
466  */
467 static void ivybridge_parity_work(struct work_struct *work)
468 {
469         drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
470                                                     l3_parity.error_work);
471         u32 error_status, row, bank, subbank;
472         char *parity_event[5];
473         uint32_t misccpctl;
474         unsigned long flags;
475
476         /* We must turn off DOP level clock gating to access the L3 registers.
477          * In order to prevent a get/put style interface, acquire struct mutex
478          * any time we access those registers.
479          */
480         mutex_lock(&dev_priv->dev->struct_mutex);
481
482         misccpctl = I915_READ(GEN7_MISCCPCTL);
483         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
484         POSTING_READ(GEN7_MISCCPCTL);
485
486         error_status = I915_READ(GEN7_L3CDERRST1);
487         row = GEN7_PARITY_ERROR_ROW(error_status);
488         bank = GEN7_PARITY_ERROR_BANK(error_status);
489         subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
490
491         I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
492                                     GEN7_L3CDERRST1_ENABLE);
493         POSTING_READ(GEN7_L3CDERRST1);
494
495         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
496
497         spin_lock_irqsave(&dev_priv->irq_lock, flags);
498         dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
499         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
500         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
501
502         mutex_unlock(&dev_priv->dev->struct_mutex);
503
504         parity_event[0] = "L3_PARITY_ERROR=1";
505         parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
506         parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
507         parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
508         parity_event[4] = NULL;
509
510         kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
511                            KOBJ_CHANGE, parity_event);
512
513         DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
514                   row, bank, subbank);
515
516         kfree(parity_event[3]);
517         kfree(parity_event[2]);
518         kfree(parity_event[1]);
519 }
520
521 static void ivybridge_handle_parity_error(struct drm_device *dev)
522 {
523         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
524         unsigned long flags;
525
526         if (!HAS_L3_GPU_CACHE(dev))
527                 return;
528
529         spin_lock_irqsave(&dev_priv->irq_lock, flags);
530         dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
531         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
532         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
533
534         queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
535 }
536
537 static void snb_gt_irq_handler(struct drm_device *dev,
538                                struct drm_i915_private *dev_priv,
539                                u32 gt_iir)
540 {
541
542         if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
543                       GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
544                 notify_ring(dev, &dev_priv->ring[RCS]);
545         if (gt_iir & GEN6_BSD_USER_INTERRUPT)
546                 notify_ring(dev, &dev_priv->ring[VCS]);
547         if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
548                 notify_ring(dev, &dev_priv->ring[BCS]);
549
550         if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
551                       GT_GEN6_BSD_CS_ERROR_INTERRUPT |
552                       GT_RENDER_CS_ERROR_INTERRUPT)) {
553                 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
554                 i915_handle_error(dev, false);
555         }
556
557         if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
558                 ivybridge_handle_parity_error(dev);
559 }
560
561 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
562                                 u32 pm_iir)
563 {
564         unsigned long flags;
565
566         /*
567          * IIR bits should never already be set because IMR should
568          * prevent an interrupt from being shown in IIR. The warning
569          * displays a case where we've unsafely cleared
570          * dev_priv->rps.pm_iir. Although missing an interrupt of the same
571          * type is not a problem, it displays a problem in the logic.
572          *
573          * The mask bit in IMR is cleared by dev_priv->rps.work.
574          */
575
576         spin_lock_irqsave(&dev_priv->rps.lock, flags);
577         dev_priv->rps.pm_iir |= pm_iir;
578         I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
579         POSTING_READ(GEN6_PMIMR);
580         spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
581
582         queue_work(dev_priv->wq, &dev_priv->rps.work);
583 }
584
585 static void gmbus_irq_handler(struct drm_device *dev)
586 {
587         struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
588
589         wake_up_all(&dev_priv->gmbus_wait_queue);
590 }
591
592 static void dp_aux_irq_handler(struct drm_device *dev)
593 {
594         struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
595
596         wake_up_all(&dev_priv->gmbus_wait_queue);
597 }
598
599 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
600 {
601         struct drm_device *dev = (struct drm_device *) arg;
602         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
603         u32 iir, gt_iir, pm_iir;
604         irqreturn_t ret = IRQ_NONE;
605         unsigned long irqflags;
606         int pipe;
607         u32 pipe_stats[I915_MAX_PIPES];
608
609         atomic_inc(&dev_priv->irq_received);
610
611         while (true) {
612                 iir = I915_READ(VLV_IIR);
613                 gt_iir = I915_READ(GTIIR);
614                 pm_iir = I915_READ(GEN6_PMIIR);
615
616                 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
617                         goto out;
618
619                 ret = IRQ_HANDLED;
620
621                 snb_gt_irq_handler(dev, dev_priv, gt_iir);
622
623                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
624                 for_each_pipe(pipe) {
625                         int reg = PIPESTAT(pipe);
626                         pipe_stats[pipe] = I915_READ(reg);
627
628                         /*
629                          * Clear the PIPE*STAT regs before the IIR
630                          */
631                         if (pipe_stats[pipe] & 0x8000ffff) {
632                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
633                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
634                                                          pipe_name(pipe));
635                                 I915_WRITE(reg, pipe_stats[pipe]);
636                         }
637                 }
638                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
639
640                 for_each_pipe(pipe) {
641                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
642                                 drm_handle_vblank(dev, pipe);
643
644                         if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
645                                 intel_prepare_page_flip(dev, pipe);
646                                 intel_finish_page_flip(dev, pipe);
647                         }
648                 }
649
650                 /* Consume port.  Then clear IIR or we'll miss events */
651                 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
652                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
653
654                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
655                                          hotplug_status);
656                         if (hotplug_status & HOTPLUG_INT_STATUS_I915)
657                                 queue_work(dev_priv->wq,
658                                            &dev_priv->hotplug_work);
659
660                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
661                         I915_READ(PORT_HOTPLUG_STAT);
662                 }
663
664                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
665                         gmbus_irq_handler(dev);
666
667                 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
668                         gen6_queue_rps_work(dev_priv, pm_iir);
669
670                 I915_WRITE(GTIIR, gt_iir);
671                 I915_WRITE(GEN6_PMIIR, pm_iir);
672                 I915_WRITE(VLV_IIR, iir);
673         }
674
675 out:
676         return ret;
677 }
678
679 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
680 {
681         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
682         int pipe;
683
684         if (pch_iir & SDE_HOTPLUG_MASK)
685                 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
686
687         if (pch_iir & SDE_AUDIO_POWER_MASK)
688                 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
689                                  (pch_iir & SDE_AUDIO_POWER_MASK) >>
690                                  SDE_AUDIO_POWER_SHIFT);
691
692         if (pch_iir & SDE_AUX_MASK)
693                 dp_aux_irq_handler(dev);
694
695         if (pch_iir & SDE_GMBUS)
696                 gmbus_irq_handler(dev);
697
698         if (pch_iir & SDE_AUDIO_HDCP_MASK)
699                 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
700
701         if (pch_iir & SDE_AUDIO_TRANS_MASK)
702                 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
703
704         if (pch_iir & SDE_POISON)
705                 DRM_ERROR("PCH poison interrupt\n");
706
707         if (pch_iir & SDE_FDI_MASK)
708                 for_each_pipe(pipe)
709                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
710                                          pipe_name(pipe),
711                                          I915_READ(FDI_RX_IIR(pipe)));
712
713         if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
714                 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
715
716         if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
717                 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
718
719         if (pch_iir & SDE_TRANSB_FIFO_UNDER)
720                 DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
721         if (pch_iir & SDE_TRANSA_FIFO_UNDER)
722                 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
723 }
724
725 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
726 {
727         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
728         int pipe;
729
730         if (pch_iir & SDE_HOTPLUG_MASK_CPT)
731                 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
732
733         if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
734                 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
735                                  (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
736                                  SDE_AUDIO_POWER_SHIFT_CPT);
737
738         if (pch_iir & SDE_AUX_MASK_CPT)
739                 dp_aux_irq_handler(dev);
740
741         if (pch_iir & SDE_GMBUS_CPT)
742                 gmbus_irq_handler(dev);
743
744         if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
745                 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
746
747         if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
748                 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
749
750         if (pch_iir & SDE_FDI_MASK_CPT)
751                 for_each_pipe(pipe)
752                         DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
753                                          pipe_name(pipe),
754                                          I915_READ(FDI_RX_IIR(pipe)));
755 }
756
757 static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
758 {
759         struct drm_device *dev = (struct drm_device *) arg;
760         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
761         u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
762         irqreturn_t ret = IRQ_NONE;
763         int i;
764
765         atomic_inc(&dev_priv->irq_received);
766
767         /* disable master interrupt before clearing iir  */
768         de_ier = I915_READ(DEIER);
769         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
770
771         /* Disable south interrupts. We'll only write to SDEIIR once, so further
772          * interrupts will will be stored on its back queue, and then we'll be
773          * able to process them after we restore SDEIER (as soon as we restore
774          * it, we'll get an interrupt if SDEIIR still has something to process
775          * due to its back queue). */
776         sde_ier = I915_READ(SDEIER);
777         I915_WRITE(SDEIER, 0);
778         POSTING_READ(SDEIER);
779
780         gt_iir = I915_READ(GTIIR);
781         if (gt_iir) {
782                 snb_gt_irq_handler(dev, dev_priv, gt_iir);
783                 I915_WRITE(GTIIR, gt_iir);
784                 ret = IRQ_HANDLED;
785         }
786
787         de_iir = I915_READ(DEIIR);
788         if (de_iir) {
789                 if (de_iir & DE_AUX_CHANNEL_A_IVB)
790                         dp_aux_irq_handler(dev);
791
792                 if (de_iir & DE_GSE_IVB)
793                         intel_opregion_gse_intr(dev);
794
795                 for (i = 0; i < 3; i++) {
796                         if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
797                                 drm_handle_vblank(dev, i);
798                         if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
799                                 intel_prepare_page_flip(dev, i);
800                                 intel_finish_page_flip_plane(dev, i);
801                         }
802                 }
803
804                 /* check event from PCH */
805                 if (de_iir & DE_PCH_EVENT_IVB) {
806                         u32 pch_iir = I915_READ(SDEIIR);
807
808                         cpt_irq_handler(dev, pch_iir);
809
810                         /* clear PCH hotplug event before clear CPU irq */
811                         I915_WRITE(SDEIIR, pch_iir);
812                 }
813
814                 I915_WRITE(DEIIR, de_iir);
815                 ret = IRQ_HANDLED;
816         }
817
818         pm_iir = I915_READ(GEN6_PMIIR);
819         if (pm_iir) {
820                 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
821                         gen6_queue_rps_work(dev_priv, pm_iir);
822                 I915_WRITE(GEN6_PMIIR, pm_iir);
823                 ret = IRQ_HANDLED;
824         }
825
826         I915_WRITE(DEIER, de_ier);
827         POSTING_READ(DEIER);
828         I915_WRITE(SDEIER, sde_ier);
829         POSTING_READ(SDEIER);
830
831         return ret;
832 }
833
834 static void ilk_gt_irq_handler(struct drm_device *dev,
835                                struct drm_i915_private *dev_priv,
836                                u32 gt_iir)
837 {
838         if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
839                 notify_ring(dev, &dev_priv->ring[RCS]);
840         if (gt_iir & GT_BSD_USER_INTERRUPT)
841                 notify_ring(dev, &dev_priv->ring[VCS]);
842 }
843
844 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
845 {
846         struct drm_device *dev = (struct drm_device *) arg;
847         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
848         int ret = IRQ_NONE;
849         u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
850
851         atomic_inc(&dev_priv->irq_received);
852
853         /* disable master interrupt before clearing iir  */
854         de_ier = I915_READ(DEIER);
855         I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
856         POSTING_READ(DEIER);
857
858         /* Disable south interrupts. We'll only write to SDEIIR once, so further
859          * interrupts will will be stored on its back queue, and then we'll be
860          * able to process them after we restore SDEIER (as soon as we restore
861          * it, we'll get an interrupt if SDEIIR still has something to process
862          * due to its back queue). */
863         sde_ier = I915_READ(SDEIER);
864         I915_WRITE(SDEIER, 0);
865         POSTING_READ(SDEIER);
866
867         de_iir = I915_READ(DEIIR);
868         gt_iir = I915_READ(GTIIR);
869         pm_iir = I915_READ(GEN6_PMIIR);
870
871         if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
872                 goto done;
873
874         ret = IRQ_HANDLED;
875
876         if (IS_GEN5(dev))
877                 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
878         else
879                 snb_gt_irq_handler(dev, dev_priv, gt_iir);
880
881         if (de_iir & DE_AUX_CHANNEL_A)
882                 dp_aux_irq_handler(dev);
883
884         if (de_iir & DE_GSE)
885                 intel_opregion_gse_intr(dev);
886
887         if (de_iir & DE_PIPEA_VBLANK)
888                 drm_handle_vblank(dev, 0);
889
890         if (de_iir & DE_PIPEB_VBLANK)
891                 drm_handle_vblank(dev, 1);
892
893         if (de_iir & DE_PLANEA_FLIP_DONE) {
894                 intel_prepare_page_flip(dev, 0);
895                 intel_finish_page_flip_plane(dev, 0);
896         }
897
898         if (de_iir & DE_PLANEB_FLIP_DONE) {
899                 intel_prepare_page_flip(dev, 1);
900                 intel_finish_page_flip_plane(dev, 1);
901         }
902
903         /* check event from PCH */
904         if (de_iir & DE_PCH_EVENT) {
905                 u32 pch_iir = I915_READ(SDEIIR);
906
907                 if (HAS_PCH_CPT(dev))
908                         cpt_irq_handler(dev, pch_iir);
909                 else
910                         ibx_irq_handler(dev, pch_iir);
911
912                 /* should clear PCH hotplug event before clear CPU irq */
913                 I915_WRITE(SDEIIR, pch_iir);
914         }
915
916         if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
917                 ironlake_handle_rps_change(dev);
918
919         if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
920                 gen6_queue_rps_work(dev_priv, pm_iir);
921
922         I915_WRITE(GTIIR, gt_iir);
923         I915_WRITE(DEIIR, de_iir);
924         I915_WRITE(GEN6_PMIIR, pm_iir);
925
926 done:
927         I915_WRITE(DEIER, de_ier);
928         POSTING_READ(DEIER);
929         I915_WRITE(SDEIER, sde_ier);
930         POSTING_READ(SDEIER);
931
932         return ret;
933 }
934
935 /**
936  * i915_error_work_func - do process context error handling work
937  * @work: work struct
938  *
939  * Fire an error uevent so userspace can see that a hang or error
940  * was detected.
941  */
942 static void i915_error_work_func(struct work_struct *work)
943 {
944         struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
945                                                     work);
946         drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
947                                                     gpu_error);
948         struct drm_device *dev = dev_priv->dev;
949         struct intel_ring_buffer *ring;
950         char *error_event[] = { "ERROR=1", NULL };
951         char *reset_event[] = { "RESET=1", NULL };
952         char *reset_done_event[] = { "ERROR=0", NULL };
953         int i, ret;
954
955         kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
956
957         /*
958          * Note that there's only one work item which does gpu resets, so we
959          * need not worry about concurrent gpu resets potentially incrementing
960          * error->reset_counter twice. We only need to take care of another
961          * racing irq/hangcheck declaring the gpu dead for a second time. A
962          * quick check for that is good enough: schedule_work ensures the
963          * correct ordering between hang detection and this work item, and since
964          * the reset in-progress bit is only ever set by code outside of this
965          * work we don't need to worry about any other races.
966          */
967         if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
968                 DRM_DEBUG_DRIVER("resetting chip\n");
969                 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
970                                    reset_event);
971
972                 ret = i915_reset(dev);
973
974                 if (ret == 0) {
975                         /*
976                          * After all the gem state is reset, increment the reset
977                          * counter and wake up everyone waiting for the reset to
978                          * complete.
979                          *
980                          * Since unlock operations are a one-sided barrier only,
981                          * we need to insert a barrier here to order any seqno
982                          * updates before
983                          * the counter increment.
984                          */
985                         smp_mb__before_atomic_inc();
986                         atomic_inc(&dev_priv->gpu_error.reset_counter);
987
988                         kobject_uevent_env(&dev->primary->kdev.kobj,
989                                            KOBJ_CHANGE, reset_done_event);
990                 } else {
991                         atomic_set(&error->reset_counter, I915_WEDGED);
992                 }
993
994                 for_each_ring(ring, dev_priv, i)
995                         wake_up_all(&ring->irq_queue);
996
997                 intel_display_handle_reset(dev);
998
999                 wake_up_all(&dev_priv->gpu_error.reset_queue);
1000         }
1001 }
1002
1003 /* NB: please notice the memset */
1004 static void i915_get_extra_instdone(struct drm_device *dev,
1005                                     uint32_t *instdone)
1006 {
1007         struct drm_i915_private *dev_priv = dev->dev_private;
1008         memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
1009
1010         switch(INTEL_INFO(dev)->gen) {
1011         case 2:
1012         case 3:
1013                 instdone[0] = I915_READ(INSTDONE);
1014                 break;
1015         case 4:
1016         case 5:
1017         case 6:
1018                 instdone[0] = I915_READ(INSTDONE_I965);
1019                 instdone[1] = I915_READ(INSTDONE1);
1020                 break;
1021         default:
1022                 WARN_ONCE(1, "Unsupported platform\n");
1023         case 7:
1024                 instdone[0] = I915_READ(GEN7_INSTDONE_1);
1025                 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1026                 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
1027                 instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
1028                 break;
1029         }
1030 }
1031
1032 #ifdef CONFIG_DEBUG_FS
1033 static struct drm_i915_error_object *
1034 i915_error_object_create_sized(struct drm_i915_private *dev_priv,
1035                                struct drm_i915_gem_object *src,
1036                                const int num_pages)
1037 {
1038         struct drm_i915_error_object *dst;
1039         int i;
1040         u32 reloc_offset;
1041
1042         if (src == NULL || src->pages == NULL)
1043                 return NULL;
1044
1045         dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
1046         if (dst == NULL)
1047                 return NULL;
1048
1049         reloc_offset = src->gtt_offset;
1050         for (i = 0; i < num_pages; i++) {
1051                 unsigned long flags;
1052                 void *d;
1053
1054                 d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
1055                 if (d == NULL)
1056                         goto unwind;
1057
1058                 local_irq_save(flags);
1059                 if (reloc_offset < dev_priv->gtt.mappable_end &&
1060                     src->has_global_gtt_mapping) {
1061                         void __iomem *s;
1062
1063                         /* Simply ignore tiling or any overlapping fence.
1064                          * It's part of the error state, and this hopefully
1065                          * captures what the GPU read.
1066                          */
1067
1068                         s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
1069                                                      reloc_offset);
1070                         memcpy_fromio(d, s, PAGE_SIZE);
1071                         io_mapping_unmap_atomic(s);
1072                 } else if (src->stolen) {
1073                         unsigned long offset;
1074
1075                         offset = dev_priv->mm.stolen_base;
1076                         offset += src->stolen->start;
1077                         offset += i << PAGE_SHIFT;
1078
1079                         memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
1080                 } else {
1081                         struct page *page;
1082                         void *s;
1083
1084                         page = i915_gem_object_get_page(src, i);
1085
1086                         drm_clflush_pages(&page, 1);
1087
1088                         s = kmap_atomic(page);
1089                         memcpy(d, s, PAGE_SIZE);
1090                         kunmap_atomic(s);
1091
1092                         drm_clflush_pages(&page, 1);
1093                 }
1094                 local_irq_restore(flags);
1095
1096                 dst->pages[i] = d;
1097
1098                 reloc_offset += PAGE_SIZE;
1099         }
1100         dst->page_count = num_pages;
1101         dst->gtt_offset = src->gtt_offset;
1102
1103         return dst;
1104
1105 unwind:
1106         while (i--)
1107                 kfree(dst->pages[i]);
1108         kfree(dst);
1109         return NULL;
1110 }
1111 #define i915_error_object_create(dev_priv, src) \
1112         i915_error_object_create_sized((dev_priv), (src), \
1113                                        (src)->base.size>>PAGE_SHIFT)
1114
1115 static void
1116 i915_error_object_free(struct drm_i915_error_object *obj)
1117 {
1118         int page;
1119
1120         if (obj == NULL)
1121                 return;
1122
1123         for (page = 0; page < obj->page_count; page++)
1124                 kfree(obj->pages[page]);
1125
1126         kfree(obj);
1127 }
1128
1129 void
1130 i915_error_state_free(struct kref *error_ref)
1131 {
1132         struct drm_i915_error_state *error = container_of(error_ref,
1133                                                           typeof(*error), ref);
1134         int i;
1135
1136         for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
1137                 i915_error_object_free(error->ring[i].batchbuffer);
1138                 i915_error_object_free(error->ring[i].ringbuffer);
1139                 kfree(error->ring[i].requests);
1140         }
1141
1142         kfree(error->active_bo);
1143         kfree(error->overlay);
1144         kfree(error);
1145 }
1146 static void capture_bo(struct drm_i915_error_buffer *err,
1147                        struct drm_i915_gem_object *obj)
1148 {
1149         err->size = obj->base.size;
1150         err->name = obj->base.name;
1151         err->rseqno = obj->last_read_seqno;
1152         err->wseqno = obj->last_write_seqno;
1153         err->gtt_offset = obj->gtt_offset;
1154         err->read_domains = obj->base.read_domains;
1155         err->write_domain = obj->base.write_domain;
1156         err->fence_reg = obj->fence_reg;
1157         err->pinned = 0;
1158         if (obj->pin_count > 0)
1159                 err->pinned = 1;
1160         if (obj->user_pin_count > 0)
1161                 err->pinned = -1;
1162         err->tiling = obj->tiling_mode;
1163         err->dirty = obj->dirty;
1164         err->purgeable = obj->madv != I915_MADV_WILLNEED;
1165         err->ring = obj->ring ? obj->ring->id : -1;
1166         err->cache_level = obj->cache_level;
1167 }
1168
1169 static u32 capture_active_bo(struct drm_i915_error_buffer *err,
1170                              int count, struct list_head *head)
1171 {
1172         struct drm_i915_gem_object *obj;
1173         int i = 0;
1174
1175         list_for_each_entry(obj, head, mm_list) {
1176                 capture_bo(err++, obj);
1177                 if (++i == count)
1178                         break;
1179         }
1180
1181         return i;
1182 }
1183
1184 static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
1185                              int count, struct list_head *head)
1186 {
1187         struct drm_i915_gem_object *obj;
1188         int i = 0;
1189
1190         list_for_each_entry(obj, head, gtt_list) {
1191                 if (obj->pin_count == 0)
1192                         continue;
1193
1194                 capture_bo(err++, obj);
1195                 if (++i == count)
1196                         break;
1197         }
1198
1199         return i;
1200 }
1201
1202 static void i915_gem_record_fences(struct drm_device *dev,
1203                                    struct drm_i915_error_state *error)
1204 {
1205         struct drm_i915_private *dev_priv = dev->dev_private;
1206         int i;
1207
1208         /* Fences */
1209         switch (INTEL_INFO(dev)->gen) {
1210         case 7:
1211         case 6:
1212                 for (i = 0; i < 16; i++)
1213                         error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
1214                 break;
1215         case 5:
1216         case 4:
1217                 for (i = 0; i < 16; i++)
1218                         error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
1219                 break;
1220         case 3:
1221                 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
1222                         for (i = 0; i < 8; i++)
1223                                 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
1224         case 2:
1225                 for (i = 0; i < 8; i++)
1226                         error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
1227                 break;
1228
1229         default:
1230                 BUG();
1231         }
1232 }
1233
1234 static struct drm_i915_error_object *
1235 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1236                              struct intel_ring_buffer *ring)
1237 {
1238         struct drm_i915_gem_object *obj;
1239         u32 seqno;
1240
1241         if (!ring->get_seqno)
1242                 return NULL;
1243
1244         if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
1245                 u32 acthd = I915_READ(ACTHD);
1246
1247                 if (WARN_ON(ring->id != RCS))
1248                         return NULL;
1249
1250                 obj = ring->private;
1251                 if (acthd >= obj->gtt_offset &&
1252                     acthd < obj->gtt_offset + obj->base.size)
1253                         return i915_error_object_create(dev_priv, obj);
1254         }
1255
1256         seqno = ring->get_seqno(ring, false);
1257         list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1258                 if (obj->ring != ring)
1259                         continue;
1260
1261                 if (i915_seqno_passed(seqno, obj->last_read_seqno))
1262                         continue;
1263
1264                 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
1265                         continue;
1266
1267                 /* We need to copy these to an anonymous buffer as the simplest
1268                  * method to avoid being overwritten by userspace.
1269                  */
1270                 return i915_error_object_create(dev_priv, obj);
1271         }
1272
1273         return NULL;
1274 }
1275
1276 static void i915_record_ring_state(struct drm_device *dev,
1277                                    struct drm_i915_error_state *error,
1278                                    struct intel_ring_buffer *ring)
1279 {
1280         struct drm_i915_private *dev_priv = dev->dev_private;
1281
1282         if (INTEL_INFO(dev)->gen >= 6) {
1283                 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
1284                 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
1285                 error->semaphore_mboxes[ring->id][0]
1286                         = I915_READ(RING_SYNC_0(ring->mmio_base));
1287                 error->semaphore_mboxes[ring->id][1]
1288                         = I915_READ(RING_SYNC_1(ring->mmio_base));
1289                 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
1290                 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
1291         }
1292
1293         if (INTEL_INFO(dev)->gen >= 4) {
1294                 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
1295                 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
1296                 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
1297                 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
1298                 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
1299                 if (ring->id == RCS)
1300                         error->bbaddr = I915_READ64(BB_ADDR);
1301         } else {
1302                 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
1303                 error->ipeir[ring->id] = I915_READ(IPEIR);
1304                 error->ipehr[ring->id] = I915_READ(IPEHR);
1305                 error->instdone[ring->id] = I915_READ(INSTDONE);
1306         }
1307
1308         error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
1309         error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
1310         error->seqno[ring->id] = ring->get_seqno(ring, false);
1311         error->acthd[ring->id] = intel_ring_get_active_head(ring);
1312         error->head[ring->id] = I915_READ_HEAD(ring);
1313         error->tail[ring->id] = I915_READ_TAIL(ring);
1314         error->ctl[ring->id] = I915_READ_CTL(ring);
1315
1316         error->cpu_ring_head[ring->id] = ring->head;
1317         error->cpu_ring_tail[ring->id] = ring->tail;
1318 }
1319
1320
1321 static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
1322                                            struct drm_i915_error_state *error,
1323                                            struct drm_i915_error_ring *ering)
1324 {
1325         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1326         struct drm_i915_gem_object *obj;
1327
1328         /* Currently render ring is the only HW context user */
1329         if (ring->id != RCS || !error->ccid)
1330                 return;
1331
1332         list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
1333                 if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
1334                         ering->ctx = i915_error_object_create_sized(dev_priv,
1335                                                                     obj, 1);
1336                 }
1337         }
1338 }
1339
1340 static void i915_gem_record_rings(struct drm_device *dev,
1341                                   struct drm_i915_error_state *error)
1342 {
1343         struct drm_i915_private *dev_priv = dev->dev_private;
1344         struct intel_ring_buffer *ring;
1345         struct drm_i915_gem_request *request;
1346         int i, count;
1347
1348         for_each_ring(ring, dev_priv, i) {
1349                 i915_record_ring_state(dev, error, ring);
1350
1351                 error->ring[i].batchbuffer =
1352                         i915_error_first_batchbuffer(dev_priv, ring);
1353
1354                 error->ring[i].ringbuffer =
1355                         i915_error_object_create(dev_priv, ring->obj);
1356
1357
1358                 i915_gem_record_active_context(ring, error, &error->ring[i]);
1359
1360                 count = 0;
1361                 list_for_each_entry(request, &ring->request_list, list)
1362                         count++;
1363
1364                 error->ring[i].num_requests = count;
1365                 error->ring[i].requests =
1366                         kmalloc(count*sizeof(struct drm_i915_error_request),
1367                                 GFP_ATOMIC);
1368                 if (error->ring[i].requests == NULL) {
1369                         error->ring[i].num_requests = 0;
1370                         continue;
1371                 }
1372
1373                 count = 0;
1374                 list_for_each_entry(request, &ring->request_list, list) {
1375                         struct drm_i915_error_request *erq;
1376
1377                         erq = &error->ring[i].requests[count++];
1378                         erq->seqno = request->seqno;
1379                         erq->jiffies = request->emitted_jiffies;
1380                         erq->tail = request->tail;
1381                 }
1382         }
1383 }
1384
1385 /**
1386  * i915_capture_error_state - capture an error record for later analysis
1387  * @dev: drm device
1388  *
1389  * Should be called when an error is detected (either a hang or an error
1390  * interrupt) to capture error state from the time of the error.  Fills
1391  * out a structure which becomes available in debugfs for user level tools
1392  * to pick up.
1393  */
1394 static void i915_capture_error_state(struct drm_device *dev)
1395 {
1396         struct drm_i915_private *dev_priv = dev->dev_private;
1397         struct drm_i915_gem_object *obj;
1398         struct drm_i915_error_state *error;
1399         unsigned long flags;
1400         int i, pipe;
1401
1402         spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1403         error = dev_priv->gpu_error.first_error;
1404         spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1405         if (error)
1406                 return;
1407
1408         /* Account for pipe specific data like PIPE*STAT */
1409         error = kzalloc(sizeof(*error), GFP_ATOMIC);
1410         if (!error) {
1411                 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1412                 return;
1413         }
1414
1415         DRM_INFO("capturing error event; look for more information in "
1416                  "/sys/kernel/debug/dri/%d/i915_error_state\n",
1417                  dev->primary->index);
1418
1419         kref_init(&error->ref);
1420         error->eir = I915_READ(EIR);
1421         error->pgtbl_er = I915_READ(PGTBL_ER);
1422         if (HAS_HW_CONTEXTS(dev))
1423                 error->ccid = I915_READ(CCID);
1424
1425         if (HAS_PCH_SPLIT(dev))
1426                 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1427         else if (IS_VALLEYVIEW(dev))
1428                 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1429         else if (IS_GEN2(dev))
1430                 error->ier = I915_READ16(IER);
1431         else
1432                 error->ier = I915_READ(IER);
1433
1434         if (INTEL_INFO(dev)->gen >= 6)
1435                 error->derrmr = I915_READ(DERRMR);
1436
1437         if (IS_VALLEYVIEW(dev))
1438                 error->forcewake = I915_READ(FORCEWAKE_VLV);
1439         else if (INTEL_INFO(dev)->gen >= 7)
1440                 error->forcewake = I915_READ(FORCEWAKE_MT);
1441         else if (INTEL_INFO(dev)->gen == 6)
1442                 error->forcewake = I915_READ(FORCEWAKE);
1443
1444         if (!HAS_PCH_SPLIT(dev))
1445                 for_each_pipe(pipe)
1446                         error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1447
1448         if (INTEL_INFO(dev)->gen >= 6) {
1449                 error->error = I915_READ(ERROR_GEN6);
1450                 error->done_reg = I915_READ(DONE_REG);
1451         }
1452
1453         if (INTEL_INFO(dev)->gen == 7)
1454                 error->err_int = I915_READ(GEN7_ERR_INT);
1455
1456         i915_get_extra_instdone(dev, error->extra_instdone);
1457
1458         i915_gem_record_fences(dev, error);
1459         i915_gem_record_rings(dev, error);
1460
1461         /* Record buffers on the active and pinned lists. */
1462         error->active_bo = NULL;
1463         error->pinned_bo = NULL;
1464
1465         i = 0;
1466         list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1467                 i++;
1468         error->active_bo_count = i;
1469         list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
1470                 if (obj->pin_count)
1471                         i++;
1472         error->pinned_bo_count = i - error->active_bo_count;
1473
1474         error->active_bo = NULL;
1475         error->pinned_bo = NULL;
1476         if (i) {
1477                 error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
1478                                            GFP_ATOMIC);
1479                 if (error->active_bo)
1480                         error->pinned_bo =
1481                                 error->active_bo + error->active_bo_count;
1482         }
1483
1484         if (error->active_bo)
1485                 error->active_bo_count =
1486                         capture_active_bo(error->active_bo,
1487                                           error->active_bo_count,
1488                                           &dev_priv->mm.active_list);
1489
1490         if (error->pinned_bo)
1491                 error->pinned_bo_count =
1492                         capture_pinned_bo(error->pinned_bo,
1493                                           error->pinned_bo_count,
1494                                           &dev_priv->mm.bound_list);
1495
1496         do_gettimeofday(&error->time);
1497
1498         error->overlay = intel_overlay_capture_error_state(dev);
1499         error->display = intel_display_capture_error_state(dev);
1500
1501         spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1502         if (dev_priv->gpu_error.first_error == NULL) {
1503                 dev_priv->gpu_error.first_error = error;
1504                 error = NULL;
1505         }
1506         spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1507
1508         if (error)
1509                 i915_error_state_free(&error->ref);
1510 }
1511
1512 void i915_destroy_error_state(struct drm_device *dev)
1513 {
1514         struct drm_i915_private *dev_priv = dev->dev_private;
1515         struct drm_i915_error_state *error;
1516         unsigned long flags;
1517
1518         spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1519         error = dev_priv->gpu_error.first_error;
1520         dev_priv->gpu_error.first_error = NULL;
1521         spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1522
1523         if (error)
1524                 kref_put(&error->ref, i915_error_state_free);
1525 }
1526 #else
1527 #define i915_capture_error_state(x)
1528 #endif
1529
1530 static void i915_report_and_clear_eir(struct drm_device *dev)
1531 {
1532         struct drm_i915_private *dev_priv = dev->dev_private;
1533         uint32_t instdone[I915_NUM_INSTDONE_REG];
1534         u32 eir = I915_READ(EIR);
1535         int pipe, i;
1536
1537         if (!eir)
1538                 return;
1539
1540         pr_err("render error detected, EIR: 0x%08x\n", eir);
1541
1542         i915_get_extra_instdone(dev, instdone);
1543
1544         if (IS_G4X(dev)) {
1545                 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1546                         u32 ipeir = I915_READ(IPEIR_I965);
1547
1548                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1549                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1550                         for (i = 0; i < ARRAY_SIZE(instdone); i++)
1551                                 pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1552                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
1553                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1554                         I915_WRITE(IPEIR_I965, ipeir);
1555                         POSTING_READ(IPEIR_I965);
1556                 }
1557                 if (eir & GM45_ERROR_PAGE_TABLE) {
1558                         u32 pgtbl_err = I915_READ(PGTBL_ER);
1559                         pr_err("page table error\n");
1560                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
1561                         I915_WRITE(PGTBL_ER, pgtbl_err);
1562                         POSTING_READ(PGTBL_ER);
1563                 }
1564         }
1565
1566         if (!IS_GEN2(dev)) {
1567                 if (eir & I915_ERROR_PAGE_TABLE) {
1568                         u32 pgtbl_err = I915_READ(PGTBL_ER);
1569                         pr_err("page table error\n");
1570                         pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
1571                         I915_WRITE(PGTBL_ER, pgtbl_err);
1572                         POSTING_READ(PGTBL_ER);
1573                 }
1574         }
1575
1576         if (eir & I915_ERROR_MEMORY_REFRESH) {
1577                 pr_err("memory refresh error:\n");
1578                 for_each_pipe(pipe)
1579                         pr_err("pipe %c stat: 0x%08x\n",
1580                                pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
1581                 /* pipestat has already been acked */
1582         }
1583         if (eir & I915_ERROR_INSTRUCTION) {
1584                 pr_err("instruction error\n");
1585                 pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
1586                 for (i = 0; i < ARRAY_SIZE(instdone); i++)
1587                         pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1588                 if (INTEL_INFO(dev)->gen < 4) {
1589                         u32 ipeir = I915_READ(IPEIR);
1590
1591                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
1592                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
1593                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
1594                         I915_WRITE(IPEIR, ipeir);
1595                         POSTING_READ(IPEIR);
1596                 } else {
1597                         u32 ipeir = I915_READ(IPEIR_I965);
1598
1599                         pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1600                         pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1601                         pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
1602                         pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1603                         I915_WRITE(IPEIR_I965, ipeir);
1604                         POSTING_READ(IPEIR_I965);
1605                 }
1606         }
1607
1608         I915_WRITE(EIR, eir);
1609         POSTING_READ(EIR);
1610         eir = I915_READ(EIR);
1611         if (eir) {
1612                 /*
1613                  * some errors might have become stuck,
1614                  * mask them.
1615                  */
1616                 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
1617                 I915_WRITE(EMR, I915_READ(EMR) | eir);
1618                 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1619         }
1620 }
1621
1622 /**
1623  * i915_handle_error - handle an error interrupt
1624  * @dev: drm device
1625  *
1626  * Do some basic checking of regsiter state at error interrupt time and
1627  * dump it to the syslog.  Also call i915_capture_error_state() to make
1628  * sure we get a record and make it available in debugfs.  Fire a uevent
1629  * so userspace knows something bad happened (should trigger collection
1630  * of a ring dump etc.).
1631  */
1632 void i915_handle_error(struct drm_device *dev, bool wedged)
1633 {
1634         struct drm_i915_private *dev_priv = dev->dev_private;
1635         struct intel_ring_buffer *ring;
1636         int i;
1637
1638         i915_capture_error_state(dev);
1639         i915_report_and_clear_eir(dev);
1640
1641         if (wedged) {
1642                 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
1643                                 &dev_priv->gpu_error.reset_counter);
1644
1645                 /*
1646                  * Wakeup waiting processes so that the reset work item
1647                  * doesn't deadlock trying to grab various locks.
1648                  */
1649                 for_each_ring(ring, dev_priv, i)
1650                         wake_up_all(&ring->irq_queue);
1651         }
1652
1653         queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
1654 }
1655
1656 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1657 {
1658         drm_i915_private_t *dev_priv = dev->dev_private;
1659         struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1660         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1661         struct drm_i915_gem_object *obj;
1662         struct intel_unpin_work *work;
1663         unsigned long flags;
1664         bool stall_detected;
1665
1666         /* Ignore early vblank irqs */
1667         if (intel_crtc == NULL)
1668                 return;
1669
1670         spin_lock_irqsave(&dev->event_lock, flags);
1671         work = intel_crtc->unpin_work;
1672
1673         if (work == NULL ||
1674             atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1675             !work->enable_stall_check) {
1676                 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
1677                 spin_unlock_irqrestore(&dev->event_lock, flags);
1678                 return;
1679         }
1680
1681         /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
1682         obj = work->pending_flip_obj;
1683         if (INTEL_INFO(dev)->gen >= 4) {
1684                 int dspsurf = DSPSURF(intel_crtc->plane);
1685                 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
1686                                         obj->gtt_offset;
1687         } else {
1688                 int dspaddr = DSPADDR(intel_crtc->plane);
1689                 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
1690                                                         crtc->y * crtc->fb->pitches[0] +
1691                                                         crtc->x * crtc->fb->bits_per_pixel/8);
1692         }
1693
1694         spin_unlock_irqrestore(&dev->event_lock, flags);
1695
1696         if (stall_detected) {
1697                 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
1698                 intel_prepare_page_flip(dev, intel_crtc->plane);
1699         }
1700 }
1701
1702 /* Called from drm generic code, passed 'crtc' which
1703  * we use as a pipe index
1704  */
1705 static int i915_enable_vblank(struct drm_device *dev, int pipe)
1706 {
1707         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1708         unsigned long irqflags;
1709
1710         if (!i915_pipe_enabled(dev, pipe))
1711                 return -EINVAL;
1712
1713         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1714         if (INTEL_INFO(dev)->gen >= 4)
1715                 i915_enable_pipestat(dev_priv, pipe,
1716                                      PIPE_START_VBLANK_INTERRUPT_ENABLE);
1717         else
1718                 i915_enable_pipestat(dev_priv, pipe,
1719                                      PIPE_VBLANK_INTERRUPT_ENABLE);
1720
1721         /* maintain vblank delivery even in deep C-states */
1722         if (dev_priv->info->gen == 3)
1723                 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
1724         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1725
1726         return 0;
1727 }
1728
1729 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1730 {
1731         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1732         unsigned long irqflags;
1733
1734         if (!i915_pipe_enabled(dev, pipe))
1735                 return -EINVAL;
1736
1737         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1738         ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1739                                     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1740         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1741
1742         return 0;
1743 }
1744
1745 static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
1746 {
1747         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1748         unsigned long irqflags;
1749
1750         if (!i915_pipe_enabled(dev, pipe))
1751                 return -EINVAL;
1752
1753         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1754         ironlake_enable_display_irq(dev_priv,
1755                                     DE_PIPEA_VBLANK_IVB << (5 * pipe));
1756         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1757
1758         return 0;
1759 }
1760
1761 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1762 {
1763         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1764         unsigned long irqflags;
1765         u32 imr;
1766
1767         if (!i915_pipe_enabled(dev, pipe))
1768                 return -EINVAL;
1769
1770         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1771         imr = I915_READ(VLV_IMR);
1772         if (pipe == 0)
1773                 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1774         else
1775                 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1776         I915_WRITE(VLV_IMR, imr);
1777         i915_enable_pipestat(dev_priv, pipe,
1778                              PIPE_START_VBLANK_INTERRUPT_ENABLE);
1779         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1780
1781         return 0;
1782 }
1783
1784 /* Called from drm generic code, passed 'crtc' which
1785  * we use as a pipe index
1786  */
1787 static void i915_disable_vblank(struct drm_device *dev, int pipe)
1788 {
1789         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1790         unsigned long irqflags;
1791
1792         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1793         if (dev_priv->info->gen == 3)
1794                 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
1795
1796         i915_disable_pipestat(dev_priv, pipe,
1797                               PIPE_VBLANK_INTERRUPT_ENABLE |
1798                               PIPE_START_VBLANK_INTERRUPT_ENABLE);
1799         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1800 }
1801
1802 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1803 {
1804         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1805         unsigned long irqflags;
1806
1807         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1808         ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1809                                      DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1810         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1811 }
1812
1813 static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
1814 {
1815         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1816         unsigned long irqflags;
1817
1818         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1819         ironlake_disable_display_irq(dev_priv,
1820                                      DE_PIPEA_VBLANK_IVB << (pipe * 5));
1821         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1822 }
1823
1824 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1825 {
1826         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1827         unsigned long irqflags;
1828         u32 imr;
1829
1830         spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1831         i915_disable_pipestat(dev_priv, pipe,
1832                               PIPE_START_VBLANK_INTERRUPT_ENABLE);
1833         imr = I915_READ(VLV_IMR);
1834         if (pipe == 0)
1835                 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1836         else
1837                 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1838         I915_WRITE(VLV_IMR, imr);
1839         spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1840 }
1841
1842 static u32
1843 ring_last_seqno(struct intel_ring_buffer *ring)
1844 {
1845         return list_entry(ring->request_list.prev,
1846                           struct drm_i915_gem_request, list)->seqno;
1847 }
1848
1849 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1850 {
1851         if (list_empty(&ring->request_list) ||
1852             i915_seqno_passed(ring->get_seqno(ring, false),
1853                               ring_last_seqno(ring))) {
1854                 /* Issue a wake-up to catch stuck h/w. */
1855                 if (waitqueue_active(&ring->irq_queue)) {
1856                         DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
1857                                   ring->name);
1858                         wake_up_all(&ring->irq_queue);
1859                         *err = true;
1860                 }
1861                 return true;
1862         }
1863         return false;
1864 }
1865
1866 static bool semaphore_passed(struct intel_ring_buffer *ring)
1867 {
1868         struct drm_i915_private *dev_priv = ring->dev->dev_private;
1869         u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
1870         struct intel_ring_buffer *signaller;
1871         u32 cmd, ipehr, acthd_min;
1872
1873         ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
1874         if ((ipehr & ~(0x3 << 16)) !=
1875             (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
1876                 return false;
1877
1878         /* ACTHD is likely pointing to the dword after the actual command,
1879          * so scan backwards until we find the MBOX.
1880          */
1881         acthd_min = max((int)acthd - 3 * 4, 0);
1882         do {
1883                 cmd = ioread32(ring->virtual_start + acthd);
1884                 if (cmd == ipehr)
1885                         break;
1886
1887                 acthd -= 4;
1888                 if (acthd < acthd_min)
1889                         return false;
1890         } while (1);
1891
1892         signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
1893         return i915_seqno_passed(signaller->get_seqno(signaller, false),
1894                                  ioread32(ring->virtual_start+acthd+4)+1);
1895 }
1896
1897 static bool kick_ring(struct intel_ring_buffer *ring)
1898 {
1899         struct drm_device *dev = ring->dev;
1900         struct drm_i915_private *dev_priv = dev->dev_private;
1901         u32 tmp = I915_READ_CTL(ring);
1902         if (tmp & RING_WAIT) {
1903                 DRM_ERROR("Kicking stuck wait on %s\n",
1904                           ring->name);
1905                 I915_WRITE_CTL(ring, tmp);
1906                 return true;
1907         }
1908
1909         if (INTEL_INFO(dev)->gen >= 6 &&
1910             tmp & RING_WAIT_SEMAPHORE &&
1911             semaphore_passed(ring)) {
1912                 DRM_ERROR("Kicking stuck semaphore on %s\n",
1913                           ring->name);
1914                 I915_WRITE_CTL(ring, tmp);
1915                 return true;
1916         }
1917         return false;
1918 }
1919
1920 static bool i915_hangcheck_hung(struct drm_device *dev)
1921 {
1922         drm_i915_private_t *dev_priv = dev->dev_private;
1923
1924         if (dev_priv->gpu_error.hangcheck_count++ > 1) {
1925                 bool hung = true;
1926
1927                 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
1928                 i915_handle_error(dev, true);
1929
1930                 if (!IS_GEN2(dev)) {
1931                         struct intel_ring_buffer *ring;
1932                         int i;
1933
1934                         /* Is the chip hanging on a WAIT_FOR_EVENT?
1935                          * If so we can simply poke the RB_WAIT bit
1936                          * and break the hang. This should work on
1937                          * all but the second generation chipsets.
1938                          */
1939                         for_each_ring(ring, dev_priv, i)
1940                                 hung &= !kick_ring(ring);
1941                 }
1942
1943                 return hung;
1944         }
1945
1946         return false;
1947 }
1948
1949 /**
1950  * This is called when the chip hasn't reported back with completed
1951  * batchbuffers in a long time. The first time this is called we simply record
1952  * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
1953  * again, we assume the chip is wedged and try to fix it.
1954  */
1955 void i915_hangcheck_elapsed(unsigned long data)
1956 {
1957         struct drm_device *dev = (struct drm_device *)data;
1958         drm_i915_private_t *dev_priv = dev->dev_private;
1959         uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
1960         struct intel_ring_buffer *ring;
1961         bool err = false, idle;
1962         int i;
1963
1964         if (!i915_enable_hangcheck)
1965                 return;
1966
1967         memset(acthd, 0, sizeof(acthd));
1968         idle = true;
1969         for_each_ring(ring, dev_priv, i) {
1970             idle &= i915_hangcheck_ring_idle(ring, &err);
1971             acthd[i] = intel_ring_get_active_head(ring);
1972         }
1973
1974         /* If all work is done then ACTHD clearly hasn't advanced. */
1975         if (idle) {
1976                 if (err) {
1977                         if (i915_hangcheck_hung(dev))
1978                                 return;
1979
1980                         goto repeat;
1981                 }
1982
1983                 dev_priv->gpu_error.hangcheck_count = 0;
1984                 return;
1985         }
1986
1987         i915_get_extra_instdone(dev, instdone);
1988         if (memcmp(dev_priv->gpu_error.last_acthd, acthd,
1989                    sizeof(acthd)) == 0 &&
1990             memcmp(dev_priv->gpu_error.prev_instdone, instdone,
1991                    sizeof(instdone)) == 0) {
1992                 if (i915_hangcheck_hung(dev))
1993                         return;
1994         } else {
1995                 dev_priv->gpu_error.hangcheck_count = 0;
1996
1997                 memcpy(dev_priv->gpu_error.last_acthd, acthd,
1998                        sizeof(acthd));
1999                 memcpy(dev_priv->gpu_error.prev_instdone, instdone,
2000                        sizeof(instdone));
2001         }
2002
2003 repeat:
2004         /* Reset timer case chip hangs without another request being added */
2005         mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2006                   round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
2007 }
2008
2009 /* drm_dma.h hooks
2010 */
2011 static void ironlake_irq_preinstall(struct drm_device *dev)
2012 {
2013         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2014
2015         atomic_set(&dev_priv->irq_received, 0);
2016
2017         I915_WRITE(HWSTAM, 0xeffe);
2018
2019         /* XXX hotplug from PCH */
2020
2021         I915_WRITE(DEIMR, 0xffffffff);
2022         I915_WRITE(DEIER, 0x0);
2023         POSTING_READ(DEIER);
2024
2025         /* and GT */
2026         I915_WRITE(GTIMR, 0xffffffff);
2027         I915_WRITE(GTIER, 0x0);
2028         POSTING_READ(GTIER);
2029
2030         /* south display irq */
2031         I915_WRITE(SDEIMR, 0xffffffff);
2032         /*
2033          * SDEIER is also touched by the interrupt handler to work around missed
2034          * PCH interrupts. Hence we can't update it after the interrupt handler
2035          * is enabled - instead we unconditionally enable all PCH interrupt
2036          * sources here, but then only unmask them as needed with SDEIMR.
2037          */
2038         I915_WRITE(SDEIER, 0xffffffff);
2039         POSTING_READ(SDEIER);
2040 }
2041
2042 static void valleyview_irq_preinstall(struct drm_device *dev)
2043 {
2044         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2045         int pipe;
2046
2047         atomic_set(&dev_priv->irq_received, 0);
2048
2049         /* VLV magic */
2050         I915_WRITE(VLV_IMR, 0);
2051         I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2052         I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2053         I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2054
2055         /* and GT */
2056         I915_WRITE(GTIIR, I915_READ(GTIIR));
2057         I915_WRITE(GTIIR, I915_READ(GTIIR));
2058         I915_WRITE(GTIMR, 0xffffffff);
2059         I915_WRITE(GTIER, 0x0);
2060         POSTING_READ(GTIER);
2061
2062         I915_WRITE(DPINVGTT, 0xff);
2063
2064         I915_WRITE(PORT_HOTPLUG_EN, 0);
2065         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2066         for_each_pipe(pipe)
2067                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2068         I915_WRITE(VLV_IIR, 0xffffffff);
2069         I915_WRITE(VLV_IMR, 0xffffffff);
2070         I915_WRITE(VLV_IER, 0x0);
2071         POSTING_READ(VLV_IER);
2072 }
2073
2074 static void ibx_hpd_irq_setup(struct drm_device *dev)
2075 {
2076         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2077         struct drm_mode_config *mode_config = &dev->mode_config;
2078         struct intel_encoder *intel_encoder;
2079         u32 mask = ~I915_READ(SDEIMR);
2080         u32 hotplug;
2081
2082         if (HAS_PCH_IBX(dev)) {
2083                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2084                         mask |= hpd_ibx[intel_encoder->hpd_pin];
2085         } else {
2086                 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2087                         mask |= hpd_cpt[intel_encoder->hpd_pin];
2088         }
2089
2090         I915_WRITE(SDEIMR, ~mask);
2091
2092         /*
2093          * Enable digital hotplug on the PCH, and configure the DP short pulse
2094          * duration to 2ms (which is the minimum in the Display Port spec)
2095          *
2096          * This register is the same on all known PCH chips.
2097          */
2098         hotplug = I915_READ(PCH_PORT_HOTPLUG);
2099         hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2100         hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2101         hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2102         hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2103         I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2104 }
2105
2106 static void ibx_irq_postinstall(struct drm_device *dev)
2107 {
2108         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2109         u32 mask;
2110
2111         if (HAS_PCH_IBX(dev))
2112                 mask = SDE_GMBUS | SDE_AUX_MASK;
2113         else
2114                 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
2115         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2116         I915_WRITE(SDEIMR, ~mask);
2117 }
2118
2119 static int ironlake_irq_postinstall(struct drm_device *dev)
2120 {
2121         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2122         /* enable kind of interrupts always enabled */
2123         u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2124                            DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2125                            DE_AUX_CHANNEL_A;
2126         u32 render_irqs;
2127
2128         dev_priv->irq_mask = ~display_mask;
2129
2130         /* should always can generate irq */
2131         I915_WRITE(DEIIR, I915_READ(DEIIR));
2132         I915_WRITE(DEIMR, dev_priv->irq_mask);
2133         I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
2134         POSTING_READ(DEIER);
2135
2136         dev_priv->gt_irq_mask = ~0;
2137
2138         I915_WRITE(GTIIR, I915_READ(GTIIR));
2139         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2140
2141         if (IS_GEN6(dev))
2142                 render_irqs =
2143                         GT_USER_INTERRUPT |
2144                         GEN6_BSD_USER_INTERRUPT |
2145                         GEN6_BLITTER_USER_INTERRUPT;
2146         else
2147                 render_irqs =
2148                         GT_USER_INTERRUPT |
2149                         GT_PIPE_NOTIFY |
2150                         GT_BSD_USER_INTERRUPT;
2151         I915_WRITE(GTIER, render_irqs);
2152         POSTING_READ(GTIER);
2153
2154         ibx_irq_postinstall(dev);
2155
2156         if (IS_IRONLAKE_M(dev)) {
2157                 /* Clear & enable PCU event interrupts */
2158                 I915_WRITE(DEIIR, DE_PCU_EVENT);
2159                 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
2160                 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
2161         }
2162
2163         return 0;
2164 }
2165
2166 static int ivybridge_irq_postinstall(struct drm_device *dev)
2167 {
2168         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2169         /* enable kind of interrupts always enabled */
2170         u32 display_mask =
2171                 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
2172                 DE_PLANEC_FLIP_DONE_IVB |
2173                 DE_PLANEB_FLIP_DONE_IVB |
2174                 DE_PLANEA_FLIP_DONE_IVB |
2175                 DE_AUX_CHANNEL_A_IVB;
2176         u32 render_irqs;
2177
2178         dev_priv->irq_mask = ~display_mask;
2179
2180         /* should always can generate irq */
2181         I915_WRITE(DEIIR, I915_READ(DEIIR));
2182         I915_WRITE(DEIMR, dev_priv->irq_mask);
2183         I915_WRITE(DEIER,
2184                    display_mask |
2185                    DE_PIPEC_VBLANK_IVB |
2186                    DE_PIPEB_VBLANK_IVB |
2187                    DE_PIPEA_VBLANK_IVB);
2188         POSTING_READ(DEIER);
2189
2190         dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
2191
2192         I915_WRITE(GTIIR, I915_READ(GTIIR));
2193         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2194
2195         render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
2196                 GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
2197         I915_WRITE(GTIER, render_irqs);
2198         POSTING_READ(GTIER);
2199
2200         ibx_irq_postinstall(dev);
2201
2202         return 0;
2203 }
2204
2205 static int valleyview_irq_postinstall(struct drm_device *dev)
2206 {
2207         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2208         u32 enable_mask;
2209         u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
2210         u32 render_irqs;
2211         u16 msid;
2212
2213         enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2214         enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2215                 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2216                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2217                 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2218
2219         /*
2220          *Leave vblank interrupts masked initially.  enable/disable will
2221          * toggle them based on usage.
2222          */
2223         dev_priv->irq_mask = (~enable_mask) |
2224                 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2225                 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2226
2227         /* Hack for broken MSIs on VLV */
2228         pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
2229         pci_read_config_word(dev->pdev, 0x98, &msid);
2230         msid &= 0xff; /* mask out delivery bits */
2231         msid |= (1<<14);
2232         pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
2233
2234         I915_WRITE(PORT_HOTPLUG_EN, 0);
2235         POSTING_READ(PORT_HOTPLUG_EN);
2236
2237         I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2238         I915_WRITE(VLV_IER, enable_mask);
2239         I915_WRITE(VLV_IIR, 0xffffffff);
2240         I915_WRITE(PIPESTAT(0), 0xffff);
2241         I915_WRITE(PIPESTAT(1), 0xffff);
2242         POSTING_READ(VLV_IER);
2243
2244         i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2245         i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2246         i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2247
2248         I915_WRITE(VLV_IIR, 0xffffffff);
2249         I915_WRITE(VLV_IIR, 0xffffffff);
2250
2251         I915_WRITE(GTIIR, I915_READ(GTIIR));
2252         I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2253
2254         render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
2255                 GEN6_BLITTER_USER_INTERRUPT;
2256         I915_WRITE(GTIER, render_irqs);
2257         POSTING_READ(GTIER);
2258
2259         /* ack & enable invalid PTE error interrupts */
2260 #if 0 /* FIXME: add support to irq handler for checking these bits */
2261         I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2262         I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2263 #endif
2264
2265         I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2266
2267         return 0;
2268 }
2269
2270 static void valleyview_irq_uninstall(struct drm_device *dev)
2271 {
2272         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2273         int pipe;
2274
2275         if (!dev_priv)
2276                 return;
2277
2278         for_each_pipe(pipe)
2279                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2280
2281         I915_WRITE(HWSTAM, 0xffffffff);
2282         I915_WRITE(PORT_HOTPLUG_EN, 0);
2283         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2284         for_each_pipe(pipe)
2285                 I915_WRITE(PIPESTAT(pipe), 0xffff);
2286         I915_WRITE(VLV_IIR, 0xffffffff);
2287         I915_WRITE(VLV_IMR, 0xffffffff);
2288         I915_WRITE(VLV_IER, 0x0);
2289         POSTING_READ(VLV_IER);
2290 }
2291
2292 static void ironlake_irq_uninstall(struct drm_device *dev)
2293 {
2294         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2295
2296         if (!dev_priv)
2297                 return;
2298
2299         I915_WRITE(HWSTAM, 0xffffffff);
2300
2301         I915_WRITE(DEIMR, 0xffffffff);
2302         I915_WRITE(DEIER, 0x0);
2303         I915_WRITE(DEIIR, I915_READ(DEIIR));
2304
2305         I915_WRITE(GTIMR, 0xffffffff);
2306         I915_WRITE(GTIER, 0x0);
2307         I915_WRITE(GTIIR, I915_READ(GTIIR));
2308
2309         I915_WRITE(SDEIMR, 0xffffffff);
2310         I915_WRITE(SDEIER, 0x0);
2311         I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2312 }
2313
2314 static void i8xx_irq_preinstall(struct drm_device * dev)
2315 {
2316         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2317         int pipe;
2318
2319         atomic_set(&dev_priv->irq_received, 0);
2320
2321         for_each_pipe(pipe)
2322                 I915_WRITE(PIPESTAT(pipe), 0);
2323         I915_WRITE16(IMR, 0xffff);
2324         I915_WRITE16(IER, 0x0);
2325         POSTING_READ16(IER);
2326 }
2327
2328 static int i8xx_irq_postinstall(struct drm_device *dev)
2329 {
2330         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2331
2332         I915_WRITE16(EMR,
2333                      ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2334
2335         /* Unmask the interrupts that we always want on. */
2336         dev_priv->irq_mask =
2337                 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2338                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2339                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2340                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2341                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2342         I915_WRITE16(IMR, dev_priv->irq_mask);
2343
2344         I915_WRITE16(IER,
2345                      I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2346                      I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2347                      I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2348                      I915_USER_INTERRUPT);
2349         POSTING_READ16(IER);
2350
2351         return 0;
2352 }
2353
2354 /*
2355  * Returns true when a page flip has completed.
2356  */
2357 static bool i8xx_handle_vblank(struct drm_device *dev,
2358                                int pipe, u16 iir)
2359 {
2360         drm_i915_private_t *dev_priv = dev->dev_private;
2361         u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
2362
2363         if (!drm_handle_vblank(dev, pipe))
2364                 return false;
2365
2366         if ((iir & flip_pending) == 0)
2367                 return false;
2368
2369         intel_prepare_page_flip(dev, pipe);
2370
2371         /* We detect FlipDone by looking for the change in PendingFlip from '1'
2372          * to '0' on the following vblank, i.e. IIR has the Pendingflip
2373          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2374          * the flip is completed (no longer pending). Since this doesn't raise
2375          * an interrupt per se, we watch for the change at vblank.
2376          */
2377         if (I915_READ16(ISR) & flip_pending)
2378                 return false;
2379
2380         intel_finish_page_flip(dev, pipe);
2381
2382         return true;
2383 }
2384
2385 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
2386 {
2387         struct drm_device *dev = (struct drm_device *) arg;
2388         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2389         u16 iir, new_iir;
2390         u32 pipe_stats[2];
2391         unsigned long irqflags;
2392         int irq_received;
2393         int pipe;
2394         u16 flip_mask =
2395                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2396                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2397
2398         atomic_inc(&dev_priv->irq_received);
2399
2400         iir = I915_READ16(IIR);
2401         if (iir == 0)
2402                 return IRQ_NONE;
2403
2404         while (iir & ~flip_mask) {
2405                 /* Can't rely on pipestat interrupt bit in iir as it might
2406                  * have been cleared after the pipestat interrupt was received.
2407                  * It doesn't set the bit in iir again, but it still produces
2408                  * interrupts (for non-MSI).
2409                  */
2410                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2411                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2412                         i915_handle_error(dev, false);
2413
2414                 for_each_pipe(pipe) {
2415                         int reg = PIPESTAT(pipe);
2416                         pipe_stats[pipe] = I915_READ(reg);
2417
2418                         /*
2419                          * Clear the PIPE*STAT regs before the IIR
2420                          */
2421                         if (pipe_stats[pipe] & 0x8000ffff) {
2422                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2423                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
2424                                                          pipe_name(pipe));
2425                                 I915_WRITE(reg, pipe_stats[pipe]);
2426                                 irq_received = 1;
2427                         }
2428                 }
2429                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2430
2431                 I915_WRITE16(IIR, iir & ~flip_mask);
2432                 new_iir = I915_READ16(IIR); /* Flush posted writes */
2433
2434                 i915_update_dri1_breadcrumb(dev);
2435
2436                 if (iir & I915_USER_INTERRUPT)
2437                         notify_ring(dev, &dev_priv->ring[RCS]);
2438
2439                 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
2440                     i8xx_handle_vblank(dev, 0, iir))
2441                         flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
2442
2443                 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
2444                     i8xx_handle_vblank(dev, 1, iir))
2445                         flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
2446
2447                 iir = new_iir;
2448         }
2449
2450         return IRQ_HANDLED;
2451 }
2452
2453 static void i8xx_irq_uninstall(struct drm_device * dev)
2454 {
2455         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2456         int pipe;
2457
2458         for_each_pipe(pipe) {
2459                 /* Clear enable bits; then clear status bits */
2460                 I915_WRITE(PIPESTAT(pipe), 0);
2461                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2462         }
2463         I915_WRITE16(IMR, 0xffff);
2464         I915_WRITE16(IER, 0x0);
2465         I915_WRITE16(IIR, I915_READ16(IIR));
2466 }
2467
2468 static void i915_irq_preinstall(struct drm_device * dev)
2469 {
2470         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2471         int pipe;
2472
2473         atomic_set(&dev_priv->irq_received, 0);
2474
2475         if (I915_HAS_HOTPLUG(dev)) {
2476                 I915_WRITE(PORT_HOTPLUG_EN, 0);
2477                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2478         }
2479
2480         I915_WRITE16(HWSTAM, 0xeffe);
2481         for_each_pipe(pipe)
2482                 I915_WRITE(PIPESTAT(pipe), 0);
2483         I915_WRITE(IMR, 0xffffffff);
2484         I915_WRITE(IER, 0x0);
2485         POSTING_READ(IER);
2486 }
2487
2488 static int i915_irq_postinstall(struct drm_device *dev)
2489 {
2490         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2491         u32 enable_mask;
2492
2493         I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2494
2495         /* Unmask the interrupts that we always want on. */
2496         dev_priv->irq_mask =
2497                 ~(I915_ASLE_INTERRUPT |
2498                   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2499                   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2500                   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2501                   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2502                   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2503
2504         enable_mask =
2505                 I915_ASLE_INTERRUPT |
2506                 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2507                 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2508                 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2509                 I915_USER_INTERRUPT;
2510
2511         if (I915_HAS_HOTPLUG(dev)) {
2512                 I915_WRITE(PORT_HOTPLUG_EN, 0);
2513                 POSTING_READ(PORT_HOTPLUG_EN);
2514
2515                 /* Enable in IER... */
2516                 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2517                 /* and unmask in IMR */
2518                 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2519         }
2520
2521         I915_WRITE(IMR, dev_priv->irq_mask);
2522         I915_WRITE(IER, enable_mask);
2523         POSTING_READ(IER);
2524
2525         intel_opregion_enable_asle(dev);
2526
2527         return 0;
2528 }
2529
2530 /*
2531  * Returns true when a page flip has completed.
2532  */
2533 static bool i915_handle_vblank(struct drm_device *dev,
2534                                int plane, int pipe, u32 iir)
2535 {
2536         drm_i915_private_t *dev_priv = dev->dev_private;
2537         u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
2538
2539         if (!drm_handle_vblank(dev, pipe))
2540                 return false;
2541
2542         if ((iir & flip_pending) == 0)
2543                 return false;
2544
2545         intel_prepare_page_flip(dev, plane);
2546
2547         /* We detect FlipDone by looking for the change in PendingFlip from '1'
2548          * to '0' on the following vblank, i.e. IIR has the Pendingflip
2549          * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2550          * the flip is completed (no longer pending). Since this doesn't raise
2551          * an interrupt per se, we watch for the change at vblank.
2552          */
2553         if (I915_READ(ISR) & flip_pending)
2554                 return false;
2555
2556         intel_finish_page_flip(dev, pipe);
2557
2558         return true;
2559 }
2560
2561 static irqreturn_t i915_irq_handler(int irq, void *arg)
2562 {
2563         struct drm_device *dev = (struct drm_device *) arg;
2564         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2565         u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
2566         unsigned long irqflags;
2567         u32 flip_mask =
2568                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2569                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2570         int pipe, ret = IRQ_NONE;
2571
2572         atomic_inc(&dev_priv->irq_received);
2573
2574         iir = I915_READ(IIR);
2575         do {
2576                 bool irq_received = (iir & ~flip_mask) != 0;
2577                 bool blc_event = false;
2578
2579                 /* Can't rely on pipestat interrupt bit in iir as it might
2580                  * have been cleared after the pipestat interrupt was received.
2581                  * It doesn't set the bit in iir again, but it still produces
2582                  * interrupts (for non-MSI).
2583                  */
2584                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2585                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2586                         i915_handle_error(dev, false);
2587
2588                 for_each_pipe(pipe) {
2589                         int reg = PIPESTAT(pipe);
2590                         pipe_stats[pipe] = I915_READ(reg);
2591
2592                         /* Clear the PIPE*STAT regs before the IIR */
2593                         if (pipe_stats[pipe] & 0x8000ffff) {
2594                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2595                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
2596                                                          pipe_name(pipe));
2597                                 I915_WRITE(reg, pipe_stats[pipe]);
2598                                 irq_received = true;
2599                         }
2600                 }
2601                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2602
2603                 if (!irq_received)
2604                         break;
2605
2606                 /* Consume port.  Then clear IIR or we'll miss events */
2607                 if ((I915_HAS_HOTPLUG(dev)) &&
2608                     (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2609                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2610
2611                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2612                                   hotplug_status);
2613                         if (hotplug_status & HOTPLUG_INT_STATUS_I915)
2614                                 queue_work(dev_priv->wq,
2615                                            &dev_priv->hotplug_work);
2616
2617                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2618                         POSTING_READ(PORT_HOTPLUG_STAT);
2619                 }
2620
2621                 I915_WRITE(IIR, iir & ~flip_mask);
2622                 new_iir = I915_READ(IIR); /* Flush posted writes */
2623
2624                 if (iir & I915_USER_INTERRUPT)
2625                         notify_ring(dev, &dev_priv->ring[RCS]);
2626
2627                 for_each_pipe(pipe) {
2628                         int plane = pipe;
2629                         if (IS_MOBILE(dev))
2630                                 plane = !plane;
2631
2632                         if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2633                             i915_handle_vblank(dev, plane, pipe, iir))
2634                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
2635
2636                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2637                                 blc_event = true;
2638                 }
2639
2640                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2641                         intel_opregion_asle_intr(dev);
2642
2643                 /* With MSI, interrupts are only generated when iir
2644                  * transitions from zero to nonzero.  If another bit got
2645                  * set while we were handling the existing iir bits, then
2646                  * we would never get another interrupt.
2647                  *
2648                  * This is fine on non-MSI as well, as if we hit this path
2649                  * we avoid exiting the interrupt handler only to generate
2650                  * another one.
2651                  *
2652                  * Note that for MSI this could cause a stray interrupt report
2653                  * if an interrupt landed in the time between writing IIR and
2654                  * the posting read.  This should be rare enough to never
2655                  * trigger the 99% of 100,000 interrupts test for disabling
2656                  * stray interrupts.
2657                  */
2658                 ret = IRQ_HANDLED;
2659                 iir = new_iir;
2660         } while (iir & ~flip_mask);
2661
2662         i915_update_dri1_breadcrumb(dev);
2663
2664         return ret;
2665 }
2666
2667 static void i915_irq_uninstall(struct drm_device * dev)
2668 {
2669         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2670         int pipe;
2671
2672         if (I915_HAS_HOTPLUG(dev)) {
2673                 I915_WRITE(PORT_HOTPLUG_EN, 0);
2674                 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2675         }
2676
2677         I915_WRITE16(HWSTAM, 0xffff);
2678         for_each_pipe(pipe) {
2679                 /* Clear enable bits; then clear status bits */
2680                 I915_WRITE(PIPESTAT(pipe), 0);
2681                 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2682         }
2683         I915_WRITE(IMR, 0xffffffff);
2684         I915_WRITE(IER, 0x0);
2685
2686         I915_WRITE(IIR, I915_READ(IIR));
2687 }
2688
2689 static void i965_irq_preinstall(struct drm_device * dev)
2690 {
2691         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2692         int pipe;
2693
2694         atomic_set(&dev_priv->irq_received, 0);
2695
2696         I915_WRITE(PORT_HOTPLUG_EN, 0);
2697         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2698
2699         I915_WRITE(HWSTAM, 0xeffe);
2700         for_each_pipe(pipe)
2701                 I915_WRITE(PIPESTAT(pipe), 0);
2702         I915_WRITE(IMR, 0xffffffff);
2703         I915_WRITE(IER, 0x0);
2704         POSTING_READ(IER);
2705 }
2706
2707 static int i965_irq_postinstall(struct drm_device *dev)
2708 {
2709         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2710         u32 enable_mask;
2711         u32 error_mask;
2712
2713         /* Unmask the interrupts that we always want on. */
2714         dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2715                                I915_DISPLAY_PORT_INTERRUPT |
2716                                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2717                                I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2718                                I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2719                                I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2720                                I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2721
2722         enable_mask = ~dev_priv->irq_mask;
2723         enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2724                          I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
2725         enable_mask |= I915_USER_INTERRUPT;
2726
2727         if (IS_G4X(dev))
2728                 enable_mask |= I915_BSD_USER_INTERRUPT;
2729
2730         i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2731
2732         /*
2733          * Enable some error detection, note the instruction error mask
2734          * bit is reserved, so we leave it masked.
2735          */
2736         if (IS_G4X(dev)) {
2737                 error_mask = ~(GM45_ERROR_PAGE_TABLE |
2738                                GM45_ERROR_MEM_PRIV |
2739                                GM45_ERROR_CP_PRIV |
2740                                I915_ERROR_MEMORY_REFRESH);
2741         } else {
2742                 error_mask = ~(I915_ERROR_PAGE_TABLE |
2743                                I915_ERROR_MEMORY_REFRESH);
2744         }
2745         I915_WRITE(EMR, error_mask);
2746
2747         I915_WRITE(IMR, dev_priv->irq_mask);
2748         I915_WRITE(IER, enable_mask);
2749         POSTING_READ(IER);
2750
2751         I915_WRITE(PORT_HOTPLUG_EN, 0);
2752         POSTING_READ(PORT_HOTPLUG_EN);
2753
2754         intel_opregion_enable_asle(dev);
2755
2756         return 0;
2757 }
2758
2759 static void i915_hpd_irq_setup(struct drm_device *dev)
2760 {
2761         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2762         struct drm_mode_config *mode_config = &dev->mode_config;
2763         struct intel_encoder *encoder;
2764         u32 hotplug_en;
2765
2766         if (I915_HAS_HOTPLUG(dev)) {
2767                 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2768                 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
2769                 /* Note HDMI and DP share hotplug bits */
2770                 /* enable bits are the same for all generations */
2771                 list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
2772                         hotplug_en |= hpd_mask_i915[encoder->hpd_pin];
2773                 /* Programming the CRT detection parameters tends
2774                    to generate a spurious hotplug event about three
2775                    seconds later.  So just do it once.
2776                 */
2777                 if (IS_G4X(dev))
2778                         hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
2779                 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
2780                 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2781
2782                 /* Ignore TV since it's buggy */
2783                 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2784         }
2785 }
2786
2787 static irqreturn_t i965_irq_handler(int irq, void *arg)
2788 {
2789         struct drm_device *dev = (struct drm_device *) arg;
2790         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2791         u32 iir, new_iir;
2792         u32 pipe_stats[I915_MAX_PIPES];
2793         unsigned long irqflags;
2794         int irq_received;
2795         int ret = IRQ_NONE, pipe;
2796         u32 flip_mask =
2797                 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2798                 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2799
2800         atomic_inc(&dev_priv->irq_received);
2801
2802         iir = I915_READ(IIR);
2803
2804         for (;;) {
2805                 bool blc_event = false;
2806
2807                 irq_received = (iir & ~flip_mask) != 0;
2808
2809                 /* Can't rely on pipestat interrupt bit in iir as it might
2810                  * have been cleared after the pipestat interrupt was received.
2811                  * It doesn't set the bit in iir again, but it still produces
2812                  * interrupts (for non-MSI).
2813                  */
2814                 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2815                 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2816                         i915_handle_error(dev, false);
2817
2818                 for_each_pipe(pipe) {
2819                         int reg = PIPESTAT(pipe);
2820                         pipe_stats[pipe] = I915_READ(reg);
2821
2822                         /*
2823                          * Clear the PIPE*STAT regs before the IIR
2824                          */
2825                         if (pipe_stats[pipe] & 0x8000ffff) {
2826                                 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2827                                         DRM_DEBUG_DRIVER("pipe %c underrun\n",
2828                                                          pipe_name(pipe));
2829                                 I915_WRITE(reg, pipe_stats[pipe]);
2830                                 irq_received = 1;
2831                         }
2832                 }
2833                 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2834
2835                 if (!irq_received)
2836                         break;
2837
2838                 ret = IRQ_HANDLED;
2839
2840                 /* Consume port.  Then clear IIR or we'll miss events */
2841                 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
2842                         u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2843
2844                         DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2845                                   hotplug_status);
2846                         if (hotplug_status & (IS_G4X(dev) ?
2847                                               HOTPLUG_INT_STATUS_G4X :
2848                                               HOTPLUG_INT_STATUS_I965))
2849                                 queue_work(dev_priv->wq,
2850                                            &dev_priv->hotplug_work);
2851
2852                         I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2853                         I915_READ(PORT_HOTPLUG_STAT);
2854                 }
2855
2856                 I915_WRITE(IIR, iir & ~flip_mask);
2857                 new_iir = I915_READ(IIR); /* Flush posted writes */
2858
2859                 if (iir & I915_USER_INTERRUPT)
2860                         notify_ring(dev, &dev_priv->ring[RCS]);
2861                 if (iir & I915_BSD_USER_INTERRUPT)
2862                         notify_ring(dev, &dev_priv->ring[VCS]);
2863
2864                 for_each_pipe(pipe) {
2865                         if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
2866                             i915_handle_vblank(dev, pipe, pipe, iir))
2867                                 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
2868
2869                         if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2870                                 blc_event = true;
2871                 }
2872
2873
2874                 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2875                         intel_opregion_asle_intr(dev);
2876
2877                 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2878                         gmbus_irq_handler(dev);
2879
2880                 /* With MSI, interrupts are only generated when iir
2881                  * transitions from zero to nonzero.  If another bit got
2882                  * set while we were handling the existing iir bits, then
2883                  * we would never get another interrupt.
2884                  *
2885                  * This is fine on non-MSI as well, as if we hit this path
2886                  * we avoid exiting the interrupt handler only to generate
2887                  * another one.
2888                  *
2889                  * Note that for MSI this could cause a stray interrupt report
2890                  * if an interrupt landed in the time between writing IIR and
2891                  * the posting read.  This should be rare enough to never
2892                  * trigger the 99% of 100,000 interrupts test for disabling
2893                  * stray interrupts.
2894                  */
2895                 iir = new_iir;
2896         }
2897
2898         i915_update_dri1_breadcrumb(dev);
2899
2900         return ret;
2901 }
2902
2903 static void i965_irq_uninstall(struct drm_device * dev)
2904 {
2905         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2906         int pipe;
2907
2908         if (!dev_priv)
2909                 return;
2910
2911         I915_WRITE(PORT_HOTPLUG_EN, 0);
2912         I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2913
2914         I915_WRITE(HWSTAM, 0xffffffff);
2915         for_each_pipe(pipe)
2916                 I915_WRITE(PIPESTAT(pipe), 0);
2917         I915_WRITE(IMR, 0xffffffff);
2918         I915_WRITE(IER, 0x0);
2919
2920         for_each_pipe(pipe)
2921                 I915_WRITE(PIPESTAT(pipe),
2922                            I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
2923         I915_WRITE(IIR, I915_READ(IIR));
2924 }
2925
2926 void intel_irq_init(struct drm_device *dev)
2927 {
2928         struct drm_i915_private *dev_priv = dev->dev_private;
2929
2930         INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
2931         INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
2932         INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
2933         INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
2934
2935         setup_timer(&dev_priv->gpu_error.hangcheck_timer,
2936                     i915_hangcheck_elapsed,
2937                     (unsigned long) dev);
2938
2939         pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
2940
2941         dev->driver->get_vblank_counter = i915_get_vblank_counter;
2942         dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
2943         if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
2944                 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
2945                 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
2946         }
2947
2948         if (drm_core_check_feature(dev, DRIVER_MODESET))
2949                 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
2950         else
2951                 dev->driver->get_vblank_timestamp = NULL;
2952         dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
2953
2954         if (IS_VALLEYVIEW(dev)) {
2955                 dev->driver->irq_handler = valleyview_irq_handler;
2956                 dev->driver->irq_preinstall = valleyview_irq_preinstall;
2957                 dev->driver->irq_postinstall = valleyview_irq_postinstall;
2958                 dev->driver->irq_uninstall = valleyview_irq_uninstall;
2959                 dev->driver->enable_vblank = valleyview_enable_vblank;
2960                 dev->driver->disable_vblank = valleyview_disable_vblank;
2961                 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
2962         } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
2963                 /* Share pre & uninstall handlers with ILK/SNB */
2964                 dev->driver->irq_handler = ivybridge_irq_handler;
2965                 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2966                 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
2967                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2968                 dev->driver->enable_vblank = ivybridge_enable_vblank;
2969                 dev->driver->disable_vblank = ivybridge_disable_vblank;
2970                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
2971         } else if (HAS_PCH_SPLIT(dev)) {
2972                 dev->driver->irq_handler = ironlake_irq_handler;
2973                 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2974                 dev->driver->irq_postinstall = ironlake_irq_postinstall;
2975                 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2976                 dev->driver->enable_vblank = ironlake_enable_vblank;
2977                 dev->driver->disable_vblank = ironlake_disable_vblank;
2978                 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
2979         } else {
2980                 if (INTEL_INFO(dev)->gen == 2) {
2981                         dev->driver->irq_preinstall = i8xx_irq_preinstall;
2982                         dev->driver->irq_postinstall = i8xx_irq_postinstall;
2983                         dev->driver->irq_handler = i8xx_irq_handler;
2984                         dev->driver->irq_uninstall = i8xx_irq_uninstall;
2985                 } else if (INTEL_INFO(dev)->gen == 3) {
2986                         dev->driver->irq_preinstall = i915_irq_preinstall;
2987                         dev->driver->irq_postinstall = i915_irq_postinstall;
2988                         dev->driver->irq_uninstall = i915_irq_uninstall;
2989                         dev->driver->irq_handler = i915_irq_handler;
2990                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
2991                 } else {
2992                         dev->driver->irq_preinstall = i965_irq_preinstall;
2993                         dev->driver->irq_postinstall = i965_irq_postinstall;
2994                         dev->driver->irq_uninstall = i965_irq_uninstall;
2995                         dev->driver->irq_handler = i965_irq_handler;
2996                         dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
2997                 }
2998                 dev->driver->enable_vblank = i915_enable_vblank;
2999                 dev->driver->disable_vblank = i915_disable_vblank;
3000         }
3001 }
3002
3003 void intel_hpd_init(struct drm_device *dev)
3004 {
3005         struct drm_i915_private *dev_priv = dev->dev_private;
3006
3007         if (dev_priv->display.hpd_irq_setup)
3008                 dev_priv->display.hpd_irq_setup(dev);
3009 }