/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

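/*
 * Hotplug pin -> hardware bit tables. Each array below is indexed by
 * enum hpd_pin and translates a pin to the matching hotplug enable or
 * status bit for the given platform; hotplug_irq_storm_detect() uses
 * them to map triggered status bits back to pins.
 */
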
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static void ibx_hpd_irq_setup(struct drm_device *dev);
static void i915_hpd_irq_setup(struct drm_device *dev);

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

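/*
 * Note on the IMR semantics used throughout this file: a set bit in
 * DEIMR/SDEIMR *masks* (disables) the corresponding interrupt, so enabling
 * an interrupt means clearing its mask bit and disabling means setting it,
 * as in the two helpers above.
 */
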
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		if (!ivb_can_enable_err_int(dev))
			return;

		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
					 ERR_INT_FIFO_UNDERRUN_B |
					 ERR_INT_FIFO_UNDERRUN_C);

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
	}
}

static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
					    bool enable)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
						SDE_TRANSB_FIFO_UNDER;

	if (enable)
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
	else
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);

	POSTING_READ(SDEIMR);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		if (!cpt_can_enable_serr_int(dev))
			return;

		I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
				     SERR_INT_TRANS_B_FIFO_UNDERRUN |
				     SERR_INT_TRANS_C_FIFO_UNDERRUN);

		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
	} else {
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
	}

	POSTING_READ(SDEIMR);
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe p;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	unsigned long flags;
	bool ret;

	if (HAS_PCH_LPT(dev)) {
		crtc = NULL;
		for_each_pipe(p) {
			struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
			if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
				crtc = c;
				break;
			}
		}
		if (!crtc) {
			DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
			return false;
		}
	} else {
		crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	}
	intel_crtc = to_intel_crtc(crtc);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(intel_crtc, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

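/*
 * A minimal usage sketch for the two helpers above (illustrative only; the
 * actual call sites live elsewhere in the driver): save the previous
 * reporting state around an operation that is expected to trigger a
 * spurious underrun, then restore it:
 *
 *	bool old = intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
 *	...poke hardware that may underrun...
 *	intel_set_cpu_fifo_underrun_reporting(dev, pipe, old);
 */
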
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

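/*
 * PIPESTAT layout note for the two helpers above: bits 31:16 hold the
 * interrupt enable bits and bits 15:0 the corresponding sticky status bits
 * (write 1 to clear). Callers pass an enable bit; "mask >> 16" is the
 * matching status bit, so "mask | (mask >> 16)" both enables the interrupt
 * and acknowledges any status that is already pending.
 */
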
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

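/*
 * On G4X and newer hardware there is a dedicated frame counter register,
 * so the counter can be read in one go instead of splicing together the
 * separate high/low fields as above.
 */
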
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      old_status, connector->status);
	return (old_status != connector->status);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors
	 */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;
	unsigned long flags;

	spin_lock_irqsave(&mchdev_lock, flags);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock_irqrestore(&mchdev_lock, flags);

	return;
}

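/*
 * Note on the DRPS delay values used above: on Ironlake a *lower* delay
 * value corresponds to a higher GPU frequency, which is why the "busier
 * than max_avg" branch decrements cur_delay and clamps it against
 * max_delay rather than the other way around.
 */
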
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
	}
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		new_delay = dev_priv->rps.cur_delay + 1;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (IS_VALLEYVIEW(dev_priv->dev) &&
		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay >= dev_priv->rps.min_delay &&
	    new_delay <= dev_priv->rps.max_delay) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, new_delay);
		else
			gen6_set_rps(dev_priv->dev, new_delay);
	}

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		/*
		 * On VLV, when we enter RC6 we may not be at the minimum
		 * voltage level, so arm a timer to check. It should only
		 * fire when there's activity or once after we've entered
		 * RC6, and then won't be re-armed until the next RPS interrupt.
		 */
		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
				 msecs_to_jiffies(100));
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}

static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
}

/* Legacy way of handling PM interrupts */
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

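/*
 * HPD interrupt storm detection: if one pin generates more than
 * HPD_STORM_THRESHOLD interrupts within HPD_STORM_DETECT_PERIOD ms, the pin
 * is marked HPD_MARK_DISABLED and the caller switches the connector from
 * hotplug detection to polling; i915_hotplug_work_func() then arms
 * hotplug_reenable_timer to re-enable the pin after
 * I915_REENABLE_HOTPLUG_DELAY ms.
 */
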
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
					    u32 hotplug_trigger,
					    const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;
	int i;
	bool ret = false;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	for (i = 1; i < HPD_NUM_PINS; i++) {

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			ret = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
		}
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return ret;
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

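/*
 * Note that dp_aux_irq_handler() wakes the same gmbus_wait_queue as the
 * GMBUS handler: GMBUS and DP AUX completion waiters share one wait queue,
 * and each waiter re-checks its own completion condition after waking.
 */
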
/* Unlike gen6_queue_rps_work() from which this function is originally derived,
 * we must be able to deal with other PM interrupts. This is complicated because
 * of the way in which we use the masks to defer the RPS work (which for
 * posterity is necessary because of forcewake).
 */
static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
			       u32 pm_iir)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
	if (dev_priv->rps.pm_iir) {
		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
		/* never want to mask useful interrupts. (also posting read) */
		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
		/* TODO: if queue_work is slow, move it out of the spinlock */
		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
			i915_handle_error(dev_priv->dev, false);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_trigger) {
				if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
					i915_hpd_irq_setup(dev);
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);
			}
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir & GEN6_PM_RPS_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

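/*
 * The loop above keeps draining VLV_IIR/GTIIR/GEN6_PMIIR and only returns
 * once all three read back zero, so events that arrive while the handler
 * is already running are still picked up before it exits.
 */
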
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	if (hotplug_trigger) {
		if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx))
			ibx_hpd_irq_setup(dev);
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
	}
	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port);
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");

	I915_WRITE(GEN7_ERR_INT, err_int);
}

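/*
 * In the underrun handlers above and below,
 * intel_set_*_fifo_underrun_reporting() returns the previous reporting
 * state, so the DRM_DEBUG message is only emitted the first time an
 * underrun is seen; reporting then stays disabled, avoiding an interrupt
 * storm from a persistently underrunning pipe or transcoder.
 */
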
static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	if (hotplug_trigger) {
		if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt))
			ibx_hpd_irq_setup(dev);
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
	}
	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	if (IS_HASWELL(dev) &&
	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be able
	 * to process them after we restore SDEIER (as soon as we restore it,
	 * we'll get an interrupt if SDEIIR still has something to process due
	 * to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* On Haswell, also mask ERR_INT because we don't want to risk
	 * generating "unclaimed register" interrupts from inside the interrupt
	 * handler. */
	if (IS_HASWELL(dev)) {
		spin_lock(&dev_priv->irq_lock);
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
		spin_unlock(&dev_priv->irq_lock);
	}

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (de_iir & DE_ERR_INT_IVB)
			ivb_err_int_handler(dev);

		if (de_iir & DE_AUX_CHANNEL_A_IVB)
			dp_aux_irq_handler(dev);

		if (de_iir & DE_GSE_IVB)
			intel_opregion_asle_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}

		/* check event from PCH */
		if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
			u32 pch_iir = I915_READ(SDEIIR);

			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clear CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
		if (IS_HASWELL(dev))
			hsw_pm_irq_handler(dev_priv, pm_iir);
		else if (pm_iir & GEN6_PM_RPS_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	if (IS_HASWELL(dev)) {
		spin_lock(&dev_priv->irq_lock);
		if (ivb_can_enable_err_int(dev))
			ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
		spin_unlock(&dev_priv->irq_lock);
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be able
	 * to process them after we restore SDEIER (as soon as we restore it,
	 * we'll get an interrupt if SDEIIR still has something to process due
	 * to its back queue). */
	sde_ier = I915_READ(SDEIER);
	I915_WRITE(SDEIER, 0);
	POSTING_READ(SDEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	ret = IRQ_HANDLED;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	I915_WRITE(SDEIER, sde_ier);
	POSTING_READ(SDEIER);

	return ret;
}

/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
						    gpu_error);
	struct drm_device *dev = dev_priv->dev;
	struct intel_ring_buffer *ring;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };
	int i, ret;

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
				   reset_event);

		ret = i915_reset(dev);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before
			 * the counter increment.
			 */
			smp_mb__before_atomic_inc();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev.kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set(&error->reset_counter, I915_WEDGED);
		}

		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);

		intel_display_handle_reset(dev);

		wake_up_all(&dev_priv->gpu_error.reset_queue);
	}
}

/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
				    uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch (INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
	case 7:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}

#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
			       struct drm_i915_gem_object *src,
			       const int num_pages)
{
	struct drm_i915_error_object *dst;
	int i;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (i = 0; i < num_pages; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = num_pages;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}

#define i915_error_object_create(dev_priv, src) \
	i915_error_object_create_sized((dev_priv), (src), \
				       (src)->base.size>>PAGE_SHIFT)

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}

static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}

static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, global_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	default:
		BUG();
	}
}

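/*
 * Note the deliberate fall-through from case 3 to case 2 above: 945-class
 * (gen3) hardware has sixteen fence registers, the second eight living in a
 * separate register block, while the first eight are read the same way as
 * on gen2.
 */
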
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
		u32 acthd = I915_READ(ACTHD);

		if (WARN_ON(ring->id != RCS))
			return NULL;

		obj = ring->private;
		if (acthd >= obj->gtt_offset &&
		    acthd < obj->gtt_offset + obj->base.size)
			return i915_error_object_create(dev_priv, obj);
	}

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_read_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}

static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);
	error->ctl[ring->id] = I915_READ_CTL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}

static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
			ering->ctx = i915_error_object_create_sized(dev_priv,
								    obj, 1);
			break;
		}
	}
}

static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}

/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error. Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in "
		 "/sys/kernel/debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	if (INTEL_INFO(dev)->gen >= 6)
		error->derrmr = I915_READ(DERRMR);

	if (IS_VALLEYVIEW(dev))
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	else if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);
	else if (INTEL_INFO(dev)->gen == 6)
		error->forcewake = I915_READ(FORCEWAKE);

	if (!HAS_PCH_SPLIT(dev))
		for_each_pipe(pipe)
			error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.bound_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}

void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work item
		 * doesn't deadlock trying to grab various locks.
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
}

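/*
 * Sketch of the intent behind the wake_up_all() above: a thread blocked
 * on a seqno wait is woken here, re-checks the reset counter in the GEM
 * wait code, and backs out early (typically with -EAGAIN) instead of
 * sleeping through the reset, so whatever locks it holds are released
 * before the reset work item tries to take them.
 */
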
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check. */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}

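/*
 * The register comparison above works because gen4+ parts latch the
 * page-aligned surface base in DSPSURF, while older parts program
 * DSPADDR with the linear address of the first visible pixel, i.e.
 * base + y * pitch + x * cpp, hence the extra offset arithmetic in
 * the gen < 4 branch.
 */
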
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv,
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

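/*
 * On VLV a vblank interrupt has to be unmasked at two levels: the
 * PIPESTAT enable bit set above and the per-pipe event bit cleared in
 * VLV_IMR. Both are done here, and both are undone again in
 * valleyview_disable_vblank() below.
 */
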
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv,
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool
ring_idle(struct intel_ring_buffer *ring, u32 seqno)
{
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring_last_seqno(ring)));
}

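/*
 * Both helpers above lean on i915_seqno_passed() doing a wraparound-safe
 * comparison (roughly "(s32)(a - b) >= 0"), so ring_idle() keeps giving
 * the right answer even after the 32-bit seqno space rolls over.
 */
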
static struct intel_ring_buffer *
semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, acthd, acthd_min;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if ((ipehr & ~(0x3 << 16)) !=
	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
		return NULL;

	/* ACTHD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX.
	 */
	acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
	acthd_min = max((int)acthd - 3 * 4, 0);
	do {
		cmd = ioread32(ring->virtual_start + acthd);
		if (cmd == ipehr)
			break;

		acthd -= 4;
		if (acthd < acthd_min)
			return NULL;
	} while (1);

	*seqno = ioread32(ring->virtual_start+acthd+4)+1;
	return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
}

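/*
 * Decoding note for the return above (a sketch of the encoding): bit 17
 * of the MI_SEMAPHORE_MBOX header selects which of the two other rings
 * owns the mailbox being waited on, hence the "(id + 1 or 2) % 3"
 * ring-index arithmetic; the +1 on the seqno turns the compare value
 * stored after the MBOX command back into the seqno that will actually
 * satisfy the wait.
 */
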
static int semaphore_passed(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_ring_buffer *signaller;
	u32 seqno, ctl;

	ring->hangcheck.deadlock = true;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL || signaller->hangcheck.deadlock)
		return -1;

	/* cursory check for an unkickable deadlock */
	ctl = I915_READ_CTL(signaller);
	if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
		return -1;

	return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = false;
}

static enum intel_ring_hangcheck_action
ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (ring->hangcheck.acthd != acthd)
		return active;

	if (IS_GEN2(dev))
		return hung;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return kick;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return hung;
		case 1:
			DRM_ERROR("Kicking stuck semaphore on %s\n",
				  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return kick;
		case 0:
			return wait;
		}
	}

	return hung;
}

/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of seqno progress per
 * ring, and if there is no progress the hangcheck score for that ring
 * is increased. Further, acthd is inspected to see if the ring is
 * stuck. If the ring is stuck, we kick it. If we see no progress on
 * three subsequent calls we assume the chip is wedged and try to fix
 * it by resetting the chip.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20
#define FIRE 30

	if (!i915_enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u32 seqno, acthd;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
						  ring->name);
					wake_up_all(&ring->irq_queue);
					ring->hangcheck.score += HUNG;
				} else
					busy = false;
			} else {
				int score;

				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is when
				 * the ring is in a legitimate wait for
				 * another ring. In that case the waiting
				 * ring is a victim and we want to be sure we
				 * catch the right culprit. Then every time we
				 * do kick the ring, add a small increment to
				 * the score so that we can catch a batch that
				 * is being repeatedly kicked and so
				 * responsible for stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case wait:
					score = 0;
					break;
				case active:
					score = BUSY;
					break;
				case kick:
					score = KICK;
					break;
				case hung:
					score = HUNG;
					stuck[i] = true;
					break;
				}
				ring->hangcheck.score += score;
			}
		} else {
			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score > FIRE) {
			DRM_ERROR("%s on %s\n",
				  stuck[i] ? "stuck" : "no progress",
				  ring->name);
			rings_hung++;
		}
	}

	if (rings_hung)
		return i915_handle_error(dev, true);

	if (busy_count)
		/* Reset the timer in case the chip hangs without another
		 * request being added */
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies +
					   DRM_I915_HANGCHECK_JIFFIES));
}

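/*
 * Illustrative scoring walk: with BUSY=1, KICK=5, HUNG=20 and FIRE=30,
 * a ring flagged hung in two consecutive hangcheck periods scores
 * 2 * HUNG = 40 > FIRE and triggers the reset path, while a single
 * successful kick contributes only KICK=5 and decays again once the
 * seqno starts moving.
 */
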
static void ibx_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	/*
	 * SDEIER is also touched by the interrupt handler to work around missed
	 * PCH interrupts. Hence we can't update it after the interrupt handler
	 * is enabled - instead we unconditionally enable all PCH interrupt
	 * sources here, but then only unmask them as needed with SDEIMR.
	 */
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

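/*
 * The SDEIER dance referenced above: the ironlake interrupt handler
 * (earlier in this file) temporarily clears SDEIER and restores it
 * around PCH IIR processing, so after this point SDEIMR is the only
 * register that may be used to mask individual PCH sources.
 */
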
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	ibx_irq_preinstall(dev);
}

static void ivybridge_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* Power management */
	I915_WRITE(GEN6_PMIMR, 0xffffffff);
	I915_WRITE(GEN6_PMIER, 0x0);
	POSTING_READ(GEN6_PMIER);

	ibx_irq_preinstall(dev);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 mask = ~I915_READ(SDEIMR);
	u32 hotplug;

	if (HAS_PCH_IBX(dev)) {
		mask &= ~SDE_HOTPLUG_MASK;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				mask |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		mask &= ~SDE_HOTPLUG_MASK_CPT;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				mask |= hpd_cpt[intel_encoder->hpd_pin];
	}

	I915_WRITE(SDEIMR, ~mask);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 *
	 * This register is the same on all known PCH chips.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

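/*
 * The pulse duration fields are multi-bit fields within PCH_PORT_HOTPLUG,
 * so the sequence above is a plain read-modify-write: clear each
 * PORT*_PULSE_DURATION_MASK first, then OR in the desired *_2ms value
 * together with the port's enable bit.
 */
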
static void ibx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev)) {
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
		       SDE_TRANSA_FIFO_UNDER | SDE_POISON;
	} else {
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;

		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
	}

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, ~mask);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	unsigned long irqflags;

	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* interrupt sources that are always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
			   DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
			   DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
	u32 gt_irqs;

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate an irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask |
			  DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	gt_irqs = GT_RENDER_USER_INTERRUPT;

	if (IS_GEN6(dev))
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	else
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;

	I915_WRITE(GTIER, gt_irqs);
	POSTING_READ(GTIER);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}

	return 0;
}

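/*
 * Note the DEIER/DEIMR split above: the vblank bits are enabled in
 * DEIER unconditionally but are not part of display_mask, so they stay
 * masked in DEIMR until the vblank hooks unmask them; runtime
 * enable/disable then only ever has to touch DEIMR.
 */
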
static int ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* interrupt sources that are always enabled */
	u32 display_mask =
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
		DE_PLANEC_FLIP_DONE_IVB |
		DE_PLANEB_FLIP_DONE_IVB |
		DE_PLANEA_FLIP_DONE_IVB |
		DE_AUX_CHANNEL_A_IVB |
		DE_ERR_INT_IVB;
	u32 pm_irqs = GEN6_PM_RPS_EVENTS;
	u32 gt_irqs;

	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate an irq */
	I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER,
		   display_mask |
		   DE_PIPEC_VBLANK_IVB |
		   DE_PIPEB_VBLANK_IVB |
		   DE_PIPEA_VBLANK_IVB);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
		  GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIER, gt_irqs);
	POSTING_READ(GTIER);

	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
	if (HAS_VEBOX(dev))
		pm_irqs |= PM_VEBOX_USER_INTERRUPT |
			PM_VEBOX_CS_ERROR_INTERRUPT;

	/* Our enable/disable rps functions may touch these registers so
	 * make sure to set a known state for only the non-RPS bits.
	 * The RMW is extra paranoia since this should be called after being set
	 * to a known state in preinstall.
	 */
	I915_WRITE(GEN6_PMIMR,
		   (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs);
	I915_WRITE(GEN6_PMIER,
		   (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs);
	POSTING_READ(GEN6_PMIER);

	ibx_irq_postinstall(dev);

	return 0;
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 gt_irqs;
	u32 enable_mask;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially.  enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
		GT_BLT_USER_INTERRUPT;
	I915_WRITE(GTIER, gt_irqs);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	if (HAS_PCH_NOP(dev))
		return;

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
}

static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int pipe, u16 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, pipe);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}

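/*
 * Timeline sketch for the ISR check above: MI_DISPLAY_FLIP raises the
 * PendingFlip bit in ISR (and latches it into IIR); once the flip
 * completes on a later vblank the ISR bit drops while IIR still holds
 * it. Seeing the bit latched in IIR but deasserted in ISR at vblank
 * therefore means the flip finished and its interrupt was missed.
 */
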
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    i8xx_handle_vblank(dev, 0, iir))
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    i8xx_handle_vblank(dev, 1, iir))
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_trigger) {
				if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
					i915_hpd_irq_setup(dev);
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);
			}
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

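/*
 * Concrete MSI scenario the loop above guards against (illustrative):
 * bit X is latched in IIR and handled; while it is being handled, bit Y
 * becomes set. Clearing X never makes IIR pass through zero, so no new
 * MSI message is generated; re-reading IIR into new_iir after the ack
 * and looping until only flip bits remain picks Y up instead.
 */
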
static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		 * to generate a spurious hotplug event about three
		 * seconds later. So just do it once.
		 */
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = (iir & ~flip_mask) != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
								HOTPLUG_INT_STATUS_G4X :
								HOTPLUG_INT_STATUS_I915);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_trigger) {
				if (hotplug_irq_storm_detect(dev, hotplug_trigger,
							     IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915))
					i915_hpd_irq_setup(dev);
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);
			}
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

static void i915_reenable_hotplug_timer_func(unsigned long data)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 drm_get_connector_name(connector));
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
		    (unsigned long) dev_priv);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		/* Share uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ivybridge_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

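/*
 * Dispatch summary: VLV, IVB/HSW and the other PCH-split parts each get
 * their own handler set above, while everything older falls back to the
 * gen2/gen3/gen4 legacy handlers, which share the
 * i915_{enable,disable}_vblank hooks.
 */
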
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
}

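/*
 * Interaction with HPD storm handling (sketch): connectors switched to
 * DRM_CONNECTOR_POLL_HPD here are demoted back to polling when an IRQ
 * storm is detected, and i915_reenable_hotplug_timer_func() above
 * restores the hotplug-driven mode once the storm timer expires.
 */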