/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
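/*
 * The tables above map HPD pin ids (HPD_CRT, HPD_PORT_B, ...) to the
 * hotplug enable/status bits of each register layout (IBX/CPT south
 * display engine vs. gen3/gen4 PORT_HOTPLUG_*), so the shared
 * storm-detection code below can stay register-layout agnostic.
 */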
static void ibx_hpd_irq_setup(struct drm_device *dev);
static void i915_hpd_irq_setup(struct drm_device *dev);
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}
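/*
 * IVB/HSW expose a single DE_ERR_INT_IVB enable bit shared by all pipes,
 * and CPT/PPT a single SDE_ERROR_CPT bit shared by all transcoders, so
 * the error interrupt may only be (re-)enabled once no pipe/transcoder
 * has underrun reporting disabled. That is what the two predicates above
 * check for their callers.
 */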
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		if (!ivb_can_enable_err_int(dev))
			return;

		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
					 ERR_INT_FIFO_UNDERRUN_B |
					 ERR_INT_FIFO_UNDERRUN_C);

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
	}
}

static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
					    bool enable)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
						SDE_TRANSB_FIFO_UNDER;

	if (enable)
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
	else
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);

	POSTING_READ(SDEIMR);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		if (!cpt_can_enable_serr_int(dev))
			return;

		I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
				     SERR_INT_TRANS_B_FIFO_UNDERRUN |
				     SERR_INT_TRANS_C_FIFO_UNDERRUN);

		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
	} else {
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
	}

	POSTING_READ(SDEIMR);
}
/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}
/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe p;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	unsigned long flags;
	bool ret;

	if (HAS_PCH_LPT(dev)) {
		crtc = NULL;
		for_each_pipe(p) {
			struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
			if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
				crtc = c;
				break;
			}
		}
		if (!crtc) {
			DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
			return false;
		}
	} else {
		crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	}
	intel_crtc = to_intel_crtc(crtc);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(intel_crtc, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}
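/*
 * Both reporting setters return the previous state, which lets the IRQ
 * handlers implement one-shot reporting: on the first underrun they flip
 * reporting off and log a single message instead of taking an error
 * interrupt storm while the condition persists.
 */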
static void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
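/*
 * PIPESTAT keeps the interrupt enable bits in its high half (hence the
 * 0x7fff0000 read mask above) with the matching sticky status bits 16
 * positions below; status bits are cleared by writing 1. That is why
 * enabling an event also writes (mask >> 16): it acks any stale status.
 */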
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}
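/*
 * On these early gens the frame counter is split: the high bits live in
 * PIPEFRAME while the low 8 bits share PIPEFRAMEPIXEL with the pixel
 * counter, hence the double read of the high half above to catch a
 * low-byte wraparound between reads. Gen4.5+ has a single hardware frame
 * counter register, used by gm45_get_vblank_counter() below.
 */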
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}
static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      old_status, connector->status);
	return (old_status != connector->status);
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}
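/*
 * Note: the reenable timer armed above gives storm-disabled pins a chance
 * to return to interrupt-driven detection after I915_REENABLE_HOTPLUG_DELAY
 * (two minutes); until it fires, the affected connectors are serviced by
 * the kms poll helper instead.
 */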
static void ironlake_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;
	unsigned long flags;

	spin_lock_irqsave(&mchdev_lock, flags);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock_irqrestore(&mchdev_lock, flags);
}
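/*
 * Note the inverted scale used by ILK IPS above: a numerically smaller
 * delay means a higher frequency, so ips.max_delay is the smallest legal
 * value. That is why the busy_up path decrements cur_delay and clamps
 * with '<' against max_delay, and the busy_down path does the reverse.
 */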
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
	}
}
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		new_delay = dev_priv->rps.cur_delay + 1;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (IS_VALLEYVIEW(dev_priv->dev) &&
		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay >= dev_priv->rps.min_delay &&
	    new_delay <= dev_priv->rps.max_delay) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, new_delay);
		else
			gen6_set_rps(dev_priv->dev, new_delay);
	}

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		/*
		 * On VLV, when we enter RC6 we may not be at the minimum
		 * voltage level, so arm a timer to check. It should only
		 * fire when there's activity or once after we've entered
		 * RC6, and then won't be re-armed until the next RPS interrupt.
		 */
		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
				 msecs_to_jiffies(100));
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}
static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
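/*
 * The parity interrupt uses a mask-then-defer pattern: it is masked in
 * GTIMR here because hard-irq context cannot take struct_mutex, and
 * ivybridge_parity_work() unmasks it again once L3CDERRST1 has been read
 * and cleared.
 */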
static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
}
/* Legacy way of handling PM interrupts */
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
					    u32 hotplug_trigger,
					    const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;
	int i;
	bool storm_detected = false;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	for (i = 1; i < HPD_NUM_PINS; i++) {

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
		}
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return storm_detected;
}
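/*
 * Storm heuristic: interrupts are counted per pin within a rolling
 * HPD_STORM_DETECT_PERIOD (1s) window. Once the count exceeds
 * HPD_STORM_THRESHOLD (5), the pin is marked HPD_MARK_DISABLED, its
 * pending event bit is dropped, and true is returned so the caller
 * re-programs the hotplug registers with that pin masked out.
 */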
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
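/*
 * dp_aux_irq_handler() waking gmbus_wait_queue is intentional: GMBUS and
 * DP AUX transfers both sleep on that queue while waiting for their
 * completion interrupt, so sharing it keeps the wakeup path trivial.
 */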
/* Unlike gen6_queue_rps_work() from which this function is originally derived,
 * we must be able to deal with other PM interrupts. This is complicated because
 * of the way in which we use the masks to defer the RPS work (which for
 * posterity is necessary because of forcewake).
 */
static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
			       u32 pm_iir)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
	if (dev_priv->rps.pm_iir) {
		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
		/* never want to mask useful interrupts. (also posting read) */
		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
		/* TODO: if queue_work is slow, move it out of the spinlock */
		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
			i915_handle_error(dev_priv->dev, false);
		}
	}
}
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_trigger) {
				if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
					i915_hpd_irq_setup(dev);
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);
			}
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir & GEN6_PM_RPS_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}
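/*
 * Ordering note (inferred from the in-line comment above): the PIPE*STAT
 * registers are read and acked under irq_lock before VLV_IIR is written
 * back at the bottom of the loop, so a still-asserted status bit cannot
 * immediately re-latch the display event into IIR between the two writes.
 */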
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	if (hotplug_trigger) {
		if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx))
			ibx_hpd_irq_setup(dev);
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
	}
	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port);
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}
static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");

	I915_WRITE(GEN7_ERR_INT, err_int);
}
static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	if (hotplug_trigger) {
		if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt))
			ibx_hpd_irq_setup(dev);
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
	}
	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}
static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	if (IS_HASWELL(dev) &&
	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* On Haswell, also mask ERR_INT because we don't want to risk
	 * generating "unclaimed register" interrupts from inside the interrupt
	 * handler. */
	if (IS_HASWELL(dev))
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (de_iir & DE_ERR_INT_IVB)
			ivb_err_int_handler(dev);

		if (de_iir & DE_AUX_CHANNEL_A_IVB)
			dp_aux_irq_handler(dev);

		if (de_iir & DE_GSE_IVB)
			intel_opregion_asle_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}

		/* check event from PCH */
		if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
			u32 pch_iir = I915_READ(SDEIIR);

			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clear CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
		if (IS_HASWELL(dev))
			hsw_pm_irq_handler(dev_priv, pm_iir);
		else if (pm_iir & GEN6_PM_RPS_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	if (IS_HASWELL(dev) && ivb_can_enable_err_int(dev))
		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	sde_ier = I915_READ(SDEIER);
	I915_WRITE(SDEIER, 0);
	POSTING_READ(SDEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	ret = IRQ_HANDLED;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	I915_WRITE(SDEIER, sde_ier);
	POSTING_READ(SDEIER);

	return ret;
}
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * occurred.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
						    gpu_error);
	struct drm_device *dev = dev_priv->dev;
	struct intel_ring_buffer *ring;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };
	int i, ret;

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
				   reset_event);

		ret = i915_reset(dev);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before
			 * the counter increment.
			 */
			smp_mb__before_atomic_inc();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev.kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set(&error->reset_counter, I915_WEDGED);
		}

		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);

		intel_display_handle_reset(dev);

		wake_up_all(&dev_priv->gpu_error.reset_queue);
	}
}
/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
				    uint32_t *instdone)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);

	switch (INTEL_INFO(dev)->gen) {
	case 2:
	case 3:
		instdone[0] = I915_READ(INSTDONE);
		break;
	case 4:
	case 5:
	case 6:
		instdone[0] = I915_READ(INSTDONE_I965);
		instdone[1] = I915_READ(INSTDONE1);
		break;
	default:
		WARN_ONCE(1, "Unsupported platform\n");
	case 7:
		instdone[0] = I915_READ(GEN7_INSTDONE_1);
		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	}
}
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create_sized(struct drm_i915_private *dev_priv,
			       struct drm_i915_gem_object *src,
			       const int num_pages)
{
	struct drm_i915_error_object *dst;
	int i;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src->gtt_offset;
	for (i = 0; i < num_pages; i++) {
		unsigned long flags;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		if (reloc_offset < dev_priv->gtt.mappable_end &&
		    src->has_global_gtt_mapping) {
			void __iomem *s;

			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */

			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
		} else if (src->stolen) {
			unsigned long offset;

			offset = dev_priv->mm.stolen_base;
			offset += src->stolen->start;
			offset += i << PAGE_SHIFT;

			memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
		} else {
			struct page *page;
			void *s;

			page = i915_gem_object_get_page(src, i);

			drm_clflush_pages(&page, 1);

			s = kmap_atomic(page);
			memcpy(d, s, PAGE_SIZE);
			kunmap_atomic(s);

			drm_clflush_pages(&page, 1);
		}
		local_irq_restore(flags);

		dst->pages[i] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = num_pages;
	dst->gtt_offset = src->gtt_offset;

	return dst;

unwind:
	while (i--)
		kfree(dst->pages[i]);
	kfree(dst);
	return NULL;
}

#define i915_error_object_create(dev_priv, src) \
	i915_error_object_create_sized((dev_priv), (src), \
				       (src)->base.size>>PAGE_SHIFT)
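/*
 * Error capture runs in atomic context (error interrupt or hangcheck),
 * hence the GFP_ATOMIC allocations above and the three copy strategies:
 * read back through the GTT aperture when the object has a global GTT
 * mapping, read through stolen memory, or copy straight from the shmem
 * pages with clflushes on either side.
 */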
static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}
static void
i915_error_state_free(struct kref *error_ref)
{
	struct drm_i915_error_state *error = container_of(error_ref,
							  typeof(*error), ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		i915_error_object_free(error->ring[i].ctx);
		kfree(error->ring[i].requests);
	}

	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error->display);
	kfree(error);
}
static void capture_bo(struct drm_i915_error_buffer *err,
		       struct drm_i915_gem_object *obj)
{
	err->size = obj->base.size;
	err->name = obj->base.name;
	err->rseqno = obj->last_read_seqno;
	err->wseqno = obj->last_write_seqno;
	err->gtt_offset = obj->gtt_offset;
	err->read_domains = obj->base.read_domains;
	err->write_domain = obj->base.write_domain;
	err->fence_reg = obj->fence_reg;
	err->pinned = 0;
	if (obj->pin_count > 0)
		err->pinned = 1;
	if (obj->user_pin_count > 0)
		err->pinned = -1;
	err->tiling = obj->tiling_mode;
	err->dirty = obj->dirty;
	err->purgeable = obj->madv != I915_MADV_WILLNEED;
	err->ring = obj->ring ? obj->ring->id : -1;
	err->cache_level = obj->cache_level;
}
static u32 capture_active_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}

static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
			     int count, struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, global_list) {
		if (obj->pin_count == 0)
			continue;

		capture_bo(err++, obj);
		if (++i == count)
			break;
	}

	return i;
}
static void i915_gem_record_fences(struct drm_device *dev,
				   struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < dev_priv->num_fence_regs; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	default:
		BUG();
	}
}
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return NULL;

	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
		u32 acthd = I915_READ(ACTHD);

		if (WARN_ON(ring->id != RCS))
			return NULL;

		obj = ring->private;
		if (acthd >= obj->gtt_offset &&
		    acthd < obj->gtt_offset + obj->base.size)
			return i915_error_object_create(dev_priv, obj);
	}

	seqno = ring->get_seqno(ring, false);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_read_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return i915_error_object_create(dev_priv, obj);
	}

	return NULL;
}
static void i915_record_ring_state(struct drm_device *dev,
				   struct drm_i915_error_state *error,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS)
			error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring, false);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);
	error->ctl[ring->id] = I915_READ_CTL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}
static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
					   struct drm_i915_error_state *error,
					   struct drm_i915_error_ring *ering)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* Currently render ring is the only HW context user */
	if (ring->id != RCS || !error->ccid)
		return;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
			ering->ctx = i915_error_object_create_sized(dev_priv,
								    obj, 1);
			break;
		}
	}
}
static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_request *request;
	int i, count;

	for_each_ring(ring, dev_priv, i) {
		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);


		i915_gem_record_active_context(ring, error, &error->ring[i]);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests =
			kmalloc(count*sizeof(struct drm_i915_error_request),
				GFP_ATOMIC);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	unsigned long flags;
	int i, pipe;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
	if (error)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in "
		 "/sys/kernel/debug/dri/%d/i915_error_state\n",
		 dev->primary->index);

	kref_init(&error->ref);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	if (HAS_HW_CONTEXTS(dev))
		error->ccid = I915_READ(CCID);

	if (HAS_PCH_SPLIT(dev))
		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
	else if (IS_VALLEYVIEW(dev))
		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
	else if (IS_GEN2(dev))
		error->ier = I915_READ16(IER);
	else
		error->ier = I915_READ(IER);

	if (INTEL_INFO(dev)->gen >= 6)
		error->derrmr = I915_READ(DERRMR);

	if (IS_VALLEYVIEW(dev))
		error->forcewake = I915_READ(FORCEWAKE_VLV);
	else if (INTEL_INFO(dev)->gen >= 7)
		error->forcewake = I915_READ(FORCEWAKE_MT);
	else if (INTEL_INFO(dev)->gen == 6)
		error->forcewake = I915_READ(FORCEWAKE);

	if (!HAS_PCH_SPLIT(dev))
		for_each_pipe(pipe)
			error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	if (INTEL_INFO(dev)->gen == 7)
		error->err_int = I915_READ(GEN7_ERR_INT);

	i915_get_extra_instdone(dev, error->extra_instdone);

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (obj->pin_count)
			i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
					   GFP_ATOMIC);
		if (error->active_bo)
			error->pinned_bo =
				error->active_bo + error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count =
			capture_active_bo(error->active_bo,
					  error->active_bo_count,
					  &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count =
			capture_pinned_bo(error->pinned_bo,
					  error->pinned_bo_count,
					  &dev_priv->mm.bound_list);

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	if (dev_priv->gpu_error.first_error == NULL) {
		dev_priv->gpu_error.first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		i915_error_state_free(&error->ref);
}
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
	error = dev_priv->gpu_error.first_error;
	dev_priv->gpu_error.first_error = NULL;
	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);

	if (error)
		kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
2099 void i915_handle_error(struct drm_device *dev, bool wedged)
2101 struct drm_i915_private *dev_priv = dev->dev_private;
2102 struct intel_ring_buffer *ring;
2105 i915_capture_error_state(dev);
2106 i915_report_and_clear_eir(dev);
2109 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2110 &dev_priv->gpu_error.reset_counter);
2113 * Wakeup waiting processes so that the reset work item
2114 * doesn't deadlock trying to grab various locks.
2116 for_each_ring(ring, dev_priv, i)
2117 wake_up_all(&ring->irq_queue);
2120 queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
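/*
 * Editor's note: both flavours of this call appear later in this file -
 * the legacy IRQ handlers report command parser errors with
 * i915_handle_error(dev, false), while hangcheck escalates with
 * i915_handle_error(dev, true), which marks the reset as in progress and
 * relies on the error work item queued above to perform it.
 */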
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check. */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt. */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
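/*
 * Editor's sketch (assumption about the masked-register convention used
 * by INSTPM and friends): the upper 16 bits carry per-bit write enables,
 * so the helper macros expand roughly as
 *
 *	_MASKED_BIT_ENABLE(x)  -> ((x) << 16) | (x)
 *	_MASKED_BIT_DISABLE(x) -> ((x) << 16)
 *
 * which lets the write above flip only INSTPM_AGPBUSY_DIS without a
 * read-modify-write cycle.
 */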
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv,
				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv,
				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	u32 imr;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool
ring_idle(struct intel_ring_buffer *ring, u32 seqno)
{
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring_last_seqno(ring)));
}
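/*
 * Editor's note (sketch): i915_seqno_passed() is the usual wrap-safe
 * comparison, defined elsewhere in the driver as roughly
 *
 *	static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
 *	{
 *		return (s32)(seq1 - seq2) >= 0;
 *	}
 *
 * so ring_idle() reports idle once the last queued request's seqno has
 * been reached, even across a 32-bit wraparound.
 */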
static struct intel_ring_buffer *
semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, acthd, acthd_min;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if ((ipehr & ~(0x3 << 16)) !=
	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
		return NULL;

	/* ACTHD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX.
	 */
	acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
	acthd_min = max((int)acthd - 3 * 4, 0);
	do {
		cmd = ioread32(ring->virtual_start + acthd);
		if (cmd == ipehr)
			break;

		acthd -= 4;
		if (acthd < acthd_min)
			return NULL;
	} while (1);

	*seqno = ioread32(ring->virtual_start+acthd+4)+1;
	return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
}
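/*
 * Editor's note (sketch of the command layout assumed by the scan above):
 *
 *	dword 0: MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER
 *	dword 1: the seqno operand (emitted as seqno - 1, hence the "+1")
 *
 * The register-select bits in IPEHR (bit 17 here) identify which of the
 * other rings owns the mailbox, giving the signaller lookup at the end.
 */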
static int semaphore_passed(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_ring_buffer *signaller;
	u32 seqno, ctl;

	ring->hangcheck.deadlock = true;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL || signaller->hangcheck.deadlock)
		return -1;

	/* cursory check for an unkickable deadlock */
	ctl = I915_READ_CTL(signaller);
	if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
		return -1;

	return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
}
static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_ring_buffer *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = false;
}
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (ring->hangcheck.acthd != acthd)
		return active;

	if (IS_GEN2(dev))
		return hung;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return kick;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return hung;
		case 1:
			DRM_ERROR("Kicking stuck semaphore on %s\n",
				  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return kick;
		case 0:
			return wait;
		}
	}

	return hung;
}
/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of per-ring seqno progress
 * and, if there is no progress, the hangcheck score for that ring is
 * increased. Further, acthd is inspected to see if the ring is stuck;
 * if it is, we kick the ring. If we see no progress on three subsequent
 * calls we assume the chip is wedged and try to fix it by resetting it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20
#define FIRE 30

	if (!i915_enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u32 seqno, acthd;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
						  ring->name);
					wake_up_all(&ring->irq_queue);
					ring->hangcheck.score += HUNG;
				} else
					busy = false;
			} else {
				int score;

				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is if this
				 * ring is in a legitimate wait for another
				 * ring. In that case the waiting ring is a
				 * victim and we want to be sure we catch the
				 * right culprit. Then every time we do kick
				 * the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case wait:
					score = 0;
					break;
				case active:
					score = BUSY;
					break;
				case kick:
					score = KICK;
					break;
				case hung:
					score = HUNG;
					stuck[i] = true;
					break;
				}
				ring->hangcheck.score += score;
			}
		} else {
			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score > FIRE) {
			DRM_ERROR("%s on %s\n",
				  stuck[i] ? "stuck" : "no progress",
				  ring->name);
			rings_hung++;
		}
	}

	if (rings_hung)
		return i915_handle_error(dev, true);

	if (busy_count)
		/* Reset the timer in case the chip hangs without another
		 * request being added */
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies +
					   DRM_I915_HANGCHECK_JIFFIES));
}
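/*
 * Editor's note on the scoring (using the values as reconstructed above:
 * BUSY=1, KICK=5, HUNG=20, FIRE=30): a ring only crosses the FIRE
 * threshold after repeated bad checks - e.g. two HUNG ticks, or a HUNG
 * tick plus a run of KICKs - while every check that observes a new seqno
 * decrements the score, so short stalls never reach the reset path.
 */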
static void ibx_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	/*
	 * SDEIER is also touched by the interrupt handler to work around missed
	 * PCH interrupts. Hence we can't update it after the interrupt handler
	 * is enabled - instead we unconditionally enable all PCH interrupt
	 * sources here, but then only unmask them as needed with SDEIMR.
	 */
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}

/* drm_dma.h hooks
*/
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	ibx_irq_preinstall(dev);
}
static void ivybridge_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* Power management */
	I915_WRITE(GEN6_PMIMR, 0xffffffff);
	I915_WRITE(GEN6_PMIER, 0x0);
	POSTING_READ(GEN6_PMIER);

	ibx_irq_preinstall(dev);
}
static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 mask = ~I915_READ(SDEIMR);
	u32 hotplug;

	if (HAS_PCH_IBX(dev)) {
		mask &= ~SDE_HOTPLUG_MASK;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				mask |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		mask &= ~SDE_HOTPLUG_MASK_CPT;
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				mask |= hpd_cpt[intel_encoder->hpd_pin];
	}

	I915_WRITE(SDEIMR, ~mask);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 *
	 * This register is the same on all known PCH chips.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
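/*
 * Editor's note on polarity: SDEIMR is a mask register, so a set bit
 * *disables* a source.  The code above therefore works on the inverted
 * value ("mask" = currently enabled sources), ORs in the hotplug pins it
 * wants live, and inverts back on the final SDEIMR write.
 */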
static void ibx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev)) {
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
		       SDE_TRANSA_FIFO_UNDER | SDE_POISON;
	} else {
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;

		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
	}

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, ~mask);
}
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kind of interrupts that are always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
			   DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
			   DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
	u32 gt_irqs;

	dev_priv->irq_mask = ~display_mask;

	/* these should always be able to generate an irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	gt_irqs = GT_RENDER_USER_INTERRUPT;

	if (IS_GEN6(dev))
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	else
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;

	I915_WRITE(GTIER, gt_irqs);
	POSTING_READ(GTIER);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}
static int ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable the kind of interrupts that are always enabled */
	u32 display_mask =
		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
		DE_PLANEC_FLIP_DONE_IVB |
		DE_PLANEB_FLIP_DONE_IVB |
		DE_PLANEA_FLIP_DONE_IVB |
		DE_AUX_CHANNEL_A_IVB |
		DE_ERR_INT_IVB;
	u32 pm_irqs = GEN6_PM_RPS_EVENTS;
	u32 gt_irqs;

	dev_priv->irq_mask = ~display_mask;

	/* these should always be able to generate an irq */
	I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER,
		   display_mask |
		   DE_PIPEC_VBLANK_IVB |
		   DE_PIPEB_VBLANK_IVB |
		   DE_PIPEA_VBLANK_IVB);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
		  GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIER, gt_irqs);
	POSTING_READ(GTIER);

	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
	if (HAS_VEBOX(dev))
		pm_irqs |= PM_VEBOX_USER_INTERRUPT |
			   PM_VEBOX_CS_ERROR_INTERRUPT;

	/* Our enable/disable rps functions may touch these registers so
	 * make sure to set a known state for only the non-RPS bits.
	 * The RMW is extra paranoia since this should be called after being
	 * set to a known state in preinstall.
	 */
	I915_WRITE(GEN6_PMIMR,
		   (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs);
	I915_WRITE(GEN6_PMIER,
		   (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs);
	POSTING_READ(GEN6_PMIER);

	ibx_irq_postinstall(dev);

	return 0;
}
static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 gt_irqs;
	u32 enable_mask;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially.  enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
		  GT_BLT_USER_INTERRUPT;
	I915_WRITE(GTIER, gt_irqs);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}
static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	if (HAS_PCH_NOP(dev))
		return;

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
}
static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}
static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}
/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int pipe, u16 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, pipe);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}
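/*
 * Editor's note (sketch): ISR reflects the live state of each event line
 * while IIR latches it, so "flip pending latched in IIR but already clear
 * in ISR" is how the code infers that the flip completed even though no
 * dedicated flip-done interrupt fired.
 */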
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	unsigned long irqflags;
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    i8xx_handle_vblank(dev, 0, iir))
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    i8xx_handle_vblank(dev, 1, iir))
			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);

		iir = new_iir;
	}

	return IRQ_HANDLED;
}
static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}
static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	return 0;
}
/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!drm_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_trigger) {
				if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
					i915_hpd_irq_setup(dev);
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);
			}
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
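/*
 * Editor's note: the loop condition deliberately ignores flip-pending
 * bits that are still outstanding.  Once i915_handle_vblank() sees a
 * flip complete it drops that bit from flip_mask, so the bit is acked on
 * the next IIR write and can no longer keep the loop spinning.
 */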
static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}
static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = (iir & ~flip_mask) != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
								HOTPLUG_INT_STATUS_G4X :
								HOTPLUG_INT_STATUS_I915);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_trigger) {
				if (hotplug_irq_storm_detect(dev, hotplug_trigger,
							     IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915))
					i915_hpd_irq_setup(dev);
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);
			}
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
static void i915_reenable_hotplug_timer_func(unsigned long data)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 drm_get_connector_name(connector));
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
		    (unsigned long) dev_priv);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		/* Share uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ivybridge_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
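/*
 * Editor's note (sketch of how these hooks are consumed, per drm core
 * behaviour): drm_irq_install() runs ->irq_preinstall, requests the
 * interrupt line with ->irq_handler and then runs ->irq_postinstall,
 * while drm_irq_uninstall() runs ->irq_uninstall.  That ordering is why
 * every preinstall above masks and acks the hardware before a handler
 * can run, and why postinstall does the actual unmasking.
 */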
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
}