drm/i915: reject modes the LPT FDI receiver can't handle
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 029e008979b6147d5aebad646f161c22e02c26e3..41e2d9508ef6757111552a7f29238c20bd79e5fc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1149,14 +1149,9 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
        u32 val;
        bool cur_state;
 
-       if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
-                       DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
-                       return;
-       } else {
-               reg = FDI_RX_CTL(pipe);
-               val = I915_READ(reg);
-               cur_state = !!(val & FDI_RX_ENABLE);
-       }
+       reg = FDI_RX_CTL(pipe);
+       val = I915_READ(reg);
+       cur_state = !!(val & FDI_RX_ENABLE);
        WARN(cur_state != state,
             "FDI RX state assertion failure (expected %s, current %s)\n",
             state_string(state), state_string(cur_state));
@@ -1189,10 +1184,6 @@ static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
        int reg;
        u32 val;
 
-       if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
-               DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
-               return;
-       }
        reg = FDI_RX_CTL(pipe);
        val = I915_READ(reg);
        WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
@@ -1821,9 +1812,15 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
 {
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);
+       enum transcoder pch_transcoder;
        int reg;
        u32 val;
 
+       if (IS_HASWELL(dev_priv->dev))
+               pch_transcoder = TRANSCODER_A;
+       else
+               pch_transcoder = pipe;
+
        /*
         * A pipe without a PLL won't actually be able to drive bits from
         * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
@@ -1834,8 +1831,8 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
        else {
                if (pch_port) {
                        /* if driving the PCH, we need FDI enabled */
-                       assert_fdi_rx_pll_enabled(dev_priv, pipe);
-                       assert_fdi_tx_pll_enabled(dev_priv, pipe);
+                       assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
+                       assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder);
                }
                /* FIXME: assert CPU port conditions for SNB+ */
        }
@@ -2924,9 +2921,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
        /* Ironlake workaround, disable clock pointer after downing FDI */
        if (HAS_PCH_IBX(dev)) {
                I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
-               I915_WRITE(FDI_RX_CHICKEN(pipe),
-                          I915_READ(FDI_RX_CHICKEN(pipe) &
-                                    ~FDI_RX_PHASE_SYNC_POINTER_EN));
        } else if (HAS_PCH_CPT(dev)) {
                cpt_phase_pointer_disable(dev, pipe);
        }
@@ -3393,7 +3387,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
                 * as some pre-programmed values are broken,
                 * e.g. x201.
                 */
-               I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+               if (IS_IVYBRIDGE(dev))
+                       I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+                                                PF_PIPE_SEL_IVB(pipe));
+               else
+                       I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
                I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
                I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
        }
@@ -3469,7 +3467,8 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
                 * as some pre-programmed values are broken,
                 * e.g. x201.
                 */
-               I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+               I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+                                        PF_PIPE_SEL_IVB(pipe));
                I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
                I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
        }
@@ -5229,6 +5228,17 @@ static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
        }
 }
 
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
+{
+       /*
+        * Account for spread spectrum to avoid
+        * oversubscribing the link. Max center spread
+        * is 2.5%; use 5% for safety's sake.
+        */
+       u32 bps = target_clock * bpp * 21 / 20;
+       return bps / (link_bw * 8) + 1;
+}
+
 static void ironlake_set_m_n(struct drm_crtc *crtc,
                             struct drm_display_mode *mode,
                             struct drm_display_mode *adjusted_mode)
@@ -5282,15 +5292,9 @@ static void ironlake_set_m_n(struct drm_crtc *crtc,
        else
                target_clock = adjusted_mode->clock;
 
-       if (!lane) {
-               /*
-                * Account for spread spectrum to avoid
-                * oversubscribing the link. Max center spread
-                * is 2.5%; use 5% for safety's sake.
-                */
-               u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
-               lane = bps / (link_bw * 8) + 1;
-       }
+       if (!lane)
+               lane = ironlake_get_lanes_required(target_clock, link_bw,
+                                                  intel_crtc->bpp);
 
        intel_crtc->fdi_lanes = lane;
 
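Note on the hunk above: ironlake_get_lanes_required() simply hoists the existing lane calculation out of ironlake_set_m_n(). The payload is padded by 5% (x 21/20) to leave headroom for spread-spectrum clocking, then divided across FDI lanes that each carry 8 bits per link clock. A standalone sketch with illustrative numbers (the name lanes_required and the 154 MHz / 2.7 GHz / 24 bpp figures are only an example, not driver code; clocks are in kHz as in the caller):

    #include <stdio.h>

    /* Same arithmetic as ironlake_get_lanes_required() above. */
    static int lanes_required(int target_clock, int link_bw, int bpp)
    {
            unsigned int bps = target_clock * bpp * 21 / 20; /* +5% spread-spectrum margin */
            return bps / (link_bw * 8) + 1;                  /* 8 bits per clock per lane */
    }

    int main(void)
    {
            /* 154000 kHz dot clock, 270000 kHz FDI link, 24 bpp -> prints 2 */
            printf("%d\n", lanes_required(154000, 270000, 24));
            return 0;
    }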
@@ -6600,24 +6604,19 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
                DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
        if (IS_ERR(fb)) {
                DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
-               goto fail;
+               return false;
        }
 
        if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
                DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
                if (old->release_fb)
                        old->release_fb->funcs->destroy(old->release_fb);
-               goto fail;
+               return false;
        }
 
        /* let the connector get through one full cycle before testing */
        intel_wait_for_vblank(dev, intel_crtc->pipe);
-
        return true;
-fail:
-       connector->encoder = NULL;
-       encoder->crtc = NULL;
-       return false;
 }
 
 void intel_release_load_detect_pipe(struct drm_connector *connector,
@@ -6904,14 +6903,19 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 {
        struct intel_unpin_work *work =
                container_of(__work, struct intel_unpin_work, work);
+       struct drm_device *dev = work->crtc->dev;
 
-       mutex_lock(&work->dev->struct_mutex);
+       mutex_lock(&dev->struct_mutex);
        intel_unpin_fb_obj(work->old_fb_obj);
        drm_gem_object_unreference(&work->pending_flip_obj->base);
        drm_gem_object_unreference(&work->old_fb_obj->base);
 
-       intel_update_fbc(work->dev);
-       mutex_unlock(&work->dev->struct_mutex);
+       intel_update_fbc(dev);
+       mutex_unlock(&dev->struct_mutex);
+
+       BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
+       atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
+
        kfree(work);
 }
 
@@ -6922,8 +6926,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_unpin_work *work;
        struct drm_i915_gem_object *obj;
-       struct drm_pending_vblank_event *e;
-       struct timeval tvbl;
        unsigned long flags;
 
        /* Ignore early vblank irqs */
@@ -6932,24 +6934,22 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 
        spin_lock_irqsave(&dev->event_lock, flags);
        work = intel_crtc->unpin_work;
-       if (work == NULL || !work->pending) {
+
+       /* Ensure we don't miss a work->pending update ... */
+       smp_rmb();
+
+       if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
                spin_unlock_irqrestore(&dev->event_lock, flags);
                return;
        }
 
-       intel_crtc->unpin_work = NULL;
+       /* and that the unpin work is consistent wrt ->pending. */
+       smp_rmb();
 
-       if (work->event) {
-               e = work->event;
-               e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
-
-               e->event.tv_sec = tvbl.tv_sec;
-               e->event.tv_usec = tvbl.tv_usec;
+       intel_crtc->unpin_work = NULL;
 
-               list_add_tail(&e->base.link,
-                             &e->base.file_priv->event_list);
-               wake_up_interruptible(&e->base.file_priv->event_wait);
-       }
+       if (work->event)
+               drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
 
        drm_vblank_put(dev, intel_crtc->pipe);
 
@@ -6959,9 +6959,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 
        atomic_clear_mask(1 << intel_crtc->plane,
                          &obj->pending_flip.counter);
-
        wake_up(&dev_priv->pending_flip_queue);
-       schedule_work(&work->work);
+
+       queue_work(dev_priv->wq, &work->work);
 
        trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
 }
@@ -6989,16 +6989,25 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
                to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
        unsigned long flags;
 
+       /* NB: An MMIO update of the plane base pointer will also
+        * generate a page-flip completion irq, i.e. every modeset
+        * is also accompanied by a spurious intel_prepare_page_flip().
+        */
        spin_lock_irqsave(&dev->event_lock, flags);
-       if (intel_crtc->unpin_work) {
-               if ((++intel_crtc->unpin_work->pending) > 1)
-                       DRM_ERROR("Prepared flip multiple times\n");
-       } else {
-               DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
-       }
+       if (intel_crtc->unpin_work)
+               atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
        spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
+inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
+{
+       /* Ensure that the work item is consistent when activating it ... */
+       smp_wmb();
+       atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
+       /* and that it is marked active as soon as the irq could fire. */
+       smp_wmb();
+}
+
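The helper above, together with the atomic_inc_not_zero() now used in intel_prepare_page_flip() and the INTEL_FLIP_COMPLETE check in do_intel_finish_page_flip(), turns work->pending into a small state machine: queue_flip marks the work pending before the ring command can possibly complete, the flip-done irq can only advance a work item that is already pending, and the completion handler only proceeds once the item has reached the complete state. The smp_wmb()/smp_rmb() pairs order the rest of the work item against that state change. The toy below (names are mine; the INTEL_FLIP_* values of 0/1/2 are assumed to come from intel_drv.h in this series) only illustrates why inc-not-zero is the right primitive on the irq side, given that MMIO base-pointer updates generate spurious flip-done interrupts; it uses C11 atomics rather than the kernel's:

    #include <assert.h>
    #include <stdatomic.h>

    enum { INTEL_FLIP_INACTIVE, INTEL_FLIP_PENDING, INTEL_FLIP_COMPLETE };

    /* Mirrors atomic_inc_not_zero(): only advance a flip that is already pending. */
    static void flip_done_irq(atomic_int *pending)
    {
            int v = atomic_load(pending);

            while (v != INTEL_FLIP_INACTIVE &&
                   !atomic_compare_exchange_weak(pending, &v, v + 1))
                    ;       /* retry with the freshly loaded value in v */
    }

    int main(void)
    {
            atomic_int pending = INTEL_FLIP_INACTIVE;

            flip_done_irq(&pending);                        /* spurious irq: nothing queued, ignored */
            assert(atomic_load(&pending) == INTEL_FLIP_INACTIVE);

            atomic_store(&pending, INTEL_FLIP_PENDING);     /* queue_flip marks the work active */
            flip_done_irq(&pending);                        /* real completion: PENDING -> COMPLETE */
            assert(atomic_load(&pending) == INTEL_FLIP_COMPLETE);
            return 0;
    }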
 static int intel_gen2_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
@@ -7032,6 +7041,8 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, fb->pitches[0]);
        intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, 0); /* aux display base address, unused */
+
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
 
@@ -7072,6 +7083,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, MI_NOOP);
 
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
 
@@ -7118,6 +7130,8 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
        pf = 0;
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
        intel_ring_emit(ring, pf | pipesrc);
+
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
 
@@ -7160,6 +7174,8 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
        pf = 0;
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
        intel_ring_emit(ring, pf | pipesrc);
+
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
 
@@ -7214,6 +7230,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
        intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, (MI_NOOP));
+
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
 
@@ -7262,7 +7280,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                return -ENOMEM;
 
        work->event = event;
-       work->dev = crtc->dev;
+       work->crtc = crtc;
        intel_fb = to_intel_framebuffer(crtc->fb);
        work->old_fb_obj = intel_fb->obj;
        INIT_WORK(&work->work, intel_unpin_work_fn);
@@ -7287,6 +7305,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        intel_fb = to_intel_framebuffer(fb);
        obj = intel_fb->obj;
 
+       if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
+               flush_workqueue(dev_priv->wq);
+
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                goto cleanup;
@@ -7305,6 +7326,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
         * the flip occurs and the object is no longer visible.
         */
        atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+       atomic_inc(&intel_crtc->unpin_work_count);
 
        ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
        if (ret)
@@ -7319,6 +7341,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        return 0;
 
 cleanup_pending:
+       atomic_dec(&intel_crtc->unpin_work_count);
        atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
        drm_gem_object_unreference(&work->old_fb_obj->base);
        drm_gem_object_unreference(&obj->base);
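The unpin_work_count handling above bounds how many flips per crtc can still have their unpin work outstanding: the completion work now runs on the driver's own workqueue (dev_priv->wq), decrements the counter when it finishes, and intel_crtc_page_flip() flushes that workqueue before a third framebuffer gets pinned. A toy model of the throttling pattern (names and output are mine, not driver code):

    #include <stdio.h>

    #define MAX_UNPIN_IN_FLIGHT 2

    static int unpin_work_count;        /* per-crtc atomic_t in the driver */

    static void flush_unpin_work(void)
    {
            /* Stands in for flush_workqueue(dev_priv->wq): by the time it
             * returns, every queued intel_unpin_work_fn() has run. */
            unpin_work_count = 0;
    }

    static void queue_flip(int n)
    {
            if (unpin_work_count >= MAX_UNPIN_IN_FLIGHT)
                    flush_unpin_work();     /* throttle before pinning yet another fb */
            unpin_work_count++;
            printf("flip %d queued, %d unpin work item(s) outstanding\n",
                   n, unpin_work_count);
    }

    int main(void)
    {
            for (int i = 0; i < 5; i++)
                    queue_flip(i);
            return 0;
    }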
@@ -7614,7 +7637,7 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
                                dev->mode_config.dpms_property;
 
                        connector->dpms = DRM_MODE_DPMS_ON;
-                       drm_connector_property_set_value(connector,
+                       drm_object_property_set_value(&connector->base,
                                                         dpms_property,
                                                         DRM_MODE_DPMS_ON);
 
@@ -8268,7 +8291,9 @@ static void intel_setup_outputs(struct drm_device *dev)
                I915_WRITE(PFIT_CONTROL, 0);
        }
 
-       intel_crt_init(dev);
+       if (!(IS_HASWELL(dev) &&
+             (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
+               intel_crt_init(dev);
 
        if (IS_HASWELL(dev)) {
                int found;
@@ -8649,6 +8674,34 @@ struct intel_quirk {
        void (*hook)(struct drm_device *dev);
 };
 
+/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
+struct intel_dmi_quirk {
+       void (*hook)(struct drm_device *dev);
+       const struct dmi_system_id (*dmi_id_list)[];
+};
+
+static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+{
+       DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
+       return 1;
+}
+
+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
+       {
+               .dmi_id_list = &(const struct dmi_system_id[]) {
+                       {
+                               .callback = intel_dmi_reverse_brightness,
+                               .ident = "NCR Corporation",
+                               .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
+                                           DMI_MATCH(DMI_PRODUCT_NAME, ""),
+                               },
+                       },
+                       { }  /* terminating entry */
+               },
+               .hook = quirk_invert_brightness,
+       },
+};
+
 static struct intel_quirk intel_quirks[] = {
        /* HP Mini needs pipe A force quirk (LP: #322104) */
        { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
@@ -8688,6 +8741,10 @@ static void intel_init_quirks(struct drm_device *dev)
                     q->subsystem_device == PCI_ANY_ID))
                        q->hook(dev);
        }
+       for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
+               if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
+                       intel_dmi_quirks[i].hook(dev);
+       }
 }
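For the DMI-based quirks added above: dmi_check_system() runs the callback of every table entry whose DMI_MATCH fields all match the running machine (stopping early if a callback returns non-zero) and returns the number of matches, so intel_dmi_quirks[i].hook() fires exactly when one of the listed systems is detected. A self-contained toy model of that lookup (struct dmi_entry, check_system() and the single vendor-string match are simplifications for illustration, not the kernel's real types or signatures):

    #include <stdio.h>
    #include <string.h>

    struct dmi_entry {
            int (*callback)(const struct dmi_entry *);
            const char *ident;
            const char *sys_vendor;         /* stands in for the DMI_MATCH() array */
    };

    static int reverse_brightness(const struct dmi_entry *e)
    {
            printf("Backlight polarity reversed on %s\n", e->ident);
            return 1;
    }

    /* Toy stand-in for dmi_check_system(): run callbacks for matching
     * entries, return how many entries matched. */
    static int check_system(const struct dmi_entry *table, const char *vendor)
    {
            int count = 0;

            for (; table->ident; table++)
                    if (strcmp(table->sys_vendor, vendor) == 0) {
                            count++;
                            if (table->callback && table->callback(table))
                                    break;
                    }
            return count;
    }

    int main(void)
    {
            static const struct dmi_entry quirks[] = {
                    { reverse_brightness, "NCR Corporation", "NCR Corporation" },
                    { 0 }   /* terminating entry */
            };

            if (check_system(quirks, "NCR Corporation") != 0)
                    printf("quirk_invert_brightness() would now be applied\n");
            return 0;
    }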
 
 /* Disable the VGA plane that we never use */
@@ -8961,7 +9018,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 
 /* Scan out the current hw modeset state, sanitizes it and maps it into the drm
  * and i915 state tracking structures. */
-void intel_modeset_setup_hw_state(struct drm_device *dev)
+void intel_modeset_setup_hw_state(struct drm_device *dev,
+                                 bool force_restore)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;
@@ -9060,7 +9118,15 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
                intel_sanitize_crtc(crtc);
        }
 
-       intel_modeset_update_staged_output_state(dev);
+       if (force_restore) {
+               for_each_pipe(pipe) {
+                       crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+                       intel_set_mode(&crtc->base, &crtc->base.mode,
+                                      crtc->base.x, crtc->base.y, crtc->base.fb);
+               }
+       } else {
+               intel_modeset_update_staged_output_state(dev);
+       }
 
        intel_modeset_check_state(dev);
 
@@ -9073,7 +9139,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
 
        intel_setup_overlay(dev);
 
-       intel_modeset_setup_hw_state(dev);
+       intel_modeset_setup_hw_state(dev, false);
 }
 
 void intel_modeset_cleanup(struct drm_device *dev)