Merge tag 'v3.10-rc7' into drm-next

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 970ad17c99ab1092522999a4382737b5dbffbafe..e5b6a92e710263767a2cdc82f23cf8204e19dcc4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2039,6 +2039,11 @@ i915_add_request(struct intel_ring_buffer *ring,
        request->seqno = intel_ring_get_seqno(ring);
        request->ring = ring;
        request->tail = request_ring_position;
+       request->ctx = ring->last_context;
+
+       if (request->ctx)
+               i915_gem_context_reference(request->ctx);
+
        request->emitted_jiffies = jiffies;
        was_empty = list_empty(&ring->request_list);
        list_add_tail(&request->list, &ring->request_list);
@@ -2091,6 +2096,17 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
        spin_unlock(&file_priv->mm.lock);
 }
 
+static void i915_gem_free_request(struct drm_i915_gem_request *request)
+{
+       list_del(&request->list);
+       i915_gem_request_remove_from_client(request);
+
+       if (request->ctx)
+               i915_gem_context_unreference(request->ctx);
+
+       kfree(request);
+}
+
 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
                                      struct intel_ring_buffer *ring)
 {
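
The new i915_gem_free_request() pairs the reference taken on ring->last_context at i915_add_request() time with its release, so a context cannot be freed while a request emitted under it is still outstanding, and the two teardown paths below (reset and retire) stay in sync. The reference/unreference helpers are thin kref wrappers; here is a minimal sketch of the pattern being relied on, with hypothetical demo_* names rather than the actual i915 definitions:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

/* Stand-in for the context object; only the refcount matters here. */
struct demo_ctx {
	struct kref ref;
};

static void demo_ctx_release(struct kref *ref)
{
	kfree(container_of(ref, struct demo_ctx, ref));
}

/* Analogue of i915_gem_context_reference(): taken while the context
 * is known to be alive (here, when the request is emitted). */
static void demo_ctx_get(struct demo_ctx *ctx)
{
	kref_get(&ctx->ref);
}

/* Analogue of i915_gem_context_unreference(): dropped in exactly one
 * place, the common request-free path. */
static void demo_ctx_put(struct demo_ctx *ctx)
{
	kref_put(&ctx->ref, demo_ctx_release);
}
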
@@ -2101,9 +2117,7 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
                                           struct drm_i915_gem_request,
                                           list);
 
-               list_del(&request->list);
-               i915_gem_request_remove_from_client(request);
-               kfree(request);
+               i915_gem_free_request(request);
        }
 
        while (!list_empty(&ring->active_list)) {
@@ -2195,9 +2209,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
                 */
                ring->last_retired_head = request->tail;
 
-               list_del(&request->list);
-               i915_gem_request_remove_from_client(request);
-               kfree(request);
+               i915_gem_free_request(request);
        }
 
        /* Move any buffers on the active list that are no longer referenced
@@ -2678,18 +2690,33 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
        return fence - dev_priv->fence_regs;
 }
 
+struct write_fence {
+       struct drm_device *dev;
+       struct drm_i915_gem_object *obj;
+       int fence;
+};
+
 static void i915_gem_write_fence__ipi(void *data)
 {
+       struct write_fence *args = data;
+
+       /* Required for SNB+ with LLC */
        wbinvd();
+
+       /* Required for VLV */
+       i915_gem_write_fence(args->dev, args->fence, args->obj);
 }
 
 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                                         struct drm_i915_fence_reg *fence,
                                         bool enable)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int fence_reg = fence_number(dev_priv, fence);
+       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct write_fence args = {
+               .dev = obj->base.dev,
+               .fence = fence_number(dev_priv, fence),
+               .obj = enable ? obj : NULL,
+       };
 
        /* In order to fully serialize access to the fenced region and
         * the update to the fence register we need to take extreme
@@ -2700,13 +2727,19 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
         * SNB+ we need to take a step further and emit an explicit wbinvd()
         * on each processor in order to manually flush all memory
         * transactions before updating the fence register.
+        *
+        * However, Valleyview complicates matters. There the wbinvd is
+        * insufficient and, unlike SNB/IVB, requires the serialising
+        * register write. (Note that the register write by itself is
+        * conversely not sufficient for SNB+.) To compromise, we do both.
         */
-       if (HAS_LLC(obj->base.dev))
-               on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
-       i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
+       if (INTEL_INFO(args.dev)->gen >= 6)
+               on_each_cpu(i915_gem_write_fence__ipi, &args, 1);
+       else
+               i915_gem_write_fence(args.dev, args.fence, args.obj);
 
        if (enable) {
-               obj->fence_reg = fence_reg;
+               obj->fence_reg = args.fence;
                fence->obj = obj;
                list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
        } else {
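
The write_fence argument block above exists because on_each_cpu() hands its payload to a callback that runs on every online CPU in IPI context, and with wait=1 the caller blocks until all callbacks have finished, which is what serialises the wbinvd and the fence write against concurrent accesses. A minimal, self-contained sketch of that kernel API (demo_* names are hypothetical):

#include <linux/smp.h>
#include <linux/atomic.h>

static atomic_t visited = ATOMIC_INIT(0);

/* Runs once on each online CPU, with interrupts disabled. */
static void demo_ipi(void *data)
{
	atomic_inc((atomic_t *)data);
}

static void demo_run_everywhere(void)
{
	/* wait=1: block until every CPU has executed demo_ipi(),
	 * mirroring on_each_cpu(i915_gem_write_fence__ipi, &args, 1). */
	on_each_cpu(demo_ipi, &visited, 1);
	/* atomic_read(&visited) now equals the number of online CPUs. */
}
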
@@ -2932,6 +2965,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
        struct drm_mm_node *node;
        u32 size, fence_size, fence_alignment, unfenced_alignment;
        bool mappable, fenceable;
+       size_t gtt_max = map_and_fenceable ?
+               dev_priv->gtt.mappable_end : dev_priv->gtt.total;
        int ret;
 
        fence_size = i915_gem_get_gtt_size(dev,
@@ -2958,9 +2993,11 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
        /* If the object is bigger than the entire aperture, reject it early
         * before evicting everything in a vain attempt to find space.
         */
-       if (obj->base.size >
-           (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) {
-               DRM_ERROR("Attempting to bind an object larger than the aperture\n");
+       if (obj->base.size > gtt_max) {
+               DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zu > %s aperture=%zu\n",
+                         obj->base.size,
+                         map_and_fenceable ? "mappable" : "total",
+                         gtt_max);
                return -E2BIG;
        }
 
@@ -2976,14 +3013,10 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                return -ENOMEM;
        }
 
- search_free:
-       if (map_and_fenceable)
-               ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
-                                                         size, alignment, obj->cache_level,
-                                                         0, dev_priv->gtt.mappable_end);
-       else
-               ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
-                                                size, alignment, obj->cache_level);
+search_free:
+       ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+                                                 size, alignment,
+                                                 obj->cache_level, 0, gtt_max);
        if (ret) {
                ret = i915_gem_evict_something(dev, size, alignment,
                                               obj->cache_level,
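
The allocator unification above works because a ranged search over the whole GTT degenerates to an unranged one: gtt_max is dev_priv->gtt.mappable_end for map_and_fenceable objects and dev_priv->gtt.total otherwise, so a single drm_mm_insert_node_in_range_generic() call covers both former branches. In sketch form (mm, color, mappable_end and gtt_max abbreviate the dev_priv fields used above):

/* Old: two call sites selected by map_and_fenceable. */
if (map_and_fenceable)
	ret = drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
						  color, 0, mappable_end);
else
	ret = drm_mm_insert_node_generic(mm, node, size, alignment, color);

/* New: one call site. A range of [0, gtt.total) imposes no constraint
 * beyond the allocator's own bounds, so the ranged variant behaves
 * exactly like the plain one for the non-mappable case. */
ret = drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
					  color, 0, gtt_max);
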
@@ -3977,12 +4010,20 @@ static int i915_gem_init_rings(struct drm_device *dev)
                        goto cleanup_bsd_ring;
        }
 
+       if (HAS_VEBOX(dev)) {
+               ret = intel_init_vebox_ring_buffer(dev);
+               if (ret)
+                       goto cleanup_blt_ring;
+       }
+
        ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
        if (ret)
-               goto cleanup_blt_ring;
+               goto cleanup_vebox_ring;
 
        return 0;
 
+cleanup_vebox_ring:
+       intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
 cleanup_blt_ring:
        intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
 cleanup_bsd_ring:
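
The new cleanup_vebox_ring label keeps the error path on the usual kernel unwind ladder: each initialisation step jumps to the label that tears down everything initialised before it, and the labels run in reverse order of initialisation. The shape of the pattern, reduced to a compilable skeleton with hypothetical init_X()/fini_X() stand-ins for the ring calls:

static int init_a(void) { return 0; }
static int init_b(void) { return 0; }
static int init_c(void) { return 0; }
static void fini_a(void) { }
static void fini_b(void) { }

static int demo_init_all(void)
{
	int ret;

	ret = init_a();		/* e.g. render ring */
	if (ret)
		return ret;	/* nothing to unwind yet */

	ret = init_b();		/* e.g. bsd + blt rings */
	if (ret)
		goto cleanup_a;

	ret = init_c();		/* e.g. the newly added vebox ring */
	if (ret)
		goto cleanup_b;

	return 0;

cleanup_b:			/* labels unwind in reverse order */
	fini_b();
cleanup_a:
	fini_a();
	return ret;
}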