1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include <drm/drmP.h>
29 #include <drm/i915_drm.h>
30 #include "i915_drv.h"
31 #include "i915_trace.h"
32 #include "intel_drv.h"
33 #include <linux/shmem_fs.h>
34 #include <linux/slab.h>
35 #include <linux/swap.h>
36 #include <linux/pci.h>
37 #include <linux/dma-buf.h>
38
39 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
40 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
41 static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
42                                                     unsigned alignment,
43                                                     bool map_and_fenceable,
44                                                     bool nonblocking);
45 static int i915_gem_phys_pwrite(struct drm_device *dev,
46                                 struct drm_i915_gem_object *obj,
47                                 struct drm_i915_gem_pwrite *args,
48                                 struct drm_file *file);
49
50 static void i915_gem_write_fence(struct drm_device *dev, int reg,
51                                  struct drm_i915_gem_object *obj);
52 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
53                                          struct drm_i915_fence_reg *fence,
54                                          bool enable);
55
56 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
57                                     struct shrink_control *sc);
58 static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
59 static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
60 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
61
62 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
63 {
64         if (obj->tiling_mode)
65                 i915_gem_release_mmap(obj);
66
67         /* As we do not have an associated fence register, we will force
68          * a tiling change if we ever need to acquire one.
69          */
70         obj->fence_dirty = false;
71         obj->fence_reg = I915_FENCE_REG_NONE;
72 }
73
74 /* some bookkeeping */
75 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
76                                   size_t size)
77 {
78         dev_priv->mm.object_count++;
79         dev_priv->mm.object_memory += size;
80 }
81
82 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
83                                      size_t size)
84 {
85         dev_priv->mm.object_count--;
86         dev_priv->mm.object_memory -= size;
87 }
88
89 static int
90 i915_gem_wait_for_error(struct i915_gpu_error *error)
91 {
92         int ret;
93
94 #define EXIT_COND (!i915_reset_in_progress(error))
95         if (EXIT_COND)
96                 return 0;
97
98         /* GPU is already declared terminally dead, give up. */
99         if (i915_terminally_wedged(error))
100                 return -EIO;
101
102         /*
103          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
104          * userspace. If it takes that long something really bad is going on and
105          * we should simply try to bail out and fail as gracefully as possible.
106          */
107         ret = wait_event_interruptible_timeout(error->reset_queue,
108                                                EXIT_COND,
109                                                10*HZ);
110         if (ret == 0) {
111                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
112                 return -EIO;
113         } else if (ret < 0) {
114                 return ret;
115         }
116 #undef EXIT_COND
117
118         return 0;
119 }
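
/*
 * Editor's note: a minimal sketch (not part of the original source) of the
 * tri-state return convention that i915_gem_wait_for_error() relies on:
 * wait_event_interruptible_timeout() returns 0 on timeout, a negative errno
 * if a signal arrived, and the remaining jiffies (> 0) on success.
 */
static inline int sketch_wait_ret_to_errno(long wait_ret)
{
        if (wait_ret == 0)      /* timed out: treat the GPU as dead */
                return -EIO;
        if (wait_ret < 0)       /* interrupted by a signal */
                return (int)wait_ret;
        return 0;               /* condition became true in time */
}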
120
121 int i915_mutex_lock_interruptible(struct drm_device *dev)
122 {
123         struct drm_i915_private *dev_priv = dev->dev_private;
124         int ret;
125
126         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
127         if (ret)
128                 return ret;
129
130         ret = mutex_lock_interruptible(&dev->struct_mutex);
131         if (ret)
132                 return ret;
133
134         WARN_ON(i915_verify_lists(dev));
135         return 0;
136 }
137
138 static inline bool
139 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
140 {
141         return obj->gtt_space && !obj->active;
142 }
143
144 int
145 i915_gem_init_ioctl(struct drm_device *dev, void *data,
146                     struct drm_file *file)
147 {
148         struct drm_i915_private *dev_priv = dev->dev_private;
149         struct drm_i915_gem_init *args = data;
150
151         if (drm_core_check_feature(dev, DRIVER_MODESET))
152                 return -ENODEV;
153
154         if (args->gtt_start >= args->gtt_end ||
155             (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
156                 return -EINVAL;
157
158         /* GEM with user mode setting was never supported on ilk and later. */
159         if (INTEL_INFO(dev)->gen >= 5)
160                 return -ENODEV;
161
162         mutex_lock(&dev->struct_mutex);
163         i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
164                                   args->gtt_end);
165         dev_priv->gtt.mappable_end = args->gtt_end;
166         mutex_unlock(&dev->struct_mutex);
167
168         return 0;
169 }
170
171 int
172 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
173                             struct drm_file *file)
174 {
175         struct drm_i915_private *dev_priv = dev->dev_private;
176         struct drm_i915_gem_get_aperture *args = data;
177         struct drm_i915_gem_object *obj;
178         size_t pinned;
179
180         pinned = 0;
181         mutex_lock(&dev->struct_mutex);
182         list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
183                 if (obj->pin_count)
184                         pinned += obj->gtt_space->size;
185         mutex_unlock(&dev->struct_mutex);
186
187         args->aper_size = dev_priv->gtt.total;
188         args->aper_available_size = args->aper_size - pinned;
189
190         return 0;
191 }
192
193 void *i915_gem_object_alloc(struct drm_device *dev)
194 {
195         struct drm_i915_private *dev_priv = dev->dev_private;
196         return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
197 }
198
199 void i915_gem_object_free(struct drm_i915_gem_object *obj)
200 {
201         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
202         kmem_cache_free(dev_priv->slab, obj);
203 }
204
205 static int
206 i915_gem_create(struct drm_file *file,
207                 struct drm_device *dev,
208                 uint64_t size,
209                 uint32_t *handle_p)
210 {
211         struct drm_i915_gem_object *obj;
212         int ret;
213         u32 handle;
214
215         size = roundup(size, PAGE_SIZE);
216         if (size == 0)
217                 return -EINVAL;
218
219         /* Allocate the new object */
220         obj = i915_gem_alloc_object(dev, size);
221         if (obj == NULL)
222                 return -ENOMEM;
223
224         ret = drm_gem_handle_create(file, &obj->base, &handle);
225         if (ret) {
226                 drm_gem_object_release(&obj->base);
227                 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
228                 i915_gem_object_free(obj);
229                 return ret;
230         }
231
232         /* drop reference from allocate - handle holds it now */
233         drm_gem_object_unreference(&obj->base);
234         trace_i915_gem_object_create(obj);
235
236         *handle_p = handle;
237         return 0;
238 }
239
240 int
241 i915_gem_dumb_create(struct drm_file *file,
242                      struct drm_device *dev,
243                      struct drm_mode_create_dumb *args)
244 {
245         /* have to work out size/pitch and return them */
246         args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
247         args->size = args->pitch * args->height;
248         return i915_gem_create(file, dev,
249                                args->size, &args->handle);
250 }
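
/*
 * Editor's worked example (illustrative only): for a hypothetical 1021x768
 * dumb buffer at bpp = 24, bytes-per-pixel is (24 + 7) / 8 = 3, giving a raw
 * stride of 1021 * 3 = 3063 bytes, which ALIGN(3063, 64) pads to 3072; the
 * total size is then 3072 * 768 bytes.
 */
static inline u32 sketch_dumb_pitch(u32 width, u32 bpp)
{
        /* same arithmetic as i915_gem_dumb_create() above */
        return ALIGN(width * ((bpp + 7) / 8), 64);
}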
251
252 int i915_gem_dumb_destroy(struct drm_file *file,
253                           struct drm_device *dev,
254                           uint32_t handle)
255 {
256         return drm_gem_handle_delete(file, handle);
257 }
258
259 /**
260  * Creates a new mm object and returns a handle to it.
261  */
262 int
263 i915_gem_create_ioctl(struct drm_device *dev, void *data,
264                       struct drm_file *file)
265 {
266         struct drm_i915_gem_create *args = data;
267
268         return i915_gem_create(file, dev,
269                                args->size, &args->handle);
270 }
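
/*
 * Editor's sketch of the userspace side of this ioctl, assuming libdrm's
 * drmIoctl() wrapper and an already-open "drm_fd"; shown as a comment since
 * it is not kernel code, and use_handle() is a hypothetical helper:
 *
 *      struct drm_i915_gem_create create = { .size = 4096 };
 *
 *      if (drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *              use_handle(create.handle);  (handle names a 4 KiB object)
 */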
271
272 static inline int
273 __copy_to_user_swizzled(char __user *cpu_vaddr,
274                         const char *gpu_vaddr, int gpu_offset,
275                         int length)
276 {
277         int ret, cpu_offset = 0;
278
279         while (length > 0) {
280                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
281                 int this_length = min(cacheline_end - gpu_offset, length);
282                 int swizzled_gpu_offset = gpu_offset ^ 64;
283
284                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
285                                      gpu_vaddr + swizzled_gpu_offset,
286                                      this_length);
287                 if (ret)
288                         return ret + length;
289
290                 cpu_offset += this_length;
291                 gpu_offset += this_length;
292                 length -= this_length;
293         }
294
295         return 0;
296 }
297
298 static inline int
299 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
300                           const char __user *cpu_vaddr,
301                           int length)
302 {
303         int ret, cpu_offset = 0;
304
305         while (length > 0) {
306                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
307                 int this_length = min(cacheline_end - gpu_offset, length);
308                 int swizzled_gpu_offset = gpu_offset ^ 64;
309
310                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
311                                        cpu_vaddr + cpu_offset,
312                                        this_length);
313                 if (ret)
314                         return ret + length;
315
316                 cpu_offset += this_length;
317                 gpu_offset += this_length;
318                 length -= this_length;
319         }
320
321         return 0;
322 }
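
/*
 * Editor's sketch (an assumption-labeled aside, not from the original file):
 * on parts with bit-17 swizzling, bit 17 of the page's physical address is
 * XORed into bit 6 of the in-page offset. The "gpu_offset ^ 64" above is
 * exactly that bit-6 flip: it swaps the two 64-byte halves of every 128-byte
 * unit, e.g. 0 <-> 64 and 128 <-> 192.
 */
static inline int sketch_bit17_swizzle(int gpu_offset)
{
        /* toggle bit 6: swap the 64B cacheline pair within a 128B unit */
        return gpu_offset ^ 64;
}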
323
324 /* Per-page copy function for the shmem pread fastpath.
325  * Flushes invalid cachelines before reading the target if
326  * needs_clflush is set. */
327 static int
328 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
329                  char __user *user_data,
330                  bool page_do_bit17_swizzling, bool needs_clflush)
331 {
332         char *vaddr;
333         int ret;
334
335         if (unlikely(page_do_bit17_swizzling))
336                 return -EINVAL;
337
338         vaddr = kmap_atomic(page);
339         if (needs_clflush)
340                 drm_clflush_virt_range(vaddr + shmem_page_offset,
341                                        page_length);
342         ret = __copy_to_user_inatomic(user_data,
343                                       vaddr + shmem_page_offset,
344                                       page_length);
345         kunmap_atomic(vaddr);
346
347         return ret ? -EFAULT : 0;
348 }
349
350 static void
351 shmem_clflush_swizzled_range(char *addr, unsigned long length,
352                              bool swizzled)
353 {
354         if (unlikely(swizzled)) {
355                 unsigned long start = (unsigned long) addr;
356                 unsigned long end = (unsigned long) addr + length;
357
358                 /* For swizzling simply ensure that we always flush both
359                  * channels. Lame, but simple and it works. Swizzled
360                  * pwrite/pread is far from a hotpath - current userspace
361                  * doesn't use it at all. */
362                 start = round_down(start, 128);
363                 end = round_up(end, 128);
364
365                 drm_clflush_virt_range((void *)start, end - start);
366         } else {
367                 drm_clflush_virt_range(addr, length);
368         }
369
370 }
371
372 /* Only difference to the fast-path function is that this can handle bit17
373  * and uses non-atomic copy and kmap functions. */
374 static int
375 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
376                  char __user *user_data,
377                  bool page_do_bit17_swizzling, bool needs_clflush)
378 {
379         char *vaddr;
380         int ret;
381
382         vaddr = kmap(page);
383         if (needs_clflush)
384                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
385                                              page_length,
386                                              page_do_bit17_swizzling);
387
388         if (page_do_bit17_swizzling)
389                 ret = __copy_to_user_swizzled(user_data,
390                                               vaddr, shmem_page_offset,
391                                               page_length);
392         else
393                 ret = __copy_to_user(user_data,
394                                      vaddr + shmem_page_offset,
395                                      page_length);
396         kunmap(page);
397
398         return ret ? -EFAULT : 0;
399 }
400
401 static int
402 i915_gem_shmem_pread(struct drm_device *dev,
403                      struct drm_i915_gem_object *obj,
404                      struct drm_i915_gem_pread *args,
405                      struct drm_file *file)
406 {
407         char __user *user_data;
408         ssize_t remain;
409         loff_t offset;
410         int shmem_page_offset, page_length, ret = 0;
411         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
412         int prefaulted = 0;
413         int needs_clflush = 0;
414         struct sg_page_iter sg_iter;
415
416         user_data = to_user_ptr(args->data_ptr);
417         remain = args->size;
418
419         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
420
421         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
422                 /* If we're not in the cpu read domain, set ourself into the gtt
423                  * read domain and manually flush cachelines (if required). This
424                  * optimizes for the case when the gpu will dirty the data
425                  * anyway again before the next pread happens. */
426                 if (obj->cache_level == I915_CACHE_NONE)
427                         needs_clflush = 1;
428                 if (obj->gtt_space) {
429                         ret = i915_gem_object_set_to_gtt_domain(obj, false);
430                         if (ret)
431                                 return ret;
432                 }
433         }
434
435         ret = i915_gem_object_get_pages(obj);
436         if (ret)
437                 return ret;
438
439         i915_gem_object_pin_pages(obj);
440
441         offset = args->offset;
442
443         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
444                          offset >> PAGE_SHIFT) {
445                 struct page *page = sg_page_iter_page(&sg_iter);
446
447                 if (remain <= 0)
448                         break;
449
450                 /* Operation in this page
451                  *
452                  * shmem_page_offset = offset within page in shmem file
453                  * page_length = bytes to copy for this page
454                  */
455                 shmem_page_offset = offset_in_page(offset);
456                 page_length = remain;
457                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
458                         page_length = PAGE_SIZE - shmem_page_offset;
459
460                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
461                         (page_to_phys(page) & (1 << 17)) != 0;
462
463                 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
464                                        user_data, page_do_bit17_swizzling,
465                                        needs_clflush);
466                 if (ret == 0)
467                         goto next_page;
468
469                 mutex_unlock(&dev->struct_mutex);
470
471                 if (!prefaulted) {
472                         ret = fault_in_multipages_writeable(user_data, remain);
473                         /* Userspace is tricking us, but we've already clobbered
474                          * its pages with the prefault and promised to write the
475                          * data up to the first fault. Hence ignore any errors
476                          * and just continue. */
477                         (void)ret;
478                         prefaulted = 1;
479                 }
480
481                 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
482                                        user_data, page_do_bit17_swizzling,
483                                        needs_clflush);
484
485                 mutex_lock(&dev->struct_mutex);
486
487 next_page:
488                 mark_page_accessed(page);
489
490                 if (ret)
491                         goto out;
492
493                 remain -= page_length;
494                 user_data += page_length;
495                 offset += page_length;
496         }
497
498 out:
499         i915_gem_object_unpin_pages(obj);
500
501         return ret;
502 }
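
/*
 * Editor's worked example for the per-page splitting above (illustrative):
 * a pread of 0x100 bytes at offset 0x1f80 crosses a page boundary, so the
 * loop copies 0x80 bytes at in-page offset 0xf80 from the first page and the
 * remaining 0x80 bytes at offset 0 from the next page.
 */
static inline size_t sketch_page_chunk(loff_t offset, size_t remain)
{
        size_t page_off = offset_in_page(offset);

        return min_t(size_t, remain, PAGE_SIZE - page_off);
}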
503
504 /**
505  * Reads data from the object referenced by handle.
506  *
507  * On error, the contents of *data are undefined.
508  */
509 int
510 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
511                      struct drm_file *file)
512 {
513         struct drm_i915_gem_pread *args = data;
514         struct drm_i915_gem_object *obj;
515         int ret = 0;
516
517         if (args->size == 0)
518                 return 0;
519
520         if (!access_ok(VERIFY_WRITE,
521                        to_user_ptr(args->data_ptr),
522                        args->size))
523                 return -EFAULT;
524
525         ret = i915_mutex_lock_interruptible(dev);
526         if (ret)
527                 return ret;
528
529         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
530         if (&obj->base == NULL) {
531                 ret = -ENOENT;
532                 goto unlock;
533         }
534
535         /* Bounds check source.  */
536         if (args->offset > obj->base.size ||
537             args->size > obj->base.size - args->offset) {
538                 ret = -EINVAL;
539                 goto out;
540         }
541
542         /* prime objects have no backing filp to GEM pread/pwrite
543          * pages from.
544          */
545         if (!obj->base.filp) {
546                 ret = -EINVAL;
547                 goto out;
548         }
549
550         trace_i915_gem_object_pread(obj, args->offset, args->size);
551
552         ret = i915_gem_shmem_pread(dev, obj, args, file);
553
554 out:
555         drm_gem_object_unreference(&obj->base);
556 unlock:
557         mutex_unlock(&dev->struct_mutex);
558         return ret;
559 }
560
561 /* This is the fast write path which cannot handle
562  * page faults in the source data
563  */
564
565 static inline int
566 fast_user_write(struct io_mapping *mapping,
567                 loff_t page_base, int page_offset,
568                 char __user *user_data,
569                 int length)
570 {
571         void __iomem *vaddr_atomic;
572         void *vaddr;
573         unsigned long unwritten;
574
575         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
576         /* We can use the cpu mem copy function because this is X86. */
577         vaddr = (void __force*)vaddr_atomic + page_offset;
578         unwritten = __copy_from_user_inatomic_nocache(vaddr,
579                                                       user_data, length);
580         io_mapping_unmap_atomic(vaddr_atomic);
581         return unwritten;
582 }
583
584 /**
585  * This is the fast pwrite path, where we copy the data directly from the
586  * user into the GTT, uncached.
587  */
588 static int
589 i915_gem_gtt_pwrite_fast(struct drm_device *dev,
590                          struct drm_i915_gem_object *obj,
591                          struct drm_i915_gem_pwrite *args,
592                          struct drm_file *file)
593 {
594         drm_i915_private_t *dev_priv = dev->dev_private;
595         ssize_t remain;
596         loff_t offset, page_base;
597         char __user *user_data;
598         int page_offset, page_length, ret;
599
600         ret = i915_gem_object_pin(obj, 0, true, true);
601         if (ret)
602                 goto out;
603
604         ret = i915_gem_object_set_to_gtt_domain(obj, true);
605         if (ret)
606                 goto out_unpin;
607
608         ret = i915_gem_object_put_fence(obj);
609         if (ret)
610                 goto out_unpin;
611
612         user_data = to_user_ptr(args->data_ptr);
613         remain = args->size;
614
615         offset = obj->gtt_offset + args->offset;
616
617         while (remain > 0) {
618                 /* Operation in this page
619                  *
620                  * page_base = page offset within aperture
621                  * page_offset = offset within page
622                  * page_length = bytes to copy for this page
623                  */
624                 page_base = offset & PAGE_MASK;
625                 page_offset = offset_in_page(offset);
626                 page_length = remain;
627                 if ((page_offset + remain) > PAGE_SIZE)
628                         page_length = PAGE_SIZE - page_offset;
629
630                 /* If we get a fault while copying data, then (presumably) our
631                  * source page isn't available.  Return the error and we'll
632                  * retry in the slow path.
633                  */
634                 if (fast_user_write(dev_priv->gtt.mappable, page_base,
635                                     page_offset, user_data, page_length)) {
636                         ret = -EFAULT;
637                         goto out_unpin;
638                 }
639
640                 remain -= page_length;
641                 user_data += page_length;
642                 offset += page_length;
643         }
644
645 out_unpin:
646         i915_gem_object_unpin(obj);
647 out:
648         return ret;
649 }
650
651 /* Per-page copy function for the shmem pwrite fastpath.
652  * Flushes invalid cachelines before writing to the target if
653  * needs_clflush_before is set and flushes out any written cachelines after
654  * writing if needs_clflush is set. */
655 static int
656 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
657                   char __user *user_data,
658                   bool page_do_bit17_swizzling,
659                   bool needs_clflush_before,
660                   bool needs_clflush_after)
661 {
662         char *vaddr;
663         int ret;
664
665         if (unlikely(page_do_bit17_swizzling))
666                 return -EINVAL;
667
668         vaddr = kmap_atomic(page);
669         if (needs_clflush_before)
670                 drm_clflush_virt_range(vaddr + shmem_page_offset,
671                                        page_length);
672         ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
673                                                 user_data,
674                                                 page_length);
675         if (needs_clflush_after)
676                 drm_clflush_virt_range(vaddr + shmem_page_offset,
677                                        page_length);
678         kunmap_atomic(vaddr);
679
680         return ret ? -EFAULT : 0;
681 }
682
683 /* Only difference to the fast-path function is that this can handle bit17
684  * and uses non-atomic copy and kmap functions. */
685 static int
686 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
687                   char __user *user_data,
688                   bool page_do_bit17_swizzling,
689                   bool needs_clflush_before,
690                   bool needs_clflush_after)
691 {
692         char *vaddr;
693         int ret;
694
695         vaddr = kmap(page);
696         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
697                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
698                                              page_length,
699                                              page_do_bit17_swizzling);
700         if (page_do_bit17_swizzling)
701                 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
702                                                 user_data,
703                                                 page_length);
704         else
705                 ret = __copy_from_user(vaddr + shmem_page_offset,
706                                        user_data,
707                                        page_length);
708         if (needs_clflush_after)
709                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
710                                              page_length,
711                                              page_do_bit17_swizzling);
712         kunmap(page);
713
714         return ret ? -EFAULT : 0;
715 }
716
717 static int
718 i915_gem_shmem_pwrite(struct drm_device *dev,
719                       struct drm_i915_gem_object *obj,
720                       struct drm_i915_gem_pwrite *args,
721                       struct drm_file *file)
722 {
723         ssize_t remain;
724         loff_t offset;
725         char __user *user_data;
726         int shmem_page_offset, page_length, ret = 0;
727         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
728         int hit_slowpath = 0;
729         int needs_clflush_after = 0;
730         int needs_clflush_before = 0;
731         struct sg_page_iter sg_iter;
732
733         user_data = to_user_ptr(args->data_ptr);
734         remain = args->size;
735
736         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
737
738         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
739                 /* If we're not in the cpu write domain, set ourself into the gtt
740                  * write domain and manually flush cachelines (if required). This
741                  * optimizes for the case when the gpu will use the data
742                  * right away and we therefore have to clflush anyway. */
743                 if (obj->cache_level == I915_CACHE_NONE)
744                         needs_clflush_after = 1;
745                 if (obj->gtt_space) {
746                         ret = i915_gem_object_set_to_gtt_domain(obj, true);
747                         if (ret)
748                                 return ret;
749                 }
750         }
751         /* The same trick applies to invalidating partially written cachelines
752          * before writing. */
753         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
754             && obj->cache_level == I915_CACHE_NONE)
755                 needs_clflush_before = 1;
756
757         ret = i915_gem_object_get_pages(obj);
758         if (ret)
759                 return ret;
760
761         i915_gem_object_pin_pages(obj);
762
763         offset = args->offset;
764         obj->dirty = 1;
765
766         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
767                          offset >> PAGE_SHIFT) {
768                 struct page *page = sg_page_iter_page(&sg_iter);
769                 int partial_cacheline_write;
770
771                 if (remain <= 0)
772                         break;
773
774                 /* Operation in this page
775                  *
776                  * shmem_page_offset = offset within page in shmem file
777                  * page_length = bytes to copy for this page
778                  */
779                 shmem_page_offset = offset_in_page(offset);
780
781                 page_length = remain;
782                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
783                         page_length = PAGE_SIZE - shmem_page_offset;
784
785                 /* If we don't overwrite a cacheline completely we need to be
786                  * careful to have up-to-date data by first clflushing. Don't
787          * overcomplicate things and flush the entire page. */
788                 partial_cacheline_write = needs_clflush_before &&
789                         ((shmem_page_offset | page_length)
790                                 & (boot_cpu_data.x86_clflush_size - 1));
791
792                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
793                         (page_to_phys(page) & (1 << 17)) != 0;
794
795                 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
796                                         user_data, page_do_bit17_swizzling,
797                                         partial_cacheline_write,
798                                         needs_clflush_after);
799                 if (ret == 0)
800                         goto next_page;
801
802                 hit_slowpath = 1;
803                 mutex_unlock(&dev->struct_mutex);
804                 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
805                                         user_data, page_do_bit17_swizzling,
806                                         partial_cacheline_write,
807                                         needs_clflush_after);
808
809                 mutex_lock(&dev->struct_mutex);
810
811 next_page:
812                 set_page_dirty(page);
813                 mark_page_accessed(page);
814
815                 if (ret)
816                         goto out;
817
818                 remain -= page_length;
819                 user_data += page_length;
820                 offset += page_length;
821         }
822
823 out:
824         i915_gem_object_unpin_pages(obj);
825
826         if (hit_slowpath) {
827                 /*
828                  * Fixup: Flush cpu caches in case we didn't flush the dirty
829                  * cachelines in-line while writing and the object moved
830                  * out of the cpu write domain while we've dropped the lock.
831                  */
832                 if (!needs_clflush_after &&
833                     obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
834                         i915_gem_clflush_object(obj);
835                         i915_gem_chipset_flush(dev);
836                 }
837         }
838
839         if (needs_clflush_after)
840                 i915_gem_chipset_flush(dev);
841
842         return ret;
843 }
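
/*
 * Editor's sketch of the partial-cacheline test used above, assuming a
 * power-of-two clflush size (64 bytes on typical x86): a write needs a
 * flush-before when either its start offset or its length leaves part of a
 * cacheline untouched.
 */
static inline bool sketch_partial_cacheline_write(int offset, int len,
                                                  int clflush_size)
{
        return ((offset | len) & (clflush_size - 1)) != 0;
}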
844
845 /**
846  * Writes data to the object referenced by handle.
847  *
848  * On error, the contents of the buffer that were to be modified are undefined.
849  */
850 int
851 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
852                       struct drm_file *file)
853 {
854         struct drm_i915_gem_pwrite *args = data;
855         struct drm_i915_gem_object *obj;
856         int ret;
857
858         if (args->size == 0)
859                 return 0;
860
861         if (!access_ok(VERIFY_READ,
862                        to_user_ptr(args->data_ptr),
863                        args->size))
864                 return -EFAULT;
865
866         ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
867                                            args->size);
868         if (ret)
869                 return -EFAULT;
870
871         ret = i915_mutex_lock_interruptible(dev);
872         if (ret)
873                 return ret;
874
875         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
876         if (&obj->base == NULL) {
877                 ret = -ENOENT;
878                 goto unlock;
879         }
880
881         /* Bounds check destination. */
882         if (args->offset > obj->base.size ||
883             args->size > obj->base.size - args->offset) {
884                 ret = -EINVAL;
885                 goto out;
886         }
887
888         /* prime objects have no backing filp to GEM pread/pwrite
889          * pages from.
890          */
891         if (!obj->base.filp) {
892                 ret = -EINVAL;
893                 goto out;
894         }
895
896         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
897
898         ret = -EFAULT;
899         /* We can only do the GTT pwrite on untiled buffers, as otherwise
900          * it would end up going through the fenced access, and we'll get
901          * different detiling behavior between reading and writing.
902          * pread/pwrite currently are reading and writing from the CPU
903          * perspective, requiring manual detiling by the client.
904          */
905         if (obj->phys_obj) {
906                 ret = i915_gem_phys_pwrite(dev, obj, args, file);
907                 goto out;
908         }
909
910         if (obj->cache_level == I915_CACHE_NONE &&
911             obj->tiling_mode == I915_TILING_NONE &&
912             obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
913                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
914                 /* Note that the gtt paths might fail with non-page-backed user
915                  * pointers (e.g. gtt mappings when moving data between
916                  * textures). Fallback to the shmem path in that case. */
917         }
918
919         if (ret == -EFAULT || ret == -ENOSPC)
920                 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
921
922 out:
923         drm_gem_object_unreference(&obj->base);
924 unlock:
925         mutex_unlock(&dev->struct_mutex);
926         return ret;
927 }
928
929 int
930 i915_gem_check_wedge(struct i915_gpu_error *error,
931                      bool interruptible)
932 {
933         if (i915_reset_in_progress(error)) {
934                 /* Non-interruptible callers can't handle -EAGAIN, hence return
935                  * -EIO unconditionally for these. */
936                 if (!interruptible)
937                         return -EIO;
938
939                 /* Recovery complete, but the reset failed ... */
940                 if (i915_terminally_wedged(error))
941                         return -EIO;
942
943                 return -EAGAIN;
944         }
945
946         return 0;
947 }
948
949 /*
950  * Compare seqno against outstanding lazy request. Emit a request if they are
951  * equal.
952  */
953 static int
954 i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
955 {
956         int ret;
957
958         BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
959
960         ret = 0;
961         if (seqno == ring->outstanding_lazy_request)
962                 ret = i915_add_request(ring, NULL, NULL);
963
964         return ret;
965 }
966
967 /**
968  * __wait_seqno - wait until execution of seqno has finished
969  * @ring: the ring expected to report seqno
970  * @seqno: target sequence number to wait for
971  * @reset_counter: reset sequence associated with the given seqno
972  * @interruptible: do an interruptible wait (normally yes)
973  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
974  *
975  * Note: It is of utmost importance that the passed in seqno and reset_counter
976  * values have been read by the caller in an smp safe manner. Where read-side
977  * locks are involved, it is sufficient to read the reset_counter before
978  * unlocking the lock that protects the seqno. For lockless tricks, the
979  * reset_counter _must_ be read before, and an appropriate smp_rmb must be
980  * inserted.
981  *
982  * Returns 0 if the seqno was found within the allotted time. Otherwise returns
983  * the errno, with the remaining time filled in via the timeout argument.
984  */
985 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
986                         unsigned reset_counter,
987                         bool interruptible, struct timespec *timeout)
988 {
989         drm_i915_private_t *dev_priv = ring->dev->dev_private;
990         struct timespec before, now, wait_time={1,0};
991         unsigned long timeout_jiffies;
992         long end;
993         bool wait_forever = true;
994         int ret;
995
996         if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
997                 return 0;
998
999         trace_i915_gem_request_wait_begin(ring, seqno);
1000
1001         if (timeout != NULL) {
1002                 wait_time = *timeout;
1003                 wait_forever = false;
1004         }
1005
1006         timeout_jiffies = timespec_to_jiffies(&wait_time);
1007
1008         if (WARN_ON(!ring->irq_get(ring)))
1009                 return -ENODEV;
1010
1011         /* Record the current time in case we are interrupted by a signal or wedged */
1012         getrawmonotonic(&before);
1013
1014 #define EXIT_COND \
1015         (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
1016          i915_reset_in_progress(&dev_priv->gpu_error) || \
1017          reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1018         do {
1019                 if (interruptible)
1020                         end = wait_event_interruptible_timeout(ring->irq_queue,
1021                                                                EXIT_COND,
1022                                                                timeout_jiffies);
1023                 else
1024                         end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1025                                                  timeout_jiffies);
1026
1027                 /* We need to check whether any gpu reset happened in between
1028                  * the caller grabbing the seqno and now ... */
1029                 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1030                         end = -EAGAIN;
1031
1032                 /* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
1033                  * gone. */
1034                 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1035                 if (ret)
1036                         end = ret;
1037         } while (end == 0 && wait_forever);
1038
1039         getrawmonotonic(&now);
1040
1041         ring->irq_put(ring);
1042         trace_i915_gem_request_wait_end(ring, seqno);
1043 #undef EXIT_COND
1044
1045         if (timeout) {
1046                 struct timespec sleep_time = timespec_sub(now, before);
1047                 *timeout = timespec_sub(*timeout, sleep_time);
1048                 if (!timespec_valid(timeout)) /* i.e. negative time remains */
1049                         set_normalized_timespec(timeout, 0, 0);
1050         }
1051
1052         switch (end) {
1053         case -EIO:
1054         case -EAGAIN: /* Wedged */
1055         case -ERESTARTSYS: /* Signal */
1056                 return (int)end;
1057         case 0: /* Timeout */
1058                 return -ETIME;
1059         default: /* Completed */
1060                 WARN_ON(end < 0); /* We're not aware of other errors */
1061                 return 0;
1062         }
1063 }
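
/*
 * Editor's sketch of the remaining-time bookkeeping in __wait_seqno(),
 * restated standalone with the same era's kernel timespec helpers: the
 * elapsed sleep is subtracted from *timeout and clamped at zero so callers
 * never see a negative remainder.
 */
static inline void sketch_account_wait(struct timespec *timeout,
                                       struct timespec before,
                                       struct timespec now)
{
        struct timespec elapsed = timespec_sub(now, before);

        *timeout = timespec_sub(*timeout, elapsed);
        if (!timespec_valid(timeout))   /* i.e. went negative */
                set_normalized_timespec(timeout, 0, 0);
}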
1064
1065 /**
1066  * Waits for a sequence number to be signaled, and cleans up the
1067  * request and object lists appropriately for that event.
1068  */
1069 int
1070 i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1071 {
1072         struct drm_device *dev = ring->dev;
1073         struct drm_i915_private *dev_priv = dev->dev_private;
1074         bool interruptible = dev_priv->mm.interruptible;
1075         int ret;
1076
1077         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1078         BUG_ON(seqno == 0);
1079
1080         ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1081         if (ret)
1082                 return ret;
1083
1084         ret = i915_gem_check_olr(ring, seqno);
1085         if (ret)
1086                 return ret;
1087
1088         return __wait_seqno(ring, seqno,
1089                             atomic_read(&dev_priv->gpu_error.reset_counter),
1090                             interruptible, NULL);
1091 }
1092
1093 /**
1094  * Ensures that all rendering to the object has completed and the object is
1095  * safe to unbind from the GTT or access from the CPU.
1096  */
1097 static __must_check int
1098 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1099                                bool readonly)
1100 {
1101         struct intel_ring_buffer *ring = obj->ring;
1102         u32 seqno;
1103         int ret;
1104
1105         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1106         if (seqno == 0)
1107                 return 0;
1108
1109         ret = i915_wait_seqno(ring, seqno);
1110         if (ret)
1111                 return ret;
1112
1113         i915_gem_retire_requests_ring(ring);
1114
1115         /* Manually manage the write flush as we may have not yet
1116          * retired the buffer.
1117          */
1118         if (obj->last_write_seqno &&
1119             i915_seqno_passed(seqno, obj->last_write_seqno)) {
1120                 obj->last_write_seqno = 0;
1121                 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1122         }
1123
1124         return 0;
1125 }
1126
1127 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1128  * as the object state may change during this call.
1129  */
1130 static __must_check int
1131 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1132                                             bool readonly)
1133 {
1134         struct drm_device *dev = obj->base.dev;
1135         struct drm_i915_private *dev_priv = dev->dev_private;
1136         struct intel_ring_buffer *ring = obj->ring;
1137         unsigned reset_counter;
1138         u32 seqno;
1139         int ret;
1140
1141         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1142         BUG_ON(!dev_priv->mm.interruptible);
1143
1144         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1145         if (seqno == 0)
1146                 return 0;
1147
1148         ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1149         if (ret)
1150                 return ret;
1151
1152         ret = i915_gem_check_olr(ring, seqno);
1153         if (ret)
1154                 return ret;
1155
1156         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1157         mutex_unlock(&dev->struct_mutex);
1158         ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
1159         mutex_lock(&dev->struct_mutex);
1160
1161         i915_gem_retire_requests_ring(ring);
1162
1163         /* Manually manage the write flush as we may have not yet
1164          * retired the buffer.
1165          */
1166         if (obj->last_write_seqno &&
1167             i915_seqno_passed(seqno, obj->last_write_seqno)) {
1168                 obj->last_write_seqno = 0;
1169                 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1170         }
1171
1172         return ret;
1173 }
1174
1175 /**
1176  * Called when user space prepares to use an object with the CPU, either
1177  * through the mmap ioctl's mapping or a GTT mapping.
1178  */
1179 int
1180 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1181                           struct drm_file *file)
1182 {
1183         struct drm_i915_gem_set_domain *args = data;
1184         struct drm_i915_gem_object *obj;
1185         uint32_t read_domains = args->read_domains;
1186         uint32_t write_domain = args->write_domain;
1187         int ret;
1188
1189         /* Only handle setting domains to types used by the CPU. */
1190         if (write_domain & I915_GEM_GPU_DOMAINS)
1191                 return -EINVAL;
1192
1193         if (read_domains & I915_GEM_GPU_DOMAINS)
1194                 return -EINVAL;
1195
1196         /* Having something in the write domain implies it's in the read
1197          * domain, and only that read domain.  Enforce that in the request.
1198          */
1199         if (write_domain != 0 && read_domains != write_domain)
1200                 return -EINVAL;
1201
1202         ret = i915_mutex_lock_interruptible(dev);
1203         if (ret)
1204                 return ret;
1205
1206         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1207         if (&obj->base == NULL) {
1208                 ret = -ENOENT;
1209                 goto unlock;
1210         }
1211
1212         /* Try to flush the object off the GPU without holding the lock.
1213          * We will repeat the flush holding the lock in the normal manner
1214          * to catch cases where we are gazumped.
1215          */
1216         ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1217         if (ret)
1218                 goto unref;
1219
1220         if (read_domains & I915_GEM_DOMAIN_GTT) {
1221                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1222
1223                 /* Silently promote "you're not bound, there was nothing to do"
1224                  * to success, since the client was just asking us to
1225                  * make sure everything was done.
1226                  */
1227                 if (ret == -EINVAL)
1228                         ret = 0;
1229         } else {
1230                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1231         }
1232
1233 unref:
1234         drm_gem_object_unreference(&obj->base);
1235 unlock:
1236         mutex_unlock(&dev->struct_mutex);
1237         return ret;
1238 }
1239
1240 /**
1241  * Called when user space has done writes to this buffer
1242  */
1243 int
1244 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1245                          struct drm_file *file)
1246 {
1247         struct drm_i915_gem_sw_finish *args = data;
1248         struct drm_i915_gem_object *obj;
1249         int ret = 0;
1250
1251         ret = i915_mutex_lock_interruptible(dev);
1252         if (ret)
1253                 return ret;
1254
1255         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1256         if (&obj->base == NULL) {
1257                 ret = -ENOENT;
1258                 goto unlock;
1259         }
1260
1261         /* Pinned buffers may be scanout, so flush the cache */
1262         if (obj->pin_count)
1263                 i915_gem_object_flush_cpu_write_domain(obj);
1264
1265         drm_gem_object_unreference(&obj->base);
1266 unlock:
1267         mutex_unlock(&dev->struct_mutex);
1268         return ret;
1269 }
1270
1271 /**
1272  * Maps the contents of an object, returning the address it is mapped
1273  * into.
1274  *
1275  * While the mapping holds a reference on the contents of the object, it doesn't
1276  * imply a ref on the object itself.
1277  */
1278 int
1279 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1280                     struct drm_file *file)
1281 {
1282         struct drm_i915_gem_mmap *args = data;
1283         struct drm_gem_object *obj;
1284         unsigned long addr;
1285
1286         obj = drm_gem_object_lookup(dev, file, args->handle);
1287         if (obj == NULL)
1288                 return -ENOENT;
1289
1290         /* prime objects have no backing filp to GEM mmap
1291          * pages from.
1292          */
1293         if (!obj->filp) {
1294                 drm_gem_object_unreference_unlocked(obj);
1295                 return -EINVAL;
1296         }
1297
1298         addr = vm_mmap(obj->filp, 0, args->size,
1299                        PROT_READ | PROT_WRITE, MAP_SHARED,
1300                        args->offset);
1301         drm_gem_object_unreference_unlocked(obj);
1302         if (IS_ERR((void *)addr))
1303                 return addr;
1304
1305         args->addr_ptr = (uint64_t) addr;
1306
1307         return 0;
1308 }
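
/*
 * Editor's sketch of the userspace side of this ioctl, assuming libdrm's
 * drmIoctl() and a hypothetical open "drm_fd"; shown as a comment since it
 * is not kernel code:
 *
 *      struct drm_i915_gem_mmap arg = { .handle = handle, .size = size };
 *
 *      if (drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
 *              ptr = (void *)(uintptr_t)arg.addr_ptr;
 */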
1309
1310 /**
1311  * i915_gem_fault - fault a page into the GTT
1312  * vma: VMA in question
1313  * vmf: fault info
1314  *
1315  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1316  * from userspace.  The fault handler takes care of binding the object to
1317  * the GTT (if needed), allocating and programming a fence register (again,
1318  * only if needed based on whether the old reg is still valid or the object
1319  * is tiled) and inserting a new PTE into the faulting process.
1320  *
1321  * Note that the faulting process may involve evicting existing objects
1322  * from the GTT and/or fence registers to make room.  So performance may
1323  * suffer if the GTT working set is large or there are few fence registers
1324  * left.
1325  */
1326 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1327 {
1328         struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1329         struct drm_device *dev = obj->base.dev;
1330         drm_i915_private_t *dev_priv = dev->dev_private;
1331         pgoff_t page_offset;
1332         unsigned long pfn;
1333         int ret = 0;
1334         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1335
1336         /* We don't use vmf->pgoff since that has the fake offset */
1337         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1338                 PAGE_SHIFT;
1339
1340         ret = i915_mutex_lock_interruptible(dev);
1341         if (ret)
1342                 goto out;
1343
1344         trace_i915_gem_object_fault(obj, page_offset, true, write);
1345
1346         /* Access to snoopable pages through the GTT is incoherent. */
1347         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1348                 ret = -EINVAL;
1349                 goto unlock;
1350         }
1351
1352         /* Now bind it into the GTT if needed */
1353         ret = i915_gem_object_pin(obj, 0, true, false);
1354         if (ret)
1355                 goto unlock;
1356
1357         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1358         if (ret)
1359                 goto unpin;
1360
1361         ret = i915_gem_object_get_fence(obj);
1362         if (ret)
1363                 goto unpin;
1364
1365         obj->fault_mappable = true;
1366
1367         pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
1368                 page_offset;
1369
1370         /* Finally, remap it using the new GTT offset */
1371         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1372 unpin:
1373         i915_gem_object_unpin(obj);
1374 unlock:
1375         mutex_unlock(&dev->struct_mutex);
1376 out:
1377         switch (ret) {
1378         case -EIO:
1379                 /* If this -EIO is due to a gpu hang, give the reset code a
1380                  * chance to clean up the mess. Otherwise return the proper
1381                  * SIGBUS. */
1382                 if (i915_terminally_wedged(&dev_priv->gpu_error))
1383                         return VM_FAULT_SIGBUS;
1384         case -EAGAIN:
1385                 /* Give the error handler a chance to run and move the
1386                  * objects off the GPU active list. Next time we service the
1387                  * fault, we should be able to transition the page into the
1388                  * GTT without touching the GPU (and so avoid further
1389                  * EIO/EAGAIN). If the GPU is wedged, then there is no issue
1390                  * with coherency, just lost writes.
1391                  */
1392                 set_need_resched();
1393         case 0:
1394         case -ERESTARTSYS:
1395         case -EINTR:
1396         case -EBUSY:
1397                 /*
1398                  * EBUSY is ok: this just means that another thread
1399                  * already did the job.
1400                  */
1401                 return VM_FAULT_NOPAGE;
1402         case -ENOMEM:
1403                 return VM_FAULT_OOM;
1404         case -ENOSPC:
1405                 return VM_FAULT_SIGBUS;
1406         default:
1407                 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1408                 return VM_FAULT_SIGBUS;
1409         }
1410 }
1411
1412 /**
1413  * i915_gem_release_mmap - remove physical page mappings
1414  * @obj: obj in question
1415  *
1416  * Preserve the reservation of the mmapping with the DRM core code, but
1417  * relinquish ownership of the pages back to the system.
1418  *
1419  * It is vital that we remove the page mapping if we have mapped a tiled
1420  * object through the GTT and then lose the fence register due to
1421  * resource pressure. Similarly if the object has been moved out of the
1422  * aperture, then pages mapped into userspace must be revoked. Removing the
1423  * mapping will then trigger a page fault on the next user access, allowing
1424  * fixup by i915_gem_fault().
1425  */
1426 void
1427 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1428 {
1429         if (!obj->fault_mappable)
1430                 return;
1431
1432         if (obj->base.dev->dev_mapping)
1433                 unmap_mapping_range(obj->base.dev->dev_mapping,
1434                                     (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1435                                     obj->base.size, 1);
1436
1437         obj->fault_mappable = false;
1438 }
1439
1440 uint32_t
1441 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1442 {
1443         uint32_t gtt_size;
1444
1445         if (INTEL_INFO(dev)->gen >= 4 ||
1446             tiling_mode == I915_TILING_NONE)
1447                 return size;
1448
1449         /* Previous chips need a power-of-two fence region when tiling */
1450         if (INTEL_INFO(dev)->gen == 3)
1451                 gtt_size = 1024*1024;
1452         else
1453                 gtt_size = 512*1024;
1454
1455         while (gtt_size < size)
1456                 gtt_size <<= 1;
1457
1458         return gtt_size;
1459 }
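
/*
 * Editor's worked example (illustrative): on gen3, a tiled object of 1.5 MiB
 * starts from the 1 MiB minimum and doubles once, so the fence region is
 * 2 MiB; untiled objects, or any object on gen4+, need no rounding at all.
 */
static inline uint32_t sketch_fence_region(uint32_t minimum, uint32_t size)
{
        uint32_t fence = minimum;       /* 1 MiB on gen3, 512 KiB before */

        while (fence < size)
                fence <<= 1;
        return fence;
}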
1460
1461 /**
1462  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1463  * @obj: object to check
1464  *
1465  * Return the required GTT alignment for an object, taking into account
1466  * potential fence register mapping.
1467  */
1468 uint32_t
1469 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1470                            int tiling_mode, bool fenced)
1471 {
1472         /*
1473          * Minimum alignment is 4k (GTT page size), but might be greater
1474          * if a fence register is needed for the object.
1475          */
1476         if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1477             tiling_mode == I915_TILING_NONE)
1478                 return 4096;
1479
1480         /*
1481          * Previous chips need to be aligned to the size of the smallest
1482          * fence register that can contain the object.
1483          */
1484         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1485 }
1486
1487 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1488 {
1489         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1490         int ret;
1491
1492         if (obj->base.map_list.map)
1493                 return 0;
1494
1495         dev_priv->mm.shrinker_no_lock_stealing = true;
1496
1497         ret = drm_gem_create_mmap_offset(&obj->base);
1498         if (ret != -ENOSPC)
1499                 goto out;
1500
1501         /* Badly fragmented mmap space? The only way we can recover
1502          * space is by destroying unwanted objects. We can't randomly release
1503          * mmap_offsets as userspace expects them to be persistent for the
1504  * lifetime of the objects. The closest we can do is to release the
1505  * offsets on purgeable objects by truncating them and marking them purged,
1506          * which prevents userspace from ever using that object again.
1507          */
1508         i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1509         ret = drm_gem_create_mmap_offset(&obj->base);
1510         if (ret != -ENOSPC)
1511                 goto out;
1512
1513         i915_gem_shrink_all(dev_priv);
1514         ret = drm_gem_create_mmap_offset(&obj->base);
1515 out:
1516         dev_priv->mm.shrinker_no_lock_stealing = false;
1517
1518         return ret;
1519 }
1520
1521 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1522 {
1523         if (!obj->base.map_list.map)
1524                 return;
1525
1526         drm_gem_free_mmap_offset(&obj->base);
1527 }
1528
1529 int
1530 i915_gem_mmap_gtt(struct drm_file *file,
1531                   struct drm_device *dev,
1532                   uint32_t handle,
1533                   uint64_t *offset)
1534 {
1535         struct drm_i915_private *dev_priv = dev->dev_private;
1536         struct drm_i915_gem_object *obj;
1537         int ret;
1538
1539         ret = i915_mutex_lock_interruptible(dev);
1540         if (ret)
1541                 return ret;
1542
1543         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1544         if (&obj->base == NULL) {
1545                 ret = -ENOENT;
1546                 goto unlock;
1547         }
1548
1549         if (obj->base.size > dev_priv->gtt.mappable_end) {
1550                 ret = -E2BIG;
1551                 goto out;
1552         }
1553
1554         if (obj->madv != I915_MADV_WILLNEED) {
1555                 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1556                 ret = -EINVAL;
1557                 goto out;
1558         }
1559
1560         ret = i915_gem_object_create_mmap_offset(obj);
1561         if (ret)
1562                 goto out;
1563
1564         *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
1565
1566 out:
1567         drm_gem_object_unreference(&obj->base);
1568 unlock:
1569         mutex_unlock(&dev->struct_mutex);
1570         return ret;
1571 }
1572
1573 /**
1574  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1575  * @dev: DRM device
1576  * @data: GTT mapping ioctl data
1577  * @file: GEM object info
1578  *
1579  * Simply returns the fake offset to userspace so it can mmap it.
1580  * The mmap call will end up in drm_gem_mmap(), which will set things
1581  * up so we can get faults in the handler above.
1582  *
1583  * The fault handler will take care of binding the object into the GTT
1584  * (since it may have been evicted to make room for something), allocating
1585  * a fence register, and mapping the appropriate aperture address into
1586  * userspace.
1587  */
1588 int
1589 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1590                         struct drm_file *file)
1591 {
1592         struct drm_i915_gem_mmap_gtt *args = data;
1593
1594         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1595 }
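
/* A minimal userspace sketch of the flow documented above (hypothetical
 * helper, error handling elided): fetch the fake offset via the ioctl,
 * then mmap it so that the first access faults into i915_gem_fault().
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

static void *map_bo_through_gtt(int fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	/* ends up in i915_gem_mmap_gtt_ioctl() above */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return MAP_FAILED;

	/* the offset is only a lookup token; the aperture PTEs are
	 * installed lazily by the fault handler */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, arg.offset);
}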
1596
1597 /* Immediately discard the backing storage */
1598 static void
1599 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1600 {
1601         struct inode *inode;
1602
1603         i915_gem_object_free_mmap_offset(obj);
1604
1605         if (obj->base.filp == NULL)
1606                 return;
1607
1608         /* Our goal here is to return as much of the memory as
1609          * possible back to the system, as we are called from OOM.
1610          * To do this we must instruct the shmfs to drop all of its
1611          * backing pages, *now*.
1612          */
1613         inode = file_inode(obj->base.filp);
1614         shmem_truncate_range(inode, 0, (loff_t)-1);
1615
1616         obj->madv = __I915_MADV_PURGED;
1617 }
1618
1619 static inline int
1620 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1621 {
1622         return obj->madv == I915_MADV_DONTNEED;
1623 }
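
/* obj->madv above is driven from userspace through the madvise ioctl. A
 * sketch (hypothetical helper, error handling elided) of marking a cached
 * buffer purgeable and later checking whether its contents survived:
 */
#include <stdbool.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* madv is I915_MADV_DONTNEED or I915_MADV_WILLNEED; returns true if the
 * buffer still holds its previous contents afterwards */
static bool gem_bo_madvise(int fd, uint32_t handle, uint32_t madv)
{
	struct drm_i915_gem_madvise arg = {
		.handle = handle,
		.madv = madv,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg))
		return false;

	return arg.retained;
}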
1624
1625 static void
1626 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1627 {
1628         struct sg_page_iter sg_iter;
1629         int ret;
1630
1631         BUG_ON(obj->madv == __I915_MADV_PURGED);
1632
1633         ret = i915_gem_object_set_to_cpu_domain(obj, true);
1634         if (ret) {
1635                 /* In the event of a disaster, abandon all caches and
1636                  * hope for the best.
1637                  */
1638                 WARN_ON(ret != -EIO);
1639                 i915_gem_clflush_object(obj);
1640                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1641         }
1642
1643         if (i915_gem_object_needs_bit17_swizzle(obj))
1644                 i915_gem_object_save_bit_17_swizzle(obj);
1645
1646         if (obj->madv == I915_MADV_DONTNEED)
1647                 obj->dirty = 0;
1648
1649         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
1650                 struct page *page = sg_page_iter_page(&sg_iter);
1651
1652                 if (obj->dirty)
1653                         set_page_dirty(page);
1654
1655                 if (obj->madv == I915_MADV_WILLNEED)
1656                         mark_page_accessed(page);
1657
1658                 page_cache_release(page);
1659         }
1660         obj->dirty = 0;
1661
1662         sg_free_table(obj->pages);
1663         kfree(obj->pages);
1664 }
1665
1666 int
1667 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1668 {
1669         const struct drm_i915_gem_object_ops *ops = obj->ops;
1670
1671         if (obj->pages == NULL)
1672                 return 0;
1673
1674         BUG_ON(obj->gtt_space);
1675
1676         if (obj->pages_pin_count)
1677                 return -EBUSY;
1678
1679         /* ->put_pages might need to allocate memory for the bit17 swizzle
1680          * array, hence protect them from being reaped by removing them from gtt
1681          * lists early. */
1682         list_del(&obj->gtt_list);
1683
1684         ops->put_pages(obj);
1685         obj->pages = NULL;
1686
1687         if (i915_gem_object_is_purgeable(obj))
1688                 i915_gem_object_truncate(obj);
1689
1690         return 0;
1691 }
1692
1693 static long
1694 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1695                   bool purgeable_only)
1696 {
1697         struct drm_i915_gem_object *obj, *next;
1698         long count = 0;
1699
1700         list_for_each_entry_safe(obj, next,
1701                                  &dev_priv->mm.unbound_list,
1702                                  gtt_list) {
1703                 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1704                     i915_gem_object_put_pages(obj) == 0) {
1705                         count += obj->base.size >> PAGE_SHIFT;
1706                         if (count >= target)
1707                                 return count;
1708                 }
1709         }
1710
1711         list_for_each_entry_safe(obj, next,
1712                                  &dev_priv->mm.inactive_list,
1713                                  mm_list) {
1714                 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1715                     i915_gem_object_unbind(obj) == 0 &&
1716                     i915_gem_object_put_pages(obj) == 0) {
1717                         count += obj->base.size >> PAGE_SHIFT;
1718                         if (count >= target)
1719                                 return count;
1720                 }
1721         }
1722
1723         return count;
1724 }
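
/* Note the ordering above: unbound objects are reaped first since their
 * pages can be dropped directly, while inactive objects must additionally
 * be unbound from the GTT, which is more expensive.
 */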
1725
1726 static long
1727 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1728 {
1729         return __i915_gem_shrink(dev_priv, target, true);
1730 }
1731
1732 static void
1733 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1734 {
1735         struct drm_i915_gem_object *obj, *next;
1736
1737         i915_gem_evict_everything(dev_priv->dev);
1738
1739         list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
1740                 i915_gem_object_put_pages(obj);
1741 }
1742
1743 static int
1744 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1745 {
1746         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1747         int page_count, i;
1748         struct address_space *mapping;
1749         struct sg_table *st;
1750         struct scatterlist *sg;
1751         struct sg_page_iter sg_iter;
1752         struct page *page;
1753         unsigned long last_pfn = 0;     /* suppress gcc warning */
1754         gfp_t gfp;
1755
1756         /* Assert that the object is not currently in any GPU domain. As it
1757          * wasn't in the GTT, there shouldn't be any way it could have been in
1758  * a GPU cache.
1759          */
1760         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1761         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1762
1763         st = kmalloc(sizeof(*st), GFP_KERNEL);
1764         if (st == NULL)
1765                 return -ENOMEM;
1766
1767         page_count = obj->base.size / PAGE_SIZE;
1768         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
1769                 sg_free_table(st);
1770                 kfree(st);
1771                 return -ENOMEM;
1772         }
1773
1774         /* Get the list of pages out of our struct file.  They'll be pinned
1775          * at this point until we release them.
1776          *
1777          * Fail silently without starting the shrinker
1778          */
1779         mapping = file_inode(obj->base.filp)->i_mapping;
1780         gfp = mapping_gfp_mask(mapping);
1781         gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1782         gfp &= ~(__GFP_IO | __GFP_WAIT);
1783         sg = st->sgl;
1784         st->nents = 0;
1785         for (i = 0; i < page_count; i++) {
1786                 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1787                 if (IS_ERR(page)) {
1788                         i915_gem_purge(dev_priv, page_count);
1789                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1790                 }
1791                 if (IS_ERR(page)) {
1792                         /* We've tried hard to allocate the memory by reaping
1793  * our own buffers; now let the real VM do its job and
1794                          * go down in flames if truly OOM.
1795                          */
1796                         gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
1797                         gfp |= __GFP_IO | __GFP_WAIT;
1798
1799                         i915_gem_shrink_all(dev_priv);
1800                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
1801                         if (IS_ERR(page))
1802                                 goto err_pages;
1803
1804                         gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1805                         gfp &= ~(__GFP_IO | __GFP_WAIT);
1806                 }
1807
1808                 if (!i || page_to_pfn(page) != last_pfn + 1) {
1809                         if (i)
1810                                 sg = sg_next(sg);
1811                         st->nents++;
1812                         sg_set_page(sg, page, PAGE_SIZE, 0);
1813                 } else {
1814                         sg->length += PAGE_SIZE;
1815                 }
1816                 last_pfn = page_to_pfn(page);
1817         }
1818
1819         sg_mark_end(sg);
1820         obj->pages = st;
1821
1822         if (i915_gem_object_needs_bit17_swizzle(obj))
1823                 i915_gem_object_do_bit_17_swizzle(obj);
1824
1825         return 0;
1826
1827 err_pages:
1828         sg_mark_end(sg);
1829         for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
1830                 page_cache_release(sg_page_iter_page(&sg_iter));
1831         sg_free_table(st);
1832         kfree(st);
1833         return PTR_ERR(page);
1834 }
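
/* The filling loop above run-length coalesces physically contiguous pages
 * into single scatterlist entries. The same idea distilled to a plain pfn
 * array (illustrative sketch only):
 */
static unsigned int count_pfn_runs(const unsigned long *pfn, unsigned int n)
{
	unsigned int i, runs = 0;

	for (i = 0; i < n; i++)
		/* a new run starts unless this pfn extends the previous one */
		if (i == 0 || pfn[i] != pfn[i - 1] + 1)
			runs++;

	return runs;	/* corresponds to st->nents in the loop above */
}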
1835
1836 /* Ensure that the associated pages are gathered from the backing storage
1837  * and pinned into our object. i915_gem_object_get_pages() may be called
1838  * multiple times before they are released by a single call to
1839  * i915_gem_object_put_pages() - once the pages are no longer referenced
1840  * either as a result of memory pressure (reaping pages under the shrinker)
1841  * or as the object is itself released.
1842  */
1843 int
1844 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1845 {
1846         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1847         const struct drm_i915_gem_object_ops *ops = obj->ops;
1848         int ret;
1849
1850         if (obj->pages)
1851                 return 0;
1852
1853         if (obj->madv != I915_MADV_WILLNEED) {
1854                 DRM_ERROR("Attempting to obtain a purgeable object\n");
1855                 return -EINVAL;
1856         }
1857
1858         BUG_ON(obj->pages_pin_count);
1859
1860         ret = ops->get_pages(obj);
1861         if (ret)
1862                 return ret;
1863
1864         list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
1865         return 0;
1866 }
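
/* Typical caller pattern (sketch): acquire the pages, then pin them for
 * as long as obj->pages is dereferenced so the shrinker cannot reap them
 * in between:
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret)
 *		return ret;
 *	i915_gem_object_pin_pages(obj);
 *	... use obj->pages ...
 *	i915_gem_object_unpin_pages(obj);
 */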
1867
1868 void
1869 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1870                                struct intel_ring_buffer *ring)
1871 {
1872         struct drm_device *dev = obj->base.dev;
1873         struct drm_i915_private *dev_priv = dev->dev_private;
1874         u32 seqno = intel_ring_get_seqno(ring);
1875
1876         BUG_ON(ring == NULL);
1877         obj->ring = ring;
1878
1879         /* Add a reference if we're newly entering the active list. */
1880         if (!obj->active) {
1881                 drm_gem_object_reference(&obj->base);
1882                 obj->active = 1;
1883         }
1884
1885         /* Move from whatever list we were on to the tail of execution. */
1886         list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1887         list_move_tail(&obj->ring_list, &ring->active_list);
1888
1889         obj->last_read_seqno = seqno;
1890
1891         if (obj->fenced_gpu_access) {
1892                 obj->last_fenced_seqno = seqno;
1893
1894                 /* Bump MRU to take account of the delayed flush */
1895                 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1896                         struct drm_i915_fence_reg *reg;
1897
1898                         reg = &dev_priv->fence_regs[obj->fence_reg];
1899                         list_move_tail(&reg->lru_list,
1900                                        &dev_priv->mm.fence_list);
1901                 }
1902         }
1903 }
1904
1905 static void
1906 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1907 {
1908         struct drm_device *dev = obj->base.dev;
1909         struct drm_i915_private *dev_priv = dev->dev_private;
1910
1911         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
1912         BUG_ON(!obj->active);
1913
1914         list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1915
1916         list_del_init(&obj->ring_list);
1917         obj->ring = NULL;
1918
1919         obj->last_read_seqno = 0;
1920         obj->last_write_seqno = 0;
1921         obj->base.write_domain = 0;
1922
1923         obj->last_fenced_seqno = 0;
1924         obj->fenced_gpu_access = false;
1925
1926         obj->active = 0;
1927         drm_gem_object_unreference(&obj->base);
1928
1929         WARN_ON(i915_verify_lists(dev));
1930 }
1931
1932 static int
1933 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
1934 {
1935         struct drm_i915_private *dev_priv = dev->dev_private;
1936         struct intel_ring_buffer *ring;
1937         int ret, i, j;
1938
1939         /* Carefully retire all requests without writing to the rings */
1940         for_each_ring(ring, dev_priv, i) {
1941                 ret = intel_ring_idle(ring);
1942                 if (ret)
1943                         return ret;
1944         }
1945         i915_gem_retire_requests(dev);
1946
1947         /* Finally reset hw state */
1948         for_each_ring(ring, dev_priv, i) {
1949                 intel_ring_init_seqno(ring, seqno);
1950
1951                 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1952                         ring->sync_seqno[j] = 0;
1953         }
1954
1955         return 0;
1956 }
1957
1958 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
1959 {
1960         struct drm_i915_private *dev_priv = dev->dev_private;
1961         int ret;
1962
1963         if (seqno == 0)
1964                 return -EINVAL;
1965
1966         /* The seqno in the HWS page needs to be set to one less than
1967          * what we will inject into the ring.
1968          */
1969         ret = i915_gem_init_seqno(dev, seqno - 1);
1970         if (ret)
1971                 return ret;
1972
1973         /* Carefully set the last_seqno value so that wrap
1974          * detection still works
1975          */
1976         dev_priv->next_seqno = seqno;
1977         dev_priv->last_seqno = seqno - 1;
1978         if (dev_priv->last_seqno == 0)
1979                 dev_priv->last_seqno--;
1980
1981         return 0;
1982 }
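
/* Worked example: i915_gem_set_seqno(dev, 1) would leave last_seqno == 0,
 * but 0 is reserved for "no seqno", so the decrement above wraps it to
 * 0xffffffff and wrap detection keeps working.
 */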
1983
1984 int
1985 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
1986 {
1987         struct drm_i915_private *dev_priv = dev->dev_private;
1988
1989         /* reserve 0 for non-seqno */
1990         if (dev_priv->next_seqno == 0) {
1991                 int ret = i915_gem_init_seqno(dev, 0);
1992                 if (ret)
1993                         return ret;
1994
1995                 dev_priv->next_seqno = 1;
1996         }
1997
1998         *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
1999         return 0;
2000 }
2001
2002 int
2003 i915_add_request(struct intel_ring_buffer *ring,
2004                  struct drm_file *file,
2005                  u32 *out_seqno)
2006 {
2007         drm_i915_private_t *dev_priv = ring->dev->dev_private;
2008         struct drm_i915_gem_request *request;
2009         u32 request_ring_position;
2010         int was_empty;
2011         int ret;
2012
2013         /*
2014          * Emit any outstanding flushes - execbuf can fail to emit the flush
2015          * after having emitted the batchbuffer command. Hence we need to fix
2016          * things up similar to emitting the lazy request. The difference here
2017          * is that the flush _must_ happen before the next request, no matter
2018          * what.
2019          */
2020         ret = intel_ring_flush_all_caches(ring);
2021         if (ret)
2022                 return ret;
2023
2024         request = kmalloc(sizeof(*request), GFP_KERNEL);
2025         if (request == NULL)
2026                 return -ENOMEM;
2027
2028
2029         /* Record the position of the start of the request so that
2030          * should we detect the updated seqno part-way through the
2031          * GPU processing the request, we never over-estimate the
2032          * position of the head.
2033          */
2034         request_ring_position = intel_ring_get_tail(ring);
2035
2036         ret = ring->add_request(ring);
2037         if (ret) {
2038                 kfree(request);
2039                 return ret;
2040         }
2041
2042         request->seqno = intel_ring_get_seqno(ring);
2043         request->ring = ring;
2044         request->tail = request_ring_position;
2045         request->ctx = ring->last_context;
2046
2047         if (request->ctx)
2048                 i915_gem_context_reference(request->ctx);
2049
2050         request->emitted_jiffies = jiffies;
2051         was_empty = list_empty(&ring->request_list);
2052         list_add_tail(&request->list, &ring->request_list);
2053         request->file_priv = NULL;
2054
2055         if (file) {
2056                 struct drm_i915_file_private *file_priv = file->driver_priv;
2057
2058                 spin_lock(&file_priv->mm.lock);
2059                 request->file_priv = file_priv;
2060                 list_add_tail(&request->client_list,
2061                               &file_priv->mm.request_list);
2062                 spin_unlock(&file_priv->mm.lock);
2063         }
2064
2065         trace_i915_gem_request_add(ring, request->seqno);
2066         ring->outstanding_lazy_request = 0;
2067
2068         if (!dev_priv->mm.suspended) {
2069                 if (i915_enable_hangcheck) {
2070                         mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2071                                   round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
2072                 }
2073                 if (was_empty) {
2074                         queue_delayed_work(dev_priv->wq,
2075                                            &dev_priv->mm.retire_work,
2076                                            round_jiffies_up_relative(HZ));
2077                         intel_mark_busy(dev_priv->dev);
2078                 }
2079         }
2080
2081         if (out_seqno)
2082                 *out_seqno = request->seqno;
2083         return 0;
2084 }
2085
2086 static inline void
2087 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2088 {
2089         struct drm_i915_file_private *file_priv = request->file_priv;
2090
2091         if (!file_priv)
2092                 return;
2093
2094         spin_lock(&file_priv->mm.lock);
2095         if (request->file_priv) {
2096                 list_del(&request->client_list);
2097                 request->file_priv = NULL;
2098         }
2099         spin_unlock(&file_priv->mm.lock);
2100 }
2101
2102 static void i915_gem_free_request(struct drm_i915_gem_request *request)
2103 {
2104         list_del(&request->list);
2105         i915_gem_request_remove_from_client(request);
2106
2107         if (request->ctx)
2108                 i915_gem_context_unreference(request->ctx);
2109
2110         kfree(request);
2111 }
2112
2113 static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2114                                       struct intel_ring_buffer *ring)
2115 {
2116         while (!list_empty(&ring->request_list)) {
2117                 struct drm_i915_gem_request *request;
2118
2119                 request = list_first_entry(&ring->request_list,
2120                                            struct drm_i915_gem_request,
2121                                            list);
2122
2123                 i915_gem_free_request(request);
2124         }
2125
2126         while (!list_empty(&ring->active_list)) {
2127                 struct drm_i915_gem_object *obj;
2128
2129                 obj = list_first_entry(&ring->active_list,
2130                                        struct drm_i915_gem_object,
2131                                        ring_list);
2132
2133                 i915_gem_object_move_to_inactive(obj);
2134         }
2135 }
2136
2137 static void i915_gem_reset_fences(struct drm_device *dev)
2138 {
2139         struct drm_i915_private *dev_priv = dev->dev_private;
2140         int i;
2141
2142         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2143                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2144
2145                 if (reg->obj)
2146                         i915_gem_object_fence_lost(reg->obj);
2147
2148                 i915_gem_write_fence(dev, i, NULL);
2149
2150                 reg->pin_count = 0;
2151                 reg->obj = NULL;
2152                 INIT_LIST_HEAD(&reg->lru_list);
2153         }
2154
2155         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
2156 }
2157
2158 void i915_gem_reset(struct drm_device *dev)
2159 {
2160         struct drm_i915_private *dev_priv = dev->dev_private;
2161         struct drm_i915_gem_object *obj;
2162         struct intel_ring_buffer *ring;
2163         int i;
2164
2165         for_each_ring(ring, dev_priv, i)
2166                 i915_gem_reset_ring_lists(dev_priv, ring);
2167
2168         /* Move everything out of the GPU domains to ensure we do any
2169          * necessary invalidation upon reuse.
2170          */
2171         list_for_each_entry(obj,
2172                             &dev_priv->mm.inactive_list,
2173                             mm_list)
2174         {
2175                 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
2176         }
2177
2178         /* The fence registers are invalidated so clear them out */
2179         i915_gem_reset_fences(dev);
2180 }
2181
2182 /**
2183  * This function clears the request list as sequence numbers are passed.
2184  */
2185 void
2186 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2187 {
2188         uint32_t seqno;
2189
2190         if (list_empty(&ring->request_list))
2191                 return;
2192
2193         WARN_ON(i915_verify_lists(ring->dev));
2194
2195         seqno = ring->get_seqno(ring, true);
2196
2197         while (!list_empty(&ring->request_list)) {
2198                 struct drm_i915_gem_request *request;
2199
2200                 request = list_first_entry(&ring->request_list,
2201                                            struct drm_i915_gem_request,
2202                                            list);
2203
2204                 if (!i915_seqno_passed(seqno, request->seqno))
2205                         break;
2206
2207                 trace_i915_gem_request_retire(ring, request->seqno);
2208                 /* We know the GPU must have read the request to have
2209                  * sent us the seqno + interrupt, so use the position
2210                  * of tail of the request to update the last known position
2211  * of the tail of the request to update the last known position
2212                  */
2213                 ring->last_retired_head = request->tail;
2214
2215                 i915_gem_free_request(request);
2216         }
2217
2218         /* Move any buffers on the active list that are no longer referenced
2219          * by the ringbuffer to the flushing/inactive lists as appropriate.
2220          */
2221         while (!list_empty(&ring->active_list)) {
2222                 struct drm_i915_gem_object *obj;
2223
2224                 obj = list_first_entry(&ring->active_list,
2225                                        struct drm_i915_gem_object,
2226                                        ring_list);
2227
2228                 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2229                         break;
2230
2231                 i915_gem_object_move_to_inactive(obj);
2232         }
2233
2234         if (unlikely(ring->trace_irq_seqno &&
2235                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2236                 ring->irq_put(ring);
2237                 ring->trace_irq_seqno = 0;
2238         }
2239
2240         WARN_ON(i915_verify_lists(ring->dev));
2241 }
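
/* Retirement depends on a wrap-safe seqno comparison. A minimal sketch of
 * the idiom helpers like i915_seqno_passed() rely on (correct while the
 * two values stay within 2^31 of each other):
 */
static inline bool seqno_passed_sketch(u32 seq1, u32 seq2)
{
	/* the signed difference handles u32 wrap-around */
	return (s32)(seq1 - seq2) >= 0;
}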
2242
2243 void
2244 i915_gem_retire_requests(struct drm_device *dev)
2245 {
2246         drm_i915_private_t *dev_priv = dev->dev_private;
2247         struct intel_ring_buffer *ring;
2248         int i;
2249
2250         for_each_ring(ring, dev_priv, i)
2251                 i915_gem_retire_requests_ring(ring);
2252 }
2253
2254 static void
2255 i915_gem_retire_work_handler(struct work_struct *work)
2256 {
2257         drm_i915_private_t *dev_priv;
2258         struct drm_device *dev;
2259         struct intel_ring_buffer *ring;
2260         bool idle;
2261         int i;
2262
2263         dev_priv = container_of(work, drm_i915_private_t,
2264                                 mm.retire_work.work);
2265         dev = dev_priv->dev;
2266
2267         /* Come back later if the device is busy... */
2268         if (!mutex_trylock(&dev->struct_mutex)) {
2269                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2270                                    round_jiffies_up_relative(HZ));
2271                 return;
2272         }
2273
2274         i915_gem_retire_requests(dev);
2275
2276         /* Send a periodic flush down the ring so we don't hold onto GEM
2277          * objects indefinitely.
2278          */
2279         idle = true;
2280         for_each_ring(ring, dev_priv, i) {
2281                 if (ring->gpu_caches_dirty)
2282                         i915_add_request(ring, NULL, NULL);
2283
2284                 idle &= list_empty(&ring->request_list);
2285         }
2286
2287         if (!dev_priv->mm.suspended && !idle)
2288                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2289                                    round_jiffies_up_relative(HZ));
2290         if (idle)
2291                 intel_mark_idle(dev);
2292
2293         mutex_unlock(&dev->struct_mutex);
2294 }
2295
2296 /**
2297  * Ensures that an object will eventually get non-busy by flushing any required
2298  * write domains, emitting any outstanding lazy request and retiring any
2299  * completed requests.
2300  */
2301 static int
2302 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2303 {
2304         int ret;
2305
2306         if (obj->active) {
2307                 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2308                 if (ret)
2309                         return ret;
2310
2311                 i915_gem_retire_requests_ring(obj->ring);
2312         }
2313
2314         return 0;
2315 }
2316
2317 /**
2318  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2319  * @DRM_IOCTL_ARGS: standard ioctl arguments
2320  *
2321  * Returns 0 if successful, else an error is returned with the remaining time in
2322  * the timeout parameter.
2323  *  -ETIME: object is still busy after timeout
2324  *  -ERESTARTSYS: signal interrupted the wait
2325  *  -ENOENT: object doesn't exist
2326  * Also possible, but rare:
2327  *  -EAGAIN: GPU wedged
2328  *  -ENOMEM: damn
2329  *  -ENODEV: Internal IRQ fail
2330  *  -E?: The add request failed
2331  *
2332  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2333  * non-zero timeout parameter the wait ioctl will wait for the given number of
2334  * nanoseconds on an object becoming unbusy. Since the wait itself does so
2335  * without holding struct_mutex, the object may become re-busied before
2336  * this function completes. A similar but shorter race condition exists
2337  * in the busy ioctl.
2338  */
2339 int
2340 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2341 {
2342         drm_i915_private_t *dev_priv = dev->dev_private;
2343         struct drm_i915_gem_wait *args = data;
2344         struct drm_i915_gem_object *obj;
2345         struct intel_ring_buffer *ring = NULL;
2346         struct timespec timeout_stack, *timeout = NULL;
2347         unsigned reset_counter;
2348         u32 seqno = 0;
2349         int ret = 0;
2350
2351         if (args->timeout_ns >= 0) {
2352                 timeout_stack = ns_to_timespec(args->timeout_ns);
2353                 timeout = &timeout_stack;
2354         }
2355
2356         ret = i915_mutex_lock_interruptible(dev);
2357         if (ret)
2358                 return ret;
2359
2360         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2361         if (&obj->base == NULL) {
2362                 mutex_unlock(&dev->struct_mutex);
2363                 return -ENOENT;
2364         }
2365
2366         /* Need to make sure the object gets inactive eventually. */
2367         ret = i915_gem_object_flush_active(obj);
2368         if (ret)
2369                 goto out;
2370
2371         if (obj->active) {
2372                 seqno = obj->last_read_seqno;
2373                 ring = obj->ring;
2374         }
2375
2376         if (seqno == 0)
2377                 goto out;
2378
2379         /* Do this after OLR check to make sure we make forward progress polling
2380          * on this IOCTL with a 0 timeout (like busy ioctl)
2381          */
2382         if (!args->timeout_ns) {
2383                 ret = -ETIME;
2384                 goto out;
2385         }
2386
2387         drm_gem_object_unreference(&obj->base);
2388         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2389         mutex_unlock(&dev->struct_mutex);
2390
2391         ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
2392         if (timeout)
2393                 args->timeout_ns = timespec_to_ns(timeout);
2394         return ret;
2395
2396 out:
2397         drm_gem_object_unreference(&obj->base);
2398         mutex_unlock(&dev->struct_mutex);
2399         return ret;
2400 }
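
/* Userspace sketch of the wait documented above (hypothetical helper,
 * error handling elided): a zero timeout merely polls busyness, and on
 * failure the remaining time is written back into *timeout_ns.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int gem_bo_wait(int fd, uint32_t handle, int64_t *timeout_ns)
{
	struct drm_i915_gem_wait wait = {
		.bo_handle = handle,
		.timeout_ns = *timeout_ns,
	};
	int ret;

	ret = ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
	*timeout_ns = wait.timeout_ns;	/* time remaining, if any */
	return ret;
}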
2401
2402 /**
2403  * i915_gem_object_sync - sync an object to a ring.
2404  *
2405  * @obj: object which may be in use on another ring.
2406  * @to: ring we wish to use the object on. May be NULL.
2407  *
2408  * This code is meant to abstract object synchronization with the GPU.
2409  * Calling with NULL implies synchronizing the object with the CPU
2410  * rather than a particular GPU ring.
2411  *
2412  * Returns 0 if successful, else propagates up the lower layer error.
2413  */
2414 int
2415 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2416                      struct intel_ring_buffer *to)
2417 {
2418         struct intel_ring_buffer *from = obj->ring;
2419         u32 seqno;
2420         int ret, idx;
2421
2422         if (from == NULL || to == from)
2423                 return 0;
2424
2425         if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2426                 return i915_gem_object_wait_rendering(obj, false);
2427
2428         idx = intel_ring_sync_index(from, to);
2429
2430         seqno = obj->last_read_seqno;
2431         if (seqno <= from->sync_seqno[idx])
2432                 return 0;
2433
2434         ret = i915_gem_check_olr(obj->ring, seqno);
2435         if (ret)
2436                 return ret;
2437
2438         ret = to->sync_to(to, from, seqno);
2439         if (!ret)
2440                 /* We use last_read_seqno because sync_to()
2441                  * might have just caused seqno wrap under
2442                  * the radar.
2443                  */
2444                 from->sync_seqno[idx] = obj->last_read_seqno;
2445
2446         return ret;
2447 }
2448
2449 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2450 {
2451         u32 old_write_domain, old_read_domains;
2452
2453         /* Force a pagefault for domain tracking on next user access */
2454         i915_gem_release_mmap(obj);
2455
2456         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2457                 return;
2458
2459         /* Wait for any direct GTT access to complete */
2460         mb();
2461
2462         old_read_domains = obj->base.read_domains;
2463         old_write_domain = obj->base.write_domain;
2464
2465         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2466         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2467
2468         trace_i915_gem_object_change_domain(obj,
2469                                             old_read_domains,
2470                                             old_write_domain);
2471 }
2472
2473 /**
2474  * Unbinds an object from the GTT aperture.
2475  */
2476 int
2477 i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2478 {
2479         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2480         int ret;
2481
2482         if (obj->gtt_space == NULL)
2483                 return 0;
2484
2485         if (obj->pin_count)
2486                 return -EBUSY;
2487
2488         BUG_ON(obj->pages == NULL);
2489
2490         ret = i915_gem_object_finish_gpu(obj);
2491         if (ret)
2492                 return ret;
2493         /* Continue on if we fail due to EIO: the GPU is hung, so we
2494          * should be safe, and we need to clean up or else we might
2495          * cause memory corruption through use-after-free.
2496          */
2497
2498         i915_gem_object_finish_gtt(obj);
2499
2500         /* release the fence reg _after_ flushing */
2501         ret = i915_gem_object_put_fence(obj);
2502         if (ret)
2503                 return ret;
2504
2505         trace_i915_gem_object_unbind(obj);
2506
2507         if (obj->has_global_gtt_mapping)
2508                 i915_gem_gtt_unbind_object(obj);
2509         if (obj->has_aliasing_ppgtt_mapping) {
2510                 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2511                 obj->has_aliasing_ppgtt_mapping = 0;
2512         }
2513         i915_gem_gtt_finish_object(obj);
2514
2515         list_del(&obj->mm_list);
2516         list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
2517         /* Avoid an unnecessary call to unbind on rebind. */
2518         obj->map_and_fenceable = true;
2519
2520         drm_mm_put_block(obj->gtt_space);
2521         obj->gtt_space = NULL;
2522         obj->gtt_offset = 0;
2523
2524         return 0;
2525 }
2526
2527 int i915_gpu_idle(struct drm_device *dev)
2528 {
2529         drm_i915_private_t *dev_priv = dev->dev_private;
2530         struct intel_ring_buffer *ring;
2531         int ret, i;
2532
2533         /* Flush everything onto the inactive list. */
2534         for_each_ring(ring, dev_priv, i) {
2535                 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2536                 if (ret)
2537                         return ret;
2538
2539                 ret = intel_ring_idle(ring);
2540                 if (ret)
2541                         return ret;
2542         }
2543
2544         return 0;
2545 }
2546
2547 static void i965_write_fence_reg(struct drm_device *dev, int reg,
2548                                  struct drm_i915_gem_object *obj)
2549 {
2550         drm_i915_private_t *dev_priv = dev->dev_private;
2551         int fence_reg;
2552         int fence_pitch_shift;
2553         uint64_t val;
2554
2555         if (INTEL_INFO(dev)->gen >= 6) {
2556                 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2557                 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2558         } else {
2559                 fence_reg = FENCE_REG_965_0;
2560                 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2561         }
2562
2563         if (obj) {
2564                 u32 size = obj->gtt_space->size;
2565
2566                 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2567                                  0xfffff000) << 32;
2568                 val |= obj->gtt_offset & 0xfffff000;
2569                 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
2570                 if (obj->tiling_mode == I915_TILING_Y)
2571                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2572                 val |= I965_FENCE_REG_VALID;
2573         } else
2574                 val = 0;
2575
2576         fence_reg += reg * 8;
2577         I915_WRITE64(fence_reg, val);
2578         POSTING_READ(fence_reg);
2579 }
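
/* Worked example of the encoding above, with illustrative numbers: a 1 MiB
 * X-tiled object at gtt_offset 0x00100000 with a 512 byte stride packs as
 *
 *	val  = (u64)(0x00100000 + 0x00100000 - 4096) << 32;  end of region
 *	val |= 0x00100000;                                   start of region
 *	val |= (u64)(512 / 128 - 1) << fence_pitch_shift;    pitch field
 *	val |= I965_FENCE_REG_VALID;
 *
 * with no I965_FENCE_TILING_Y_SHIFT bit since the object is X-tiled.
 */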
2580
2581 static void i915_write_fence_reg(struct drm_device *dev, int reg,
2582                                  struct drm_i915_gem_object *obj)
2583 {
2584         drm_i915_private_t *dev_priv = dev->dev_private;
2585         u32 val;
2586
2587         if (obj) {
2588                 u32 size = obj->gtt_space->size;
2589                 int pitch_val;
2590                 int tile_width;
2591
2592                 WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2593                      (size & -size) != size ||
2594                      (obj->gtt_offset & (size - 1)),
2595                      "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2596                      obj->gtt_offset, obj->map_and_fenceable, size);
2597
2598                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2599                         tile_width = 128;
2600                 else
2601                         tile_width = 512;
2602
2603                 /* Note: pitch better be a power of two tile widths */
2604                 pitch_val = obj->stride / tile_width;
2605                 pitch_val = ffs(pitch_val) - 1;
2606
2607                 val = obj->gtt_offset;
2608                 if (obj->tiling_mode == I915_TILING_Y)
2609                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2610                 val |= I915_FENCE_SIZE_BITS(size);
2611                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2612                 val |= I830_FENCE_REG_VALID;
2613         } else
2614                 val = 0;
2615
2616         if (reg < 8)
2617                 reg = FENCE_REG_830_0 + reg * 4;
2618         else
2619                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
2620
2621         I915_WRITE(reg, val);
2622         POSTING_READ(reg);
2623 }
2624
2625 static void i830_write_fence_reg(struct drm_device *dev, int reg,
2626                                  struct drm_i915_gem_object *obj)
2627 {
2628         drm_i915_private_t *dev_priv = dev->dev_private;
2629         uint32_t val;
2630
2631         if (obj) {
2632                 u32 size = obj->gtt_space->size;
2633                 uint32_t pitch_val;
2634
2635                 WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2636                      (size & -size) != size ||
2637                      (obj->gtt_offset & (size - 1)),
2638                      "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2639                      obj->gtt_offset, size);
2640
2641                 pitch_val = obj->stride / 128;
2642                 pitch_val = ffs(pitch_val) - 1;
2643
2644                 val = obj->gtt_offset;
2645                 if (obj->tiling_mode == I915_TILING_Y)
2646                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2647                 val |= I830_FENCE_SIZE_BITS(size);
2648                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2649                 val |= I830_FENCE_REG_VALID;
2650         } else
2651                 val = 0;
2652
2653         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2654         POSTING_READ(FENCE_REG_830_0 + reg * 4);
2655 }
2656
2657 static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2658 {
2659         return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2660 }
2661
2662 static void i915_gem_write_fence(struct drm_device *dev, int reg,
2663                                  struct drm_i915_gem_object *obj)
2664 {
2665         struct drm_i915_private *dev_priv = dev->dev_private;
2666
2667         /* Ensure that all CPU reads are completed before installing a fence
2668          * and all writes before removing the fence.
2669          */
2670         if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2671                 mb();
2672
2673         switch (INTEL_INFO(dev)->gen) {
2674         case 7:
2675         case 6:
2676         case 5:
2677         case 4: i965_write_fence_reg(dev, reg, obj); break;
2678         case 3: i915_write_fence_reg(dev, reg, obj); break;
2679         case 2: i830_write_fence_reg(dev, reg, obj); break;
2680         default: BUG();
2681         }
2682
2683         /* And similarly be paranoid that no direct access to this region
2684          * is reordered to before the fence is installed.
2685          */
2686         if (i915_gem_object_needs_mb(obj))
2687                 mb();
2688 }
2689
2690 static inline int fence_number(struct drm_i915_private *dev_priv,
2691                                struct drm_i915_fence_reg *fence)
2692 {
2693         return fence - dev_priv->fence_regs;
2694 }
2695
2696 static void i915_gem_write_fence__ipi(void *data)
2697 {
2698         wbinvd();
2699 }
2700
2701 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2702                                          struct drm_i915_fence_reg *fence,
2703                                          bool enable)
2704 {
2705         struct drm_device *dev = obj->base.dev;
2706         struct drm_i915_private *dev_priv = dev->dev_private;
2707         int fence_reg = fence_number(dev_priv, fence);
2708
2709         /* In order to fully serialize access to the fenced region and
2710          * the update to the fence register we need to take extreme
2711          * measures on SNB+. In theory, the write to the fence register
2712          * flushes all memory transactions before, and coupled with the
2713          * mb() placed around the register write we serialise all memory
2714          * operations with respect to the changes in the tiler. Yet, on
2715          * SNB+ we need to take a step further and emit an explicit wbinvd()
2716          * on each processor in order to manually flush all memory
2717          * transactions before updating the fence register.
2718          */
2719         if (HAS_LLC(obj->base.dev))
2720                 on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
2721         i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
2722
2723         if (enable) {
2724                 obj->fence_reg = fence_reg;
2725                 fence->obj = obj;
2726                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2727         } else {
2728                 obj->fence_reg = I915_FENCE_REG_NONE;
2729                 fence->obj = NULL;
2730                 list_del_init(&fence->lru_list);
2731         }
2732 }
2733
2734 static int
2735 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
2736 {
2737         if (obj->last_fenced_seqno) {
2738                 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2739                 if (ret)
2740                         return ret;
2741
2742                 obj->last_fenced_seqno = 0;
2743         }
2744
2745         obj->fenced_gpu_access = false;
2746         return 0;
2747 }
2748
2749 int
2750 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2751 {
2752         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2753         struct drm_i915_fence_reg *fence;
2754         int ret;
2755
2756         ret = i915_gem_object_wait_fence(obj);
2757         if (ret)
2758                 return ret;
2759
2760         if (obj->fence_reg == I915_FENCE_REG_NONE)
2761                 return 0;
2762
2763         fence = &dev_priv->fence_regs[obj->fence_reg];
2764
2765         i915_gem_object_fence_lost(obj);
2766         i915_gem_object_update_fence(obj, fence, false);
2767
2768         return 0;
2769 }
2770
2771 static struct drm_i915_fence_reg *
2772 i915_find_fence_reg(struct drm_device *dev)
2773 {
2774         struct drm_i915_private *dev_priv = dev->dev_private;
2775         struct drm_i915_fence_reg *reg, *avail;
2776         int i;
2777
2778         /* First try to find a free reg */
2779         avail = NULL;
2780         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2781                 reg = &dev_priv->fence_regs[i];
2782                 if (!reg->obj)
2783                         return reg;
2784
2785                 if (!reg->pin_count)
2786                         avail = reg;
2787         }
2788
2789         if (avail == NULL)
2790                 return NULL;
2791
2792         /* None available, try to steal one or wait for a user to finish */
2793         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2794                 if (reg->pin_count)
2795                         continue;
2796
2797                 return reg;
2798         }
2799
2800         return NULL;
2801 }
2802
2803 /**
2804  * i915_gem_object_get_fence - set up fencing for an object
2805  * @obj: object to map through a fence reg
2806  *
2807  * When mapping objects through the GTT, userspace wants to be able to write
2808  * to them without having to worry about swizzling if the object is tiled.
2809  * This function walks the fence regs looking for a free one for @obj,
2810  * stealing one if it can't find any.
2811  *
2812  * It then sets up the reg based on the object's properties: address, pitch
2813  * and tiling format.
2814  *
2815  * For an untiled surface, this removes any existing fence.
2816  */
2817 int
2818 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2819 {
2820         struct drm_device *dev = obj->base.dev;
2821         struct drm_i915_private *dev_priv = dev->dev_private;
2822         bool enable = obj->tiling_mode != I915_TILING_NONE;
2823         struct drm_i915_fence_reg *reg;
2824         int ret;
2825
2826         /* Have we updated the tiling parameters upon the object and so
2827          * will need to serialise the write to the associated fence register?
2828          */
2829         if (obj->fence_dirty) {
2830                 ret = i915_gem_object_wait_fence(obj);
2831                 if (ret)
2832                         return ret;
2833         }
2834
2835         /* Just update our place in the LRU if our fence is getting reused. */
2836         if (obj->fence_reg != I915_FENCE_REG_NONE) {
2837                 reg = &dev_priv->fence_regs[obj->fence_reg];
2838                 if (!obj->fence_dirty) {
2839                         list_move_tail(&reg->lru_list,
2840                                        &dev_priv->mm.fence_list);
2841                         return 0;
2842                 }
2843         } else if (enable) {
2844                 reg = i915_find_fence_reg(dev);
2845                 if (reg == NULL)
2846                         return -EDEADLK;
2847
2848                 if (reg->obj) {
2849                         struct drm_i915_gem_object *old = reg->obj;
2850
2851                         ret = i915_gem_object_wait_fence(old);
2852                         if (ret)
2853                                 return ret;
2854
2855                         i915_gem_object_fence_lost(old);
2856                 }
2857         } else
2858                 return 0;
2859
2860         i915_gem_object_update_fence(obj, reg, enable);
2861         obj->fence_dirty = false;
2862
2863         return 0;
2864 }
2865
2866 static bool i915_gem_valid_gtt_space(struct drm_device *dev,
2867                                      struct drm_mm_node *gtt_space,
2868                                      unsigned long cache_level)
2869 {
2870         struct drm_mm_node *other;
2871
2872         /* On non-LLC machines we have to be careful when putting differing
2873          * types of snoopable memory together to avoid the prefetcher
2874          * crossing memory domains and dying.
2875          */
2876         if (HAS_LLC(dev))
2877                 return true;
2878
2879         if (gtt_space == NULL)
2880                 return true;
2881
2882         if (list_empty(&gtt_space->node_list))
2883                 return true;
2884
2885         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2886         if (other->allocated && !other->hole_follows && other->color != cache_level)
2887                 return false;
2888
2889         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2890         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
2891                 return false;
2892
2893         return true;
2894 }
2895
2896 static void i915_gem_verify_gtt(struct drm_device *dev)
2897 {
2898 #if WATCH_GTT
2899         struct drm_i915_private *dev_priv = dev->dev_private;
2900         struct drm_i915_gem_object *obj;
2901         int err = 0;
2902
2903         list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
2904                 if (obj->gtt_space == NULL) {
2905                         printk(KERN_ERR "object found on GTT list with no space reserved\n");
2906                         err++;
2907                         continue;
2908                 }
2909
2910                 if (obj->cache_level != obj->gtt_space->color) {
2911                         printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
2912                                obj->gtt_space->start,
2913                                obj->gtt_space->start + obj->gtt_space->size,
2914                                obj->cache_level,
2915                                obj->gtt_space->color);
2916                         err++;
2917                         continue;
2918                 }
2919
2920                 if (!i915_gem_valid_gtt_space(dev,
2921                                               obj->gtt_space,
2922                                               obj->cache_level)) {
2923                         printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
2924                                obj->gtt_space->start,
2925                                obj->gtt_space->start + obj->gtt_space->size,
2926                                obj->cache_level);
2927                         err++;
2928                         continue;
2929                 }
2930         }
2931
2932         WARN_ON(err);
2933 #endif
2934 }
2935
2936 /**
2937  * Finds free space in the GTT aperture and binds the object there.
2938  */
2939 static int
2940 i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2941                             unsigned alignment,
2942                             bool map_and_fenceable,
2943                             bool nonblocking)
2944 {
2945         struct drm_device *dev = obj->base.dev;
2946         drm_i915_private_t *dev_priv = dev->dev_private;
2947         struct drm_mm_node *node;
2948         u32 size, fence_size, fence_alignment, unfenced_alignment;
2949         bool mappable, fenceable;
2950         int ret;
2951
2952         fence_size = i915_gem_get_gtt_size(dev,
2953                                            obj->base.size,
2954                                            obj->tiling_mode);
2955         fence_alignment = i915_gem_get_gtt_alignment(dev,
2956                                                      obj->base.size,
2957                                                      obj->tiling_mode, true);
2958         unfenced_alignment =
2959                 i915_gem_get_gtt_alignment(dev,
2960                                            obj->base.size,
2961                                            obj->tiling_mode, false);
2962
2963         if (alignment == 0)
2964                 alignment = map_and_fenceable ? fence_alignment :
2965                                                 unfenced_alignment;
2966         if (map_and_fenceable && alignment & (fence_alignment - 1)) {
2967                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2968                 return -EINVAL;
2969         }
2970
2971         size = map_and_fenceable ? fence_size : obj->base.size;
2972
2973         /* If the object is bigger than the entire aperture, reject it early
2974          * before evicting everything in a vain attempt to find space.
2975          */
2976         if (obj->base.size >
2977             (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) {
2978                 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2979                 return -E2BIG;
2980         }
2981
2982         ret = i915_gem_object_get_pages(obj);
2983         if (ret)
2984                 return ret;
2985
2986         i915_gem_object_pin_pages(obj);
2987
2988         node = kzalloc(sizeof(*node), GFP_KERNEL);
2989         if (node == NULL) {
2990                 i915_gem_object_unpin_pages(obj);
2991                 return -ENOMEM;
2992         }
2993
2994  search_free:
2995         if (map_and_fenceable)
2996                 ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
2997                                                           size, alignment, obj->cache_level,
2998                                                           0, dev_priv->gtt.mappable_end);
2999         else
3000                 ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
3001                                                  size, alignment, obj->cache_level);
3002         if (ret) {
3003                 ret = i915_gem_evict_something(dev, size, alignment,
3004                                                obj->cache_level,
3005                                                map_and_fenceable,
3006                                                nonblocking);
3007                 if (ret == 0)
3008                         goto search_free;
3009
3010                 i915_gem_object_unpin_pages(obj);
3011                 kfree(node);
3012                 return ret;
3013         }
3014         if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
3015                 i915_gem_object_unpin_pages(obj);
3016                 drm_mm_put_block(node);
3017                 return -EINVAL;
3018         }
3019
3020         ret = i915_gem_gtt_prepare_object(obj);
3021         if (ret) {
3022                 i915_gem_object_unpin_pages(obj);
3023                 drm_mm_put_block(node);
3024                 return ret;
3025         }
3026
3027         list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
3028         list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3029
3030         obj->gtt_space = node;
3031         obj->gtt_offset = node->start;
3032
3033         fenceable =
3034                 node->size == fence_size &&
3035                 (node->start & (fence_alignment - 1)) == 0;
3036
3037         mappable =
3038                 obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
3039
3040         obj->map_and_fenceable = mappable && fenceable;
3041
3042         i915_gem_object_unpin_pages(obj);
3043         trace_i915_gem_object_bind(obj, map_and_fenceable);
3044         i915_gem_verify_gtt(dev);
3045         return 0;
3046 }
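/*
 * Editor's sketch, not part of the driver: the map_and_fenceable path
 * above rejects alignments that are not multiples of the fence alignment
 * with the classic power-of-two test "x & (pow2 - 1)". Assuming
 * fence_alignment is a power of two (as the GTT alignment helpers
 * return), the hypothetical helper below is equivalent to
 * "alignment % fence_alignment != 0" without a division.
 */
static inline bool example_fence_misaligned(u32 alignment, u32 fence_alignment)
{
	/* e.g. alignment = 8192, fence_alignment = 4096 -> false (aligned) */
	return (alignment & (fence_alignment - 1)) != 0;
}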
3047
3048 void
3049 i915_gem_clflush_object(struct drm_i915_gem_object *obj)
3050 {
3051         /* If we don't have a page list set up, then we're not pinned
3052          * to the GPU, and we can ignore the cache flush because it'll happen
3053          * again at bind time.
3054          */
3055         if (obj->pages == NULL)
3056                 return;
3057
3058         /*
3059          * Stolen memory is always coherent with the GPU as it is explicitly
3060          * marked as wc by the system, or the system is cache-coherent.
3061          */
3062         if (obj->stolen)
3063                 return;
3064
3065         /* If the GPU is snooping the contents of the CPU cache,
3066          * we do not need to manually clear the CPU cache lines.  However,
3067          * the caches are only snooped when the render cache is
3068          * flushed/invalidated.  As we always have to emit invalidations
3069          * and flushes when moving into and out of the RENDER domain, correct
3070          * snooping behaviour occurs naturally as the result of our domain
3071          * tracking.
3072          */
3073         if (obj->cache_level != I915_CACHE_NONE)
3074                 return;
3075
3076         trace_i915_gem_object_clflush(obj);
3077
3078         drm_clflush_sg(obj->pages);
3079 }
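/*
 * Editor's sketch, not part of the driver: the three early returns above
 * reduce to one predicate -- a manual clflush is needed only when the
 * object has backing pages, is not in stolen memory, and is mapped
 * uncached. A hypothetical helper capturing that rule:
 */
static inline bool example_needs_clflush(const struct drm_i915_gem_object *obj)
{
	return obj->pages != NULL &&
	       obj->stolen == NULL &&
	       obj->cache_level == I915_CACHE_NONE;
}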
3080
3081 /** Flushes the GTT write domain for the object if it's dirty. */
3082 static void
3083 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3084 {
3085         uint32_t old_write_domain;
3086
3087         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3088                 return;
3089
3090         /* No actual flushing is required for the GTT write domain.  Writes
3091          * to it immediately go to main memory as far as we know, so there's
3092          * no chipset flush.  It also doesn't land in render cache.
3093          *
3094          * However, we do have to enforce the order so that all writes through
3095          * the GTT land before any writes to the device, such as updates to
3096          * the GATT itself.
3097          */
3098         wmb();
3099
3100         old_write_domain = obj->base.write_domain;
3101         obj->base.write_domain = 0;
3102
3103         trace_i915_gem_object_change_domain(obj,
3104                                             obj->base.read_domains,
3105                                             old_write_domain);
3106 }
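/*
 * Editor's sketch, not part of the driver: the wmb() above is the
 * generic store-ordering pattern -- make the payload visible before the
 * store that publishes it. Hypothetical producer:
 */
static void example_publish(u32 *payload, u32 *ready_flag)
{
	*payload = 0xdeadbeef;	/* data written first...               */
	wmb();			/* ...and guaranteed visible before... */
	*ready_flag = 1;	/* ...the flag a consumer polls        */
}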
3107
3108 /** Flushes the CPU write domain for the object if it's dirty. */
3109 static void
3110 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3111 {
3112         uint32_t old_write_domain;
3113
3114         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3115                 return;
3116
3117         i915_gem_clflush_object(obj);
3118         i915_gem_chipset_flush(obj->base.dev);
3119         old_write_domain = obj->base.write_domain;
3120         obj->base.write_domain = 0;
3121
3122         trace_i915_gem_object_change_domain(obj,
3123                                             obj->base.read_domains,
3124                                             old_write_domain);
3125 }
3126
3127 /**
3128  * Moves a single object to the GTT read, and possibly write domain.
3129  *
3130  * This function returns when the move is complete, including waiting on
3131  * flushes to occur.
3132  */
3133 int
3134 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3135 {
3136         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
3137         uint32_t old_write_domain, old_read_domains;
3138         int ret;
3139
3140         /* Not valid to be called on unbound objects. */
3141         if (obj->gtt_space == NULL)
3142                 return -EINVAL;
3143
3144         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3145                 return 0;
3146
3147         ret = i915_gem_object_wait_rendering(obj, !write);
3148         if (ret)
3149                 return ret;
3150
3151         i915_gem_object_flush_cpu_write_domain(obj);
3152
3153         /* Serialise direct access to this object with the barriers for
3154          * coherent writes from the GPU, by effectively invalidating the
3155          * GTT domain upon first access.
3156          */
3157         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3158                 mb();
3159
3160         old_write_domain = obj->base.write_domain;
3161         old_read_domains = obj->base.read_domains;
3162
3163         /* It should now be out of any other write domains, and we can update
3164          * the domain values for our changes.
3165          */
3166         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3167         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3168         if (write) {
3169                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3170                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3171                 obj->dirty = 1;
3172         }
3173
3174         trace_i915_gem_object_change_domain(obj,
3175                                             old_read_domains,
3176                                             old_write_domain);
3177
3178         /* And bump the LRU for this access */
3179         if (i915_gem_object_is_inactive(obj))
3180                 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
3181
3182         return 0;
3183 }
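/*
 * Editor's note, worked example with hypothetical values: read_domains
 * is a bitmask (an object may be readable in several domains at once)
 * while write_domain holds at most one bit. Starting from an object last
 * written by the CPU:
 *
 *   before:                          read_domains = CPU, write_domain = CPU
 *   set_to_gtt_domain(obj, false) -> read_domains = CPU | GTT,
 *                                    write_domain = 0
 *   set_to_gtt_domain(obj, true)  -> read_domains = GTT,
 *                                    write_domain = GTT
 *
 * i.e. read-only moves are additive, writable moves are exclusive.
 */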
3184
3185 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3186                                     enum i915_cache_level cache_level)
3187 {
3188         struct drm_device *dev = obj->base.dev;
3189         drm_i915_private_t *dev_priv = dev->dev_private;
3190         int ret;
3191
3192         if (obj->cache_level == cache_level)
3193                 return 0;
3194
3195         if (obj->pin_count) {
3196                 DRM_DEBUG("cannot change the cache level of pinned objects\n");
3197                 return -EBUSY;
3198         }
3199
3200         if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
3201                 ret = i915_gem_object_unbind(obj);
3202                 if (ret)
3203                         return ret;
3204         }
3205
3206         if (obj->gtt_space) {
3207                 ret = i915_gem_object_finish_gpu(obj);
3208                 if (ret)
3209                         return ret;
3210
3211                 i915_gem_object_finish_gtt(obj);
3212
3213                 /* Before SandyBridge, you could not use tiling or fence
3214                  * registers with snooped memory, so relinquish any fences
3215                  * currently pointing to our region in the aperture.
3216                  */
3217                 if (INTEL_INFO(dev)->gen < 6) {
3218                         ret = i915_gem_object_put_fence(obj);
3219                         if (ret)
3220                                 return ret;
3221                 }
3222
3223                 if (obj->has_global_gtt_mapping)
3224                         i915_gem_gtt_bind_object(obj, cache_level);
3225                 if (obj->has_aliasing_ppgtt_mapping)
3226                         i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3227                                                obj, cache_level);
3228
3229                 obj->gtt_space->color = cache_level;
3230         }
3231
3232         if (cache_level == I915_CACHE_NONE) {
3233                 u32 old_read_domains, old_write_domain;
3234
3235                 /* If we're coming from LLC cached, then we haven't
3236                  * actually been tracking whether the data is in the
3237                  * CPU cache or not, since we only allow one bit set
3238                  * in obj->write_domain and have been skipping the clflushes.
3239                  * Just set it to the CPU cache for now.
3240                  */
3241                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3242                 WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
3243
3244                 old_read_domains = obj->base.read_domains;
3245                 old_write_domain = obj->base.write_domain;
3246
3247                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3248                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3249
3250                 trace_i915_gem_object_change_domain(obj,
3251                                                     old_read_domains,
3252                                                     old_write_domain);
3253         }
3254
3255         obj->cache_level = cache_level;
3256         i915_gem_verify_gtt(dev);
3257         return 0;
3258 }
3259
3260 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3261                                struct drm_file *file)
3262 {
3263         struct drm_i915_gem_caching *args = data;
3264         struct drm_i915_gem_object *obj;
3265         int ret;
3266
3267         ret = i915_mutex_lock_interruptible(dev);
3268         if (ret)
3269                 return ret;
3270
3271         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3272         if (&obj->base == NULL) {
3273                 ret = -ENOENT;
3274                 goto unlock;
3275         }
3276
3277         args->caching = obj->cache_level != I915_CACHE_NONE;
3278
3279         drm_gem_object_unreference(&obj->base);
3280 unlock:
3281         mutex_unlock(&dev->struct_mutex);
3282         return ret;
3283 }
3284
3285 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3286                                struct drm_file *file)
3287 {
3288         struct drm_i915_gem_caching *args = data;
3289         struct drm_i915_gem_object *obj;
3290         enum i915_cache_level level;
3291         int ret;
3292
3293         switch (args->caching) {
3294         case I915_CACHING_NONE:
3295                 level = I915_CACHE_NONE;
3296                 break;
3297         case I915_CACHING_CACHED:
3298                 level = I915_CACHE_LLC;
3299                 break;
3300         default:
3301                 return -EINVAL;
3302         }
3303
3304         ret = i915_mutex_lock_interruptible(dev);
3305         if (ret)
3306                 return ret;
3307
3308         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3309         if (&obj->base == NULL) {
3310                 ret = -ENOENT;
3311                 goto unlock;
3312         }
3313
3314         ret = i915_gem_object_set_cache_level(obj, level);
3315
3316         drm_gem_object_unreference(&obj->base);
3317 unlock:
3318         mutex_unlock(&dev->struct_mutex);
3319         return ret;
3320 }
3321
3322 /*
3323  * Prepare buffer for display plane (scanout, cursors, etc).
3324  * Can be called from an uninterruptible phase (modesetting) and allows
3325  * any flushes to be pipelined (for pageflips).
3326  */
3327 int
3328 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3329                                      u32 alignment,
3330                                      struct intel_ring_buffer *pipelined)
3331 {
3332         u32 old_read_domains, old_write_domain;
3333         int ret;
3334
3335         if (pipelined != obj->ring) {
3336                 ret = i915_gem_object_sync(obj, pipelined);
3337                 if (ret)
3338                         return ret;
3339         }
3340
3341         /* The display engine is not coherent with the LLC cache on gen6.  As
3342          * a result, we make sure that the pinning that is about to occur is
3343          * done with uncached PTEs. This is the lowest common denominator for all
3344          * chipsets.
3345          *
3346          * However for gen6+, we could do better by using the GFDT bit instead
3347          * of uncaching, which would allow us to flush all the LLC-cached data
3348          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3349          */
3350         ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
3351         if (ret)
3352                 return ret;
3353
3354         /* As the user may map the buffer once pinned in the display plane
3355          * (e.g. libkms for the bootup splash), we have to ensure that we
3356          * always use map_and_fenceable for all scanout buffers.
3357          */
3358         ret = i915_gem_object_pin(obj, alignment, true, false);
3359         if (ret)
3360                 return ret;
3361
3362         i915_gem_object_flush_cpu_write_domain(obj);
3363
3364         old_write_domain = obj->base.write_domain;
3365         old_read_domains = obj->base.read_domains;
3366
3367         /* It should now be out of any other write domains, and we can update
3368          * the domain values for our changes.
3369          */
3370         obj->base.write_domain = 0;
3371         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3372
3373         trace_i915_gem_object_change_domain(obj,
3374                                             old_read_domains,
3375                                             old_write_domain);
3376
3377         return 0;
3378 }
3379
3380 int
3381 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3382 {
3383         int ret;
3384
3385         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3386                 return 0;
3387
3388         ret = i915_gem_object_wait_rendering(obj, false);
3389         if (ret)
3390                 return ret;
3391
3392         /* Ensure that we invalidate the GPU's caches and TLBs. */
3393         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3394         return 0;
3395 }
3396
3397 /**
3398  * Moves a single object to the CPU read, and possibly write domain.
3399  *
3400  * This function returns when the move is complete, including waiting on
3401  * flushes to occur.
3402  */
3403 int
3404 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3405 {
3406         uint32_t old_write_domain, old_read_domains;
3407         int ret;
3408
3409         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3410                 return 0;
3411
3412         ret = i915_gem_object_wait_rendering(obj, !write);
3413         if (ret)
3414                 return ret;
3415
3416         i915_gem_object_flush_gtt_write_domain(obj);
3417
3418         old_write_domain = obj->base.write_domain;
3419         old_read_domains = obj->base.read_domains;
3420
3421         /* Flush the CPU cache if it's still invalid. */
3422         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3423                 i915_gem_clflush_object(obj);
3424
3425                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3426         }
3427
3428         /* It should now be out of any other write domains, and we can update
3429          * the domain values for our changes.
3430          */
3431         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3432
3433         /* If we're writing through the CPU, then the GPU read domains will
3434          * need to be invalidated at next use.
3435          */
3436         if (write) {
3437                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3438                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3439         }
3440
3441         trace_i915_gem_object_change_domain(obj,
3442                                             old_read_domains,
3443                                             old_write_domain);
3444
3445         return 0;
3446 }
3447
3448 /* Throttle our rendering by waiting until the ring has completed our requests
3449  * emitted over 20 msec ago.
3450  *
3451  * Note that if we were to use the current jiffies each time around the loop,
3452  * we wouldn't escape the function with any frames outstanding if the time to
3453  * render a frame was over 20ms.
3454  *
3455  * This should get us reasonable parallelism between CPU and GPU but also
3456  * relatively low latency when blocking on a particular request to finish.
3457  */
3458 static int
3459 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3460 {
3461         struct drm_i915_private *dev_priv = dev->dev_private;
3462         struct drm_i915_file_private *file_priv = file->driver_priv;
3463         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3464         struct drm_i915_gem_request *request;
3465         struct intel_ring_buffer *ring = NULL;
3466         unsigned reset_counter;
3467         u32 seqno = 0;
3468         int ret;
3469
3470         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3471         if (ret)
3472                 return ret;
3473
3474         ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3475         if (ret)
3476                 return ret;
3477
3478         spin_lock(&file_priv->mm.lock);
3479         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3480                 if (time_after_eq(request->emitted_jiffies, recent_enough))
3481                         break;
3482
3483                 ring = request->ring;
3484                 seqno = request->seqno;
3485         }
3486         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3487         spin_unlock(&file_priv->mm.lock);
3488
3489         if (seqno == 0)
3490                 return 0;
3491
3492         ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
3493         if (ret == 0)
3494                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3495
3496         return ret;
3497 }
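/*
 * Editor's sketch, not part of the driver: "recent_enough" above is a
 * 20 ms cut-off expressed in jiffies, captured once before the loop so a
 * slow GPU cannot keep the window sliding forever. time_after_eq()
 * handles jiffies wrap-around, which a plain ">=" would get wrong.
 * Hypothetical standalone check:
 */
static bool example_request_is_recent(unsigned long emitted_jiffies)
{
	unsigned long recent_enough = jiffies - msecs_to_jiffies(20);

	return time_after_eq(emitted_jiffies, recent_enough);
}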
3498
3499 int
3500 i915_gem_object_pin(struct drm_i915_gem_object *obj,
3501                     uint32_t alignment,
3502                     bool map_and_fenceable,
3503                     bool nonblocking)
3504 {
3505         int ret;
3506
3507         if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3508                 return -EBUSY;
3509
3510         if (obj->gtt_space != NULL) {
3511                 if ((alignment && obj->gtt_offset & (alignment - 1)) ||
3512                     (map_and_fenceable && !obj->map_and_fenceable)) {
3513                         WARN(obj->pin_count,
3514                              "bo is already pinned with incorrect alignment:"
3515                              " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
3516                              " obj->map_and_fenceable=%d\n",
3517                              obj->gtt_offset, alignment,
3518                              map_and_fenceable,
3519                              obj->map_and_fenceable);
3520                         ret = i915_gem_object_unbind(obj);
3521                         if (ret)
3522                                 return ret;
3523                 }
3524         }
3525
3526         if (obj->gtt_space == NULL) {
3527                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3528
3529                 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3530                                                   map_and_fenceable,
3531                                                   nonblocking);
3532                 if (ret)
3533                         return ret;
3534
3535                 if (!dev_priv->mm.aliasing_ppgtt)
3536                         i915_gem_gtt_bind_object(obj, obj->cache_level);
3537         }
3538
3539         if (!obj->has_global_gtt_mapping && map_and_fenceable)
3540                 i915_gem_gtt_bind_object(obj, obj->cache_level);
3541
3542         obj->pin_count++;
3543         obj->pin_mappable |= map_and_fenceable;
3544
3545         return 0;
3546 }
3547
3548 void
3549 i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3550 {
3551         BUG_ON(obj->pin_count == 0);
3552         BUG_ON(obj->gtt_space == NULL);
3553
3554         if (--obj->pin_count == 0)
3555                 obj->pin_mappable = false;
3556 }
3557
3558 int
3559 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3560                    struct drm_file *file)
3561 {
3562         struct drm_i915_gem_pin *args = data;
3563         struct drm_i915_gem_object *obj;
3564         int ret;
3565
3566         ret = i915_mutex_lock_interruptible(dev);
3567         if (ret)
3568                 return ret;
3569
3570         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3571         if (&obj->base == NULL) {
3572                 ret = -ENOENT;
3573                 goto unlock;
3574         }
3575
3576         if (obj->madv != I915_MADV_WILLNEED) {
3577                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
3578                 ret = -EINVAL;
3579                 goto out;
3580         }
3581
3582         if (obj->pin_filp != NULL && obj->pin_filp != file) {
3583                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3584                           args->handle);
3585                 ret = -EINVAL;
3586                 goto out;
3587         }
3588
3589         if (obj->user_pin_count == 0) {
3590                 ret = i915_gem_object_pin(obj, args->alignment, true, false);
3591                 if (ret)
3592                         goto out;
3593         }
3594
3595         obj->user_pin_count++;
3596         obj->pin_filp = file;
3597
3598         /* XXX - flush the CPU caches for pinned objects
3599          * as the X server doesn't manage domains yet
3600          */
3601         i915_gem_object_flush_cpu_write_domain(obj);
3602         args->offset = obj->gtt_offset;
3603 out:
3604         drm_gem_object_unreference(&obj->base);
3605 unlock:
3606         mutex_unlock(&dev->struct_mutex);
3607         return ret;
3608 }
3609
3610 int
3611 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3612                      struct drm_file *file)
3613 {
3614         struct drm_i915_gem_pin *args = data;
3615         struct drm_i915_gem_object *obj;
3616         int ret;
3617
3618         ret = i915_mutex_lock_interruptible(dev);
3619         if (ret)
3620                 return ret;
3621
3622         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3623         if (&obj->base == NULL) {
3624                 ret = -ENOENT;
3625                 goto unlock;
3626         }
3627
3628         if (obj->pin_filp != file) {
3629                 DRM_ERROR("Not pinned by caller in i915_gem_unpin_ioctl(): %d\n",
3630                           args->handle);
3631                 ret = -EINVAL;
3632                 goto out;
3633         }
3634         obj->user_pin_count--;
3635         if (obj->user_pin_count == 0) {
3636                 obj->pin_filp = NULL;
3637                 i915_gem_object_unpin(obj);
3638         }
3639
3640 out:
3641         drm_gem_object_unreference(&obj->base);
3642 unlock:
3643         mutex_unlock(&dev->struct_mutex);
3644         return ret;
3645 }
3646
3647 int
3648 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3649                     struct drm_file *file)
3650 {
3651         struct drm_i915_gem_busy *args = data;
3652         struct drm_i915_gem_object *obj;
3653         int ret;
3654
3655         ret = i915_mutex_lock_interruptible(dev);
3656         if (ret)
3657                 return ret;
3658
3659         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3660         if (&obj->base == NULL) {
3661                 ret = -ENOENT;
3662                 goto unlock;
3663         }
3664
3665         /* Count all active objects as busy, even if they are not currently
3666          * in use by the GPU. Users of this interface expect objects to eventually
3667          * become non-busy without any further actions, therefore emit any
3668          * necessary flushes here.
3669          */
3670         ret = i915_gem_object_flush_active(obj);
3671
3672         args->busy = obj->active;
3673         if (obj->ring) {
3674                 BUILD_BUG_ON(I915_NUM_RINGS > 16);
3675                 args->busy |= intel_ring_flag(obj->ring) << 16;
3676         }
3677
3678         drm_gem_object_unreference(&obj->base);
3679 unlock:
3680         mutex_unlock(&dev->struct_mutex);
3681         return ret;
3682 }
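/*
 * Editor's sketch, not part of the driver: the busy word returned above
 * packs the active flag into the low bits and the ring flag
 * (1 << ring id, per intel_ring_flag()) into the upper 16 bits. With
 * hypothetical values -- active on ring 2 -- userspace sees 0x00040001:
 */
static u32 example_pack_busy(bool active, u32 ring_flag)
{
	u32 busy = active;

	if (active)
		busy |= ring_flag << 16;	/* e.g. (1 << 2) << 16 */
	return busy;
}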
3683
3684 int
3685 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3686                         struct drm_file *file_priv)
3687 {
3688         return i915_gem_ring_throttle(dev, file_priv);
3689 }
3690
3691 int
3692 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3693                        struct drm_file *file_priv)
3694 {
3695         struct drm_i915_gem_madvise *args = data;
3696         struct drm_i915_gem_object *obj;
3697         int ret;
3698
3699         switch (args->madv) {
3700         case I915_MADV_DONTNEED:
3701         case I915_MADV_WILLNEED:
3702             break;
3703         default:
3704             return -EINVAL;
3705         }
3706
3707         ret = i915_mutex_lock_interruptible(dev);
3708         if (ret)
3709                 return ret;
3710
3711         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3712         if (&obj->base == NULL) {
3713                 ret = -ENOENT;
3714                 goto unlock;
3715         }
3716
3717         if (obj->pin_count) {
3718                 ret = -EINVAL;
3719                 goto out;
3720         }
3721
3722         if (obj->madv != __I915_MADV_PURGED)
3723                 obj->madv = args->madv;
3724
3725         /* if the object is no longer attached, discard its backing storage */
3726         if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
3727                 i915_gem_object_truncate(obj);
3728
3729         args->retained = obj->madv != __I915_MADV_PURGED;
3730
3731 out:
3732         drm_gem_object_unreference(&obj->base);
3733 unlock:
3734         mutex_unlock(&dev->struct_mutex);
3735         return ret;
3736 }
3737
3738 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3739                           const struct drm_i915_gem_object_ops *ops)
3740 {
3741         INIT_LIST_HEAD(&obj->mm_list);
3742         INIT_LIST_HEAD(&obj->gtt_list);
3743         INIT_LIST_HEAD(&obj->ring_list);
3744         INIT_LIST_HEAD(&obj->exec_list);
3745
3746         obj->ops = ops;
3747
3748         obj->fence_reg = I915_FENCE_REG_NONE;
3749         obj->madv = I915_MADV_WILLNEED;
3750         /* Avoid an unnecessary call to unbind on the first bind. */
3751         obj->map_and_fenceable = true;
3752
3753         i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
3754 }
3755
3756 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3757         .get_pages = i915_gem_object_get_pages_gtt,
3758         .put_pages = i915_gem_object_put_pages_gtt,
3759 };
3760
3761 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3762                                                   size_t size)
3763 {
3764         struct drm_i915_gem_object *obj;
3765         struct address_space *mapping;
3766         gfp_t mask;
3767
3768         obj = i915_gem_object_alloc(dev);
3769         if (obj == NULL)
3770                 return NULL;
3771
3772         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3773                 i915_gem_object_free(obj);
3774                 return NULL;
3775         }
3776
3777         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
3778         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
3779                 /* 965gm cannot relocate objects above 4GiB. */
3780                 mask &= ~__GFP_HIGHMEM;
3781                 mask |= __GFP_DMA32;
3782         }
3783
3784         mapping = file_inode(obj->base.filp)->i_mapping;
3785         mapping_set_gfp_mask(mapping, mask);
3786
3787         i915_gem_object_init(obj, &i915_gem_object_ops);
3788
3789         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3790         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3791
3792         if (HAS_LLC(dev)) {
3793                 /* On some devices, we can have the GPU use the LLC (the CPU
3794                  * cache) for about a 10% performance improvement
3795                  * compared to uncached.  Graphics requests other than
3796                  * display scanout are coherent with the CPU in
3797                  * accessing this cache.  This means in this mode we
3798                  * don't need to clflush on the CPU side, and on the
3799                  * GPU side we only need to flush internal caches to
3800                  * get data visible to the CPU.
3801                  *
3802                  * However, we maintain the display planes as UC, and so
3803                  * need to rebind when first used as such.
3804                  */
3805                 obj->cache_level = I915_CACHE_LLC;
3806         } else
3807                 obj->cache_level = I915_CACHE_NONE;
3808
3809         return obj;
3810 }
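/*
 * Editor's sketch, not part of the driver: the gfp mask set above steers
 * where shmem allocates backing pages. Clearing __GFP_HIGHMEM and
 * setting __GFP_DMA32 confines them to the low 4 GiB, matching the
 * relocation limit noted in the comment. Hypothetical standalone
 * computation:
 */
static gfp_t example_backing_gfp(bool needs_32bit_pages)
{
	gfp_t mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;

	if (needs_32bit_pages) {
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}
	return mask;
}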
3811
3812 int i915_gem_init_object(struct drm_gem_object *obj)
3813 {
3814         BUG();
3815
3816         return 0;
3817 }
3818
3819 void i915_gem_free_object(struct drm_gem_object *gem_obj)
3820 {
3821         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
3822         struct drm_device *dev = obj->base.dev;
3823         drm_i915_private_t *dev_priv = dev->dev_private;
3824
3825         trace_i915_gem_object_destroy(obj);
3826
3827         if (obj->phys_obj)
3828                 i915_gem_detach_phys_object(dev, obj);
3829
3830         obj->pin_count = 0;
3831         if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
3832                 bool was_interruptible;
3833
3834                 was_interruptible = dev_priv->mm.interruptible;
3835                 dev_priv->mm.interruptible = false;
3836
3837                 WARN_ON(i915_gem_object_unbind(obj));
3838
3839                 dev_priv->mm.interruptible = was_interruptible;
3840         }
3841
3842         obj->pages_pin_count = 0;
3843         i915_gem_object_put_pages(obj);
3844         i915_gem_object_free_mmap_offset(obj);
3845         i915_gem_object_release_stolen(obj);
3846
3847         BUG_ON(obj->pages);
3848
3849         if (obj->base.import_attach)
3850                 drm_prime_gem_destroy(&obj->base, NULL);
3851
3852         drm_gem_object_release(&obj->base);
3853         i915_gem_info_remove_obj(dev_priv, obj->base.size);
3854
3855         kfree(obj->bit_17);
3856         i915_gem_object_free(obj);
3857 }
3858
3859 int
3860 i915_gem_idle(struct drm_device *dev)
3861 {
3862         drm_i915_private_t *dev_priv = dev->dev_private;
3863         int ret;
3864
3865         mutex_lock(&dev->struct_mutex);
3866
3867         if (dev_priv->mm.suspended) {
3868                 mutex_unlock(&dev->struct_mutex);
3869                 return 0;
3870         }
3871
3872         ret = i915_gpu_idle(dev);
3873         if (ret) {
3874                 mutex_unlock(&dev->struct_mutex);
3875                 return ret;
3876         }
3877         i915_gem_retire_requests(dev);
3878
3879         /* Under UMS, be paranoid and evict. */
3880         if (!drm_core_check_feature(dev, DRIVER_MODESET))
3881                 i915_gem_evict_everything(dev);
3882
3883         i915_gem_reset_fences(dev);
3884
3885         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
3886          * We need to replace this with a semaphore, or something.
3887          * And not confound mm.suspended!
3888          */
3889         dev_priv->mm.suspended = 1;
3890         del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
3891
3892         i915_kernel_lost_context(dev);
3893         i915_gem_cleanup_ringbuffer(dev);
3894
3895         mutex_unlock(&dev->struct_mutex);
3896
3897         /* Cancel the retire work handler, which should be idle now. */
3898         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3899
3900         return 0;
3901 }
3902
3903 void i915_gem_l3_remap(struct drm_device *dev)
3904 {
3905         drm_i915_private_t *dev_priv = dev->dev_private;
3906         u32 misccpctl;
3907         int i;
3908
3909         if (!HAS_L3_GPU_CACHE(dev))
3910                 return;
3911
3912         if (!dev_priv->l3_parity.remap_info)
3913                 return;
3914
3915         misccpctl = I915_READ(GEN7_MISCCPCTL);
3916         I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
3917         POSTING_READ(GEN7_MISCCPCTL);
3918
3919         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
3920                 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
3921                 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
3922                         DRM_DEBUG("0x%x was already programmed to %x\n",
3923                                   GEN7_L3LOG_BASE + i, remap);
3924                 if (remap && !dev_priv->l3_parity.remap_info[i/4])
3925                         DRM_DEBUG_DRIVER("Clearing remapped register\n");
3926                 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
3927         }
3928
3929         /* Make sure all the writes land before re-enabling DOP clock gating */
3930         POSTING_READ(GEN7_L3LOG_BASE);
3931
3932         I915_WRITE(GEN7_MISCCPCTL, misccpctl);
3933 }
3934
3935 void i915_gem_init_swizzling(struct drm_device *dev)
3936 {
3937         drm_i915_private_t *dev_priv = dev->dev_private;
3938
3939         if (INTEL_INFO(dev)->gen < 5 ||
3940             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
3941                 return;
3942
3943         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
3944                                  DISP_TILE_SURFACE_SWIZZLING);
3945
3946         if (IS_GEN5(dev))
3947                 return;
3948
3949         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
3950         if (IS_GEN6(dev))
3951                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
3952         else if (IS_GEN7(dev))
3953                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
3954         else
3955                 BUG();
3956 }
3957
3958 static bool
3959 intel_enable_blt(struct drm_device *dev)
3960 {
3961         if (!HAS_BLT(dev))
3962                 return false;
3963
3964         /* The blitter was dysfunctional on early prototypes */
3965         if (IS_GEN6(dev) && dev->pdev->revision < 8) {
3966                 DRM_INFO("BLT not supported on this pre-production hardware;"
3967                          " graphics performance will be degraded.\n");
3968                 return false;
3969         }
3970
3971         return true;
3972 }
3973
3974 static int i915_gem_init_rings(struct drm_device *dev)
3975 {
3976         struct drm_i915_private *dev_priv = dev->dev_private;
3977         int ret;
3978
3979         ret = intel_init_render_ring_buffer(dev);
3980         if (ret)
3981                 return ret;
3982
3983         if (HAS_BSD(dev)) {
3984                 ret = intel_init_bsd_ring_buffer(dev);
3985                 if (ret)
3986                         goto cleanup_render_ring;
3987         }
3988
3989         if (intel_enable_blt(dev)) {
3990                 ret = intel_init_blt_ring_buffer(dev);
3991                 if (ret)
3992                         goto cleanup_bsd_ring;
3993         }
3994
3995         ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
3996         if (ret)
3997                 goto cleanup_blt_ring;
3998
3999         return 0;
4000
4001 cleanup_blt_ring:
4002         intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4003 cleanup_bsd_ring:
4004         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4005 cleanup_render_ring:
4006         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4007
4008         return ret;
4009 }
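/*
 * Editor's sketch, not part of the driver: the cleanup labels in
 * i915_gem_init_rings() follow the standard kernel unwind idiom -- each
 * failure jumps to the label that tears down everything initialised so
 * far, in reverse order. Skeleton with hypothetical stub helpers:
 */
static int example_setup_a(void) { return 0; }	/* hypothetical stub */
static int example_setup_b(void) { return 0; }	/* hypothetical stub */
static void example_teardown_a(void) { }	/* hypothetical stub */

static int example_init_unwind(void)
{
	int ret;

	ret = example_setup_a();
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = example_setup_b();
	if (ret)
		goto cleanup_a;		/* undo step a, then propagate */

	return 0;

cleanup_a:
	example_teardown_a();
	return ret;
}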
4010
4011 int
4012 i915_gem_init_hw(struct drm_device *dev)
4013 {
4014         drm_i915_private_t *dev_priv = dev->dev_private;
4015         int ret;
4016
4017         if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4018                 return -EIO;
4019
4020         if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
4021                 I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
4022
4023         if (HAS_PCH_NOP(dev)) {
4024                 u32 temp = I915_READ(GEN7_MSG_CTL);
4025                 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4026                 I915_WRITE(GEN7_MSG_CTL, temp);
4027         }
4028
4029         i915_gem_l3_remap(dev);
4030
4031         i915_gem_init_swizzling(dev);
4032
4033         ret = i915_gem_init_rings(dev);
4034         if (ret)
4035                 return ret;
4036
4037         /*
4038          * XXX: There was some w/a described somewhere suggesting loading
4039          * contexts before PPGTT.
4040          */
4041         i915_gem_context_init(dev);
4042         if (dev_priv->mm.aliasing_ppgtt) {
4043                 ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
4044                 if (ret) {
4045                         i915_gem_cleanup_aliasing_ppgtt(dev);
4046                         DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
4047                 }
4048         }
4049
4050         return 0;
4051 }
4052
4053 int i915_gem_init(struct drm_device *dev)
4054 {
4055         struct drm_i915_private *dev_priv = dev->dev_private;
4056         int ret;
4057
4058         mutex_lock(&dev->struct_mutex);
4059
4060         if (IS_VALLEYVIEW(dev)) {
4061                 /* VLVA0 (potential hack), BIOS isn't actually waking us */
4062                 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4063                 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4064                         DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4065         }
4066
4067         i915_gem_init_global_gtt(dev);
4068
4069         ret = i915_gem_init_hw(dev);
4070         mutex_unlock(&dev->struct_mutex);
4071         if (ret) {
4072                 i915_gem_cleanup_aliasing_ppgtt(dev);
4073                 return ret;
4074         }
4075
4076         /* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
4077         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4078                 dev_priv->dri1.allow_batchbuffer = 1;
4079         return 0;
4080 }
4081
4082 void
4083 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4084 {
4085         drm_i915_private_t *dev_priv = dev->dev_private;
4086         struct intel_ring_buffer *ring;
4087         int i;
4088
4089         for_each_ring(ring, dev_priv, i)
4090                 intel_cleanup_ring_buffer(ring);
4091 }
4092
4093 int
4094 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4095                        struct drm_file *file_priv)
4096 {
4097         drm_i915_private_t *dev_priv = dev->dev_private;
4098         int ret;
4099
4100         if (drm_core_check_feature(dev, DRIVER_MODESET))
4101                 return 0;
4102
4103         if (i915_reset_in_progress(&dev_priv->gpu_error)) {
4104                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4105                 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
4106         }
4107
4108         mutex_lock(&dev->struct_mutex);
4109         dev_priv->mm.suspended = 0;
4110
4111         ret = i915_gem_init_hw(dev);
4112         if (ret != 0) {
4113                 mutex_unlock(&dev->struct_mutex);
4114                 return ret;
4115         }
4116
4117         BUG_ON(!list_empty(&dev_priv->mm.active_list));
4118         mutex_unlock(&dev->struct_mutex);
4119
4120         ret = drm_irq_install(dev);
4121         if (ret)
4122                 goto cleanup_ringbuffer;
4123
4124         return 0;
4125
4126 cleanup_ringbuffer:
4127         mutex_lock(&dev->struct_mutex);
4128         i915_gem_cleanup_ringbuffer(dev);
4129         dev_priv->mm.suspended = 1;
4130         mutex_unlock(&dev->struct_mutex);
4131
4132         return ret;
4133 }
4134
4135 int
4136 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4137                        struct drm_file *file_priv)
4138 {
4139         if (drm_core_check_feature(dev, DRIVER_MODESET))
4140                 return 0;
4141
4142         drm_irq_uninstall(dev);
4143         return i915_gem_idle(dev);
4144 }
4145
4146 void
4147 i915_gem_lastclose(struct drm_device *dev)
4148 {
4149         int ret;
4150
4151         if (drm_core_check_feature(dev, DRIVER_MODESET))
4152                 return;
4153
4154         ret = i915_gem_idle(dev);
4155         if (ret)
4156                 DRM_ERROR("failed to idle hardware: %d\n", ret);
4157 }
4158
4159 static void
4160 init_ring_lists(struct intel_ring_buffer *ring)
4161 {
4162         INIT_LIST_HEAD(&ring->active_list);
4163         INIT_LIST_HEAD(&ring->request_list);
4164 }
4165
4166 void
4167 i915_gem_load(struct drm_device *dev)
4168 {
4169         drm_i915_private_t *dev_priv = dev->dev_private;
4170         int i;
4171
4172         dev_priv->slab =
4173                 kmem_cache_create("i915_gem_object",
4174                                   sizeof(struct drm_i915_gem_object), 0,
4175                                   SLAB_HWCACHE_ALIGN,
4176                                   NULL);
4177
4178         INIT_LIST_HEAD(&dev_priv->mm.active_list);
4179         INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
4180         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4181         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4182         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4183         for (i = 0; i < I915_NUM_RINGS; i++)
4184                 init_ring_lists(&dev_priv->ring[i]);
4185         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4186                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4187         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4188                           i915_gem_retire_work_handler);
4189         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4190
4191         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4192         if (IS_GEN3(dev)) {
4193                 I915_WRITE(MI_ARB_STATE,
4194                            _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
4195         }
4196
4197         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4198
4199         /* Old X drivers will take 0-2 for front, back, depth buffers */
4200         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4201                 dev_priv->fence_reg_start = 3;
4202
4203         if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4204                 dev_priv->num_fence_regs = 32;
4205         else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4206                 dev_priv->num_fence_regs = 16;
4207         else
4208                 dev_priv->num_fence_regs = 8;
4209
4210         /* Initialize fence registers to zero */
4211         i915_gem_reset_fences(dev);
4212
4213         i915_gem_detect_bit_6_swizzle(dev);
4214         init_waitqueue_head(&dev_priv->pending_flip_queue);
4215
4216         dev_priv->mm.interruptible = true;
4217
4218         dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
4219         dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
4220         register_shrinker(&dev_priv->mm.inactive_shrinker);
4221 }
4222
4223 /*
4224  * Create a physically contiguous memory object of the requested size,
4225  * e.g. for cursor and overlay registers.
4226  */
4227 static int i915_gem_init_phys_object(struct drm_device *dev,
4228                                      int id, int size, int align)
4229 {
4230         drm_i915_private_t *dev_priv = dev->dev_private;
4231         struct drm_i915_gem_phys_object *phys_obj;
4232         int ret;
4233
4234         if (dev_priv->mm.phys_objs[id - 1] || !size)
4235                 return 0;
4236
4237         phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4238         if (!phys_obj)
4239                 return -ENOMEM;
4240
4241         phys_obj->id = id;
4242
4243         phys_obj->handle = drm_pci_alloc(dev, size, align);
4244         if (!phys_obj->handle) {
4245                 ret = -ENOMEM;
4246                 goto kfree_obj;
4247         }
4248 #ifdef CONFIG_X86
4249         set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4250 #endif
4251
4252         dev_priv->mm.phys_objs[id - 1] = phys_obj;
4253
4254         return 0;
4255 kfree_obj:
4256         kfree(phys_obj);
4257         return ret;
4258 }
4259
4260 static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4261 {
4262         drm_i915_private_t *dev_priv = dev->dev_private;
4263         struct drm_i915_gem_phys_object *phys_obj;
4264
4265         if (!dev_priv->mm.phys_objs[id - 1])
4266                 return;
4267
4268         phys_obj = dev_priv->mm.phys_objs[id - 1];
4269         if (phys_obj->cur_obj) {
4270                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4271         }
4272
4273 #ifdef CONFIG_X86
4274         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4275 #endif
4276         drm_pci_free(dev, phys_obj->handle);
4277         kfree(phys_obj);
4278         dev_priv->mm.phys_objs[id - 1] = NULL;
4279 }
4280
4281 void i915_gem_free_all_phys_object(struct drm_device *dev)
4282 {
4283         int i;
4284
4285         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4286                 i915_gem_free_phys_object(dev, i);
4287 }
4288
4289 void i915_gem_detach_phys_object(struct drm_device *dev,
4290                                  struct drm_i915_gem_object *obj)
4291 {
4292         struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4293         char *vaddr;
4294         int i;
4295         int page_count;
4296
4297         if (!obj->phys_obj)
4298                 return;
4299         vaddr = obj->phys_obj->handle->vaddr;
4300
4301         page_count = obj->base.size / PAGE_SIZE;
4302         for (i = 0; i < page_count; i++) {
4303                 struct page *page = shmem_read_mapping_page(mapping, i);
4304                 if (!IS_ERR(page)) {
4305                         char *dst = kmap_atomic(page);
4306                         memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4307                         kunmap_atomic(dst);
4308
4309                         drm_clflush_pages(&page, 1);
4310
4311                         set_page_dirty(page);
4312                         mark_page_accessed(page);
4313                         page_cache_release(page);
4314                 }
4315         }
4316         i915_gem_chipset_flush(dev);
4317
4318         obj->phys_obj->cur_obj = NULL;
4319         obj->phys_obj = NULL;
4320 }
4321
4322 int
4323 i915_gem_attach_phys_object(struct drm_device *dev,
4324                             struct drm_i915_gem_object *obj,
4325                             int id,
4326                             int align)
4327 {
4328         struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4329         drm_i915_private_t *dev_priv = dev->dev_private;
4330         int ret = 0;
4331         int page_count;
4332         int i;
4333
4334         if (id > I915_MAX_PHYS_OBJECT)
4335                 return -EINVAL;
4336
4337         if (obj->phys_obj) {
4338                 if (obj->phys_obj->id == id)
4339                         return 0;
4340                 i915_gem_detach_phys_object(dev, obj);
4341         }
4342
4343         /* create a new object */
4344         if (!dev_priv->mm.phys_objs[id - 1]) {
4345                 ret = i915_gem_init_phys_object(dev, id,
4346                                                 obj->base.size, align);
4347                 if (ret) {
4348                         DRM_ERROR("failed to init phys object %d size: %zu\n",
4349                                   id, obj->base.size);
4350                         return ret;
4351                 }
4352         }
4353
4354         /* bind to the object */
4355         obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4356         obj->phys_obj->cur_obj = obj;
4357
4358         page_count = obj->base.size / PAGE_SIZE;
4359
4360         for (i = 0; i < page_count; i++) {
4361                 struct page *page;
4362                 char *dst, *src;
4363
4364                 page = shmem_read_mapping_page(mapping, i);
4365                 if (IS_ERR(page))
4366                         return PTR_ERR(page);
4367
4368                 src = kmap_atomic(page);
4369                 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4370                 memcpy(dst, src, PAGE_SIZE);
4371                 kunmap_atomic(src);
4372
4373                 mark_page_accessed(page);
4374                 page_cache_release(page);
4375         }
4376
4377         return 0;
4378 }
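/*
 * Editor's sketch, not part of the driver: the attach/detach loops above
 * use the standard kmap_atomic() idiom for copying a single page between
 * shmem and a linear buffer -- the mapping is short-lived and must not
 * be held across anything that sleeps. Hypothetical helper:
 */
static void example_copy_from_page(struct page *page, void *dst)
{
	char *src = kmap_atomic(page);

	memcpy(dst, src, PAGE_SIZE);	/* no sleeping between map/unmap */
	kunmap_atomic(src);
}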
4379
4380 static int
4381 i915_gem_phys_pwrite(struct drm_device *dev,
4382                      struct drm_i915_gem_object *obj,
4383                      struct drm_i915_gem_pwrite *args,
4384                      struct drm_file *file_priv)
4385 {
4386         void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4387         char __user *user_data = to_user_ptr(args->data_ptr);
4388
4389         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4390                 unsigned long unwritten;
4391
4392                 /* The physical object once assigned is fixed for the lifetime
4393                  * of the obj, so we can safely drop the lock and continue
4394                  * to access vaddr.
4395                  */
4396                 mutex_unlock(&dev->struct_mutex);
4397                 unwritten = copy_from_user(vaddr, user_data, args->size);
4398                 mutex_lock(&dev->struct_mutex);
4399                 if (unwritten)
4400                         return -EFAULT;
4401         }
4402
4403         i915_gem_chipset_flush(dev);
4404         return 0;
4405 }
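/*
 * Editor's sketch, not part of the driver: i915_gem_phys_pwrite() uses a
 * two-phase copy -- a non-faulting attempt while holding the mutex, then
 * a faulting retry with the lock dropped. A hypothetical generic shape
 * of the same pattern:
 */
static int example_two_phase_copy(struct mutex *lock, void *dst,
				  const void __user *src, size_t len)
{
	if (__copy_from_user_inatomic_nocache(dst, src, len) == 0)
		return 0;		/* fast path: no page fault taken */

	mutex_unlock(lock);		/* faulting copy may sleep */
	if (copy_from_user(dst, src, len)) {
		mutex_lock(lock);
		return -EFAULT;
	}
	mutex_lock(lock);
	return 0;
}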
4406
4407 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4408 {
4409         struct drm_i915_file_private *file_priv = file->driver_priv;
4410
4411         /* Clean up our request list when the client is going away, so that
4412          * later retire_requests won't dereference our soon-to-be-gone
4413          * file_priv.
4414          */
4415         spin_lock(&file_priv->mm.lock);
4416         while (!list_empty(&file_priv->mm.request_list)) {
4417                 struct drm_i915_gem_request *request;
4418
4419                 request = list_first_entry(&file_priv->mm.request_list,
4420                                            struct drm_i915_gem_request,
4421                                            client_list);
4422                 list_del(&request->client_list);
4423                 request->file_priv = NULL;
4424         }
4425         spin_unlock(&file_priv->mm.lock);
4426 }
4427
4428 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4429 {
4430         if (!mutex_is_locked(mutex))
4431                 return false;
4432
4433 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4434         return mutex->owner == task;
4435 #else
4436         /* Since UP may be pre-empted, we cannot assume that we own the lock */
4437         return false;
4438 #endif
4439 }
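/*
 * Editor's note: the shrinker below pairs mutex_trylock() with the owner
 * test above to cope with re-entrancy. Direct reclaim can fire while the
 * current task itself holds struct_mutex (an allocation made under the
 * lock); trylock then fails, but the owner check shows we already hold
 * it, so shrinking proceeds with unlock = false and the mutex stays held
 * on return. If a different task owns the lock, we back off and report
 * nothing shrinkable.
 */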
4440
4441 static int
4442 i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4443 {
4444         struct drm_i915_private *dev_priv =
4445                 container_of(shrinker,
4446                              struct drm_i915_private,
4447                              mm.inactive_shrinker);
4448         struct drm_device *dev = dev_priv->dev;
4449         struct drm_i915_gem_object *obj;
4450         int nr_to_scan = sc->nr_to_scan;
4451         bool unlock = true;
4452         int cnt;
4453
4454         if (!mutex_trylock(&dev->struct_mutex)) {
4455                 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4456                         return 0;
4457
4458                 if (dev_priv->mm.shrinker_no_lock_stealing)
4459                         return 0;
4460
4461                 unlock = false;
4462         }
4463
4464         if (nr_to_scan) {
4465                 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
4466                 if (nr_to_scan > 0)
4467                         nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
4468                                                         false);
4469                 if (nr_to_scan > 0)
4470                         i915_gem_shrink_all(dev_priv);
4471         }
4472
4473         cnt = 0;
4474         list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
4475                 if (obj->pages_pin_count == 0)
4476                         cnt += obj->base.size >> PAGE_SHIFT;
4477         list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
4478                 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
4479                         cnt += obj->base.size >> PAGE_SHIFT;
4480
4481         if (unlock)
4482                 mutex_unlock(&dev->struct_mutex);
4483         return cnt;
4484 }