rtime.felk.cvut.cz Git - lisovros/linux_canprio.git/commitdiff
Merge branch 'agp-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied...
authorLinus Torvalds <torvalds@linux-foundation.org>
Tue, 15 Sep 2009 16:18:07 +0000 (09:18 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 15 Sep 2009 16:18:07 +0000 (09:18 -0700)
* 'agp-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/agp-2.6:
  agp/intel: remove restore in resume
  agp: fix uninorth build
  intel-agp: Set dma mask for i915
  agp: kill phys_to_gart() and gart_to_phys()
  intel-agp: fix sglist allocation to avoid vmalloc()
  intel-agp: Move repeated sglist free into separate function
  agp: Switch agp_{un,}map_page() to take struct page * argument
  agp: tidy up handling of scratch pages w.r.t. DMA API
  intel_agp: Use PCI DMA API correctly on chipsets new enough to have IOMMU
  agp: Add generic support for graphics dma remapping
  agp: Switch mask_memory() method to take address argument again, not page

1  2 
drivers/char/agp/intel-agp.c

index c58557790585dfe6a0d56dba70fabb3764a3e2c6,5eeaeeeaa2cc7b4b33c733d46a6852dd1552a56e..1540e693d91ebf3f886ad650eb067e6a297b4e3e
  #include <linux/agp_backend.h>
  #include "agp.h"
  
+ /*
+  * If we have Intel graphics, we're not going to have anything other than
+  * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
+  * on the Intel IOMMU support (CONFIG_DMAR).
+  * Only newer chipsets need to bother with this, of course.
+  */
+ #ifdef CONFIG_DMAR
+ #define USE_PCI_DMA_API 1
+ #endif
  #define PCI_DEVICE_ID_INTEL_E7221_HB  0x2588
  #define PCI_DEVICE_ID_INTEL_E7221_IG  0x258a
  #define PCI_DEVICE_ID_INTEL_82946GZ_HB      0x2970
@@@ -49,7 -59,6 +59,7 @@@
  #define PCI_DEVICE_ID_INTEL_IGDNG_D_HB            0x0040
  #define PCI_DEVICE_ID_INTEL_IGDNG_D_IG            0x0042
  #define PCI_DEVICE_ID_INTEL_IGDNG_M_HB            0x0044
 +#define PCI_DEVICE_ID_INTEL_IGDNG_MA_HB           0x0062
  #define PCI_DEVICE_ID_INTEL_IGDNG_M_IG            0x0046
  
  /* cover 915 and 945 variants */
@@@ -82,8 -91,7 +92,8 @@@
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
 -              agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB)
 +              agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \
 +              agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB)
  
  extern int agp_memory_reserved;
  
@@@ -172,6 -180,123 +182,123 @@@ static struct _intel_private 
        int resource_valid;
  } intel_private;
  
+ #ifdef USE_PCI_DMA_API
+ static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
+ {
+       *ret = pci_map_page(intel_private.pcidev, page, 0,
+                           PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+       if (pci_dma_mapping_error(intel_private.pcidev, *ret))
+               return -EINVAL;
+       return 0;
+ }
+ static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
+ {
+       pci_unmap_page(intel_private.pcidev, dma,
+                      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ }
+ static void intel_agp_free_sglist(struct agp_memory *mem)
+ {
+       struct sg_table st;
+       st.sgl = mem->sg_list;
+       st.orig_nents = st.nents = mem->page_count;
+       sg_free_table(&st);
+       mem->sg_list = NULL;
+       mem->num_sg = 0;
+ }
+ static int intel_agp_map_memory(struct agp_memory *mem)
+ {
+       struct sg_table st;
+       struct scatterlist *sg;
+       int i;
+       DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
+       if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
+               return -ENOMEM;
+       mem->sg_list = sg = st.sgl;
+       for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
+               sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
+       mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
+                                mem->page_count, PCI_DMA_BIDIRECTIONAL);
+       if (unlikely(!mem->num_sg)) {
+               intel_agp_free_sglist(mem);
+               return -ENOMEM;
+       }
+       return 0;
+ }
+ static void intel_agp_unmap_memory(struct agp_memory *mem)
+ {
+       DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
+       pci_unmap_sg(intel_private.pcidev, mem->sg_list,
+                    mem->page_count, PCI_DMA_BIDIRECTIONAL);
+       intel_agp_free_sglist(mem);
+ }
+ static void intel_agp_insert_sg_entries(struct agp_memory *mem,
+                                       off_t pg_start, int mask_type)
+ {
+       struct scatterlist *sg;
+       int i, j;
+       j = pg_start;
+       WARN_ON(!mem->num_sg);
+       if (mem->num_sg == mem->page_count) {
+               for_each_sg(mem->sg_list, sg, mem->page_count, i) {
+                       writel(agp_bridge->driver->mask_memory(agp_bridge,
+                                       sg_dma_address(sg), mask_type),
+                                       intel_private.gtt+j);
+                       j++;
+               }
+       } else {
+               /* sg may merge pages, but we have to seperate
+                * per-page addr for GTT */
+               unsigned int len, m;
+               for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
+                       len = sg_dma_len(sg) / PAGE_SIZE;
+                       for (m = 0; m < len; m++) {
+                               writel(agp_bridge->driver->mask_memory(agp_bridge,
+                                                                      sg_dma_address(sg) + m * PAGE_SIZE,
+                                                                      mask_type),
+                                      intel_private.gtt+j);
+                               j++;
+                       }
+               }
+       }
+       readl(intel_private.gtt+j-1);
+ }
+ #else
+ static void intel_agp_insert_sg_entries(struct agp_memory *mem,
+                                       off_t pg_start, int mask_type)
+ {
+       int i, j;
+       for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+               writel(agp_bridge->driver->mask_memory(agp_bridge,
+                               page_to_phys(mem->pages[i]), mask_type),
+                      intel_private.gtt+j);
+       }
+       readl(intel_private.gtt+j-1);
+ }
+ #endif
  static int intel_i810_fetch_size(void)
  {
        u32 smram_miscc;
@@@ -345,8 -470,7 +472,7 @@@ static int intel_i810_insert_entries(st
                        global_cache_flush();
                for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
                        writel(agp_bridge->driver->mask_memory(agp_bridge,
-                                                              mem->pages[i],
-                                                              mask_type),
+                                       page_to_phys(mem->pages[i]), mask_type),
                               intel_private.registers+I810_PTE_BASE+(j*4));
                }
                readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
@@@ -463,9 -587,8 +589,8 @@@ static void intel_i810_free_by_type(str
  }
  
  static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
-                                           struct page *page, int type)
+                                           dma_addr_t addr, int type)
  {
-       unsigned long addr = phys_to_gart(page_to_phys(page));
        /* Type checking must be done elsewhere */
        return addr | bridge->driver->masks[type].mask;
  }
@@@ -853,7 -976,7 +978,7 @@@ static int intel_i830_insert_entries(st
  
        for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
                writel(agp_bridge->driver->mask_memory(agp_bridge,
-                                                      mem->pages[i], mask_type),
+                               page_to_phys(mem->pages[i]), mask_type),
                       intel_private.registers+I810_PTE_BASE+(j*4));
        }
        readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
@@@ -1017,6 -1140,12 +1142,12 @@@ static int intel_i915_configure(void
  
        intel_i9xx_setup_flush();
  
+ #ifdef USE_PCI_DMA_API 
+       if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
+               dev_err(&intel_private.pcidev->dev,
+                       "set gfx device dma mask 36bit failed!\n");
+ #endif
        return 0;
  }
  
@@@ -1041,7 -1170,7 +1172,7 @@@ static void intel_i915_chipset_flush(st
  static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
                                     int type)
  {
-       int i, j, num_entries;
+       int num_entries;
        void *temp;
        int ret = -EINVAL;
        int mask_type;
        if ((pg_start + mem->page_count) > num_entries)
                goto out_err;
  
-       /* The i915 can't check the GTT for entries since its read only,
+       /* The i915 can't check the GTT for entries since it's read only;
         * depend on the caller to make the correct offset decisions.
         */
  
        if (!mem->is_flushed)
                global_cache_flush();
  
-       for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-               writel(agp_bridge->driver->mask_memory(agp_bridge,
-                                                      mem->pages[i], mask_type), intel_private.gtt+j);
-       }
-       readl(intel_private.gtt+j-1);
+       intel_agp_insert_sg_entries(mem, pg_start, mask_type);
        agp_bridge->driver->tlb_flush(mem);
  
   out:
@@@ -1198,9 -1322,8 +1324,8 @@@ static int intel_i915_create_gatt_table
   * this conditional.
   */
  static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
-                                           struct page *page, int type)
+                                           dma_addr_t addr, int type)
  {
-       dma_addr_t addr = phys_to_gart(page_to_phys(page));
        /* Shift high bits down */
        addr |= (addr >> 28) & 0xf0;
  
@@@ -1218,7 -1341,6 +1343,7 @@@ static void intel_i965_get_gtt_range(in
        case PCI_DEVICE_ID_INTEL_G41_HB:
        case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
        case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
 +      case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB:
                *gtt_offset = *gtt_size = MB(2);
                break;
        default:
@@@ -2006,6 -2128,12 +2131,12 @@@ static const struct agp_bridge_driver i
        .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
        .chipset_flush          = intel_i915_chipset_flush,
+ #ifdef USE_PCI_DMA_API
+       .agp_map_page           = intel_agp_map_page,
+       .agp_unmap_page         = intel_agp_unmap_page,
+       .agp_map_memory         = intel_agp_map_memory,
+       .agp_unmap_memory       = intel_agp_unmap_memory,
+ #endif
  };
  
  static const struct agp_bridge_driver intel_i965_driver = {
        .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
        .chipset_flush          = intel_i915_chipset_flush,
+ #ifdef USE_PCI_DMA_API
+       .agp_map_page           = intel_agp_map_page,
+       .agp_unmap_page         = intel_agp_unmap_page,
+       .agp_map_memory         = intel_agp_map_memory,
+       .agp_unmap_memory       = intel_agp_unmap_memory,
+ #endif
  };
  
  static const struct agp_bridge_driver intel_7505_driver = {
@@@ -2088,6 -2222,12 +2225,12 @@@ static const struct agp_bridge_driver i
        .agp_destroy_pages      = agp_generic_destroy_pages,
        .agp_type_to_mask_type  = intel_i830_type_to_mask_type,
        .chipset_flush          = intel_i915_chipset_flush,
+ #ifdef USE_PCI_DMA_API
+       .agp_map_page           = intel_agp_map_page,
+       .agp_unmap_page         = intel_agp_unmap_page,
+       .agp_map_memory         = intel_agp_map_memory,
+       .agp_unmap_memory       = intel_agp_unmap_memory,
+ #endif
  };
  
  static int find_gmch(u16 device)
@@@ -2198,8 -2338,6 +2341,8 @@@ static const struct intel_driver_descri
            "IGDNG/D", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_IGDNG_M_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
            "IGDNG/M", NULL, &intel_i965_driver },
 +      { PCI_DEVICE_ID_INTEL_IGDNG_MA_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
 +          "IGDNG/MA", NULL, &intel_i965_driver },
        { 0, 0, 0, NULL, NULL, NULL }
  };
  
@@@ -2313,15 -2451,6 +2456,6 @@@ static int agp_intel_resume(struct pci_
        struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
        int ret_val;
  
-       pci_restore_state(pdev);
-       /* We should restore our graphics device's config space,
-        * as host bridge (00:00) resumes before graphics device (02:00),
-        * then our access to its pci space can work right.
-        */
-       if (intel_private.pcidev)
-               pci_restore_state(intel_private.pcidev);
        if (bridge->driver == &intel_generic_driver)
                intel_configure();
        else if (bridge->driver == &intel_850_driver)
@@@ -2403,7 -2532,6 +2537,7 @@@ static struct pci_device_id agp_intel_p
        ID(PCI_DEVICE_ID_INTEL_G41_HB),
        ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
        ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
 +      ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB),
        { }
  };