diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
index 627f542..8eb56e2 100644
--- a/drivers/char/agp/Makefile
+++ b/drivers/char/agp/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_AGP_HP_ZX1)	+= hp-agp.o
 obj-$(CONFIG_AGP_PARISC)	+= parisc-agp.o
 obj-$(CONFIG_AGP_I460)		+= i460-agp.o
 obj-$(CONFIG_AGP_INTEL)		+= intel-agp.o
+obj-$(CONFIG_AGP_INTEL)		+= intel-gtt.o
 obj-$(CONFIG_AGP_NVIDIA)	+= nvidia-agp.o
 obj-$(CONFIG_AGP_SGI_TIOCA)	+= sgi-agp.o
 obj-$(CONFIG_AGP_SIS)		+= sis-agp.o
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 710af89..cee7d6b 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -13,8 +13,6 @@
 #include "agp.h"
 #include "intel-agp.h"
 
-#include "intel-gtt.c"
-
 int intel_agp_enabled;
 EXPORT_SYMBOL(intel_agp_enabled);
 
@@ -702,169 +700,37 @@ static const struct agp_bridge_driver intel_7505_driver = {
 	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
 };
 
-static int find_gmch(u16 device)
-{
-	struct pci_dev *gmch_device;
-
-	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
-	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
-		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
-					     device, gmch_device);
-	}
-
-	if (!gmch_device)
-		return 0;
-
-	intel_private.pcidev = gmch_device;
-	return 1;
-}
-
 /* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
  * driver and gmch_driver must be non-null, and find_gmch will determine
  * which one should be used if a gmch_chip_id is present.
  */
-static const struct intel_driver_description {
+static const struct intel_agp_driver_description {
 	unsigned int chip_id;
-	unsigned int gmch_chip_id;
 	char *name;
 	const struct agp_bridge_driver *driver;
-	const struct agp_bridge_driver *gmch_driver;
 } intel_agp_chipsets[] = {
-	{ PCI_DEVICE_ID_INTEL_82443LX_0, 0, "440LX", &intel_generic_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82443BX_0, 0, "440BX", &intel_generic_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82443GX_0, 0, "440GX", &intel_generic_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
-		NULL, &intel_810_driver },
-	{ PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
-		NULL, &intel_810_driver },
-	{ PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
-		NULL, &intel_810_driver },
-	{ PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
-		&intel_815_driver, &intel_810_driver },
-	{ PCI_DEVICE_ID_INTEL_82820_HB, 0, "i820", &intel_820_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, "i820", &intel_820_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
-		&intel_830mp_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82840_HB, 0, "i840", &intel_840_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82845_HB, 0, "845G", &intel_845_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
-		&intel_845_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82850_HB, 0, "i850", &intel_850_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, "854",
-		&intel_845_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82855PM_HB, 0, "855PM", &intel_845_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
-		&intel_845_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82860_HB, 0, "i860", &intel_860_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, "865",
-		&intel_845_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82875_HB, 0, "i875", &intel_845_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_7505_0, 0, "E7505", &intel_7505_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_7205_0, 0, "E7205", &intel_7505_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, "G33",
-		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
-		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
-		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
-		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
-		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG,
-		"GM45", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG,
-		"Eaglelake", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG,
-		"Q45/Q43", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG,
-		"G45/G43", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
-		"B43", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
-		"G41", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
-		"HD Graphics", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
-		"HD Graphics", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
-		"HD Graphics", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
-		"HD Graphics", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG,
-		"Sandybridge", NULL, &intel_gen6_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG,
-		"Sandybridge", NULL, &intel_gen6_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_D0_IG,
-		"Sandybridge", NULL, &intel_gen6_driver },
-	{ 0, 0, NULL, NULL, NULL }
+	{ PCI_DEVICE_ID_INTEL_82443LX_0, "440LX", &intel_generic_driver },
+	{ PCI_DEVICE_ID_INTEL_82443BX_0, "440BX", &intel_generic_driver },
+	{ PCI_DEVICE_ID_INTEL_82443GX_0, "440GX", &intel_generic_driver },
+	{ PCI_DEVICE_ID_INTEL_82815_MC, "i815", &intel_815_driver },
+	{ PCI_DEVICE_ID_INTEL_82820_HB, "i820", &intel_820_driver },
+	{ PCI_DEVICE_ID_INTEL_82820_UP_HB, "i820", &intel_820_driver },
+	{ PCI_DEVICE_ID_INTEL_82830_HB, "830M", &intel_830mp_driver },
+	{ PCI_DEVICE_ID_INTEL_82840_HB, "i840", &intel_840_driver },
+	{ PCI_DEVICE_ID_INTEL_82845_HB, "845G", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82845G_HB, "830M", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82850_HB, "i850", &intel_850_driver },
+	{ PCI_DEVICE_ID_INTEL_82854_HB, "854", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82855PM_HB, "855PM", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82855GM_HB, "855GM", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82860_HB, "i860", &intel_860_driver },
+	{ PCI_DEVICE_ID_INTEL_82865_HB, "865", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82875_HB, "i875", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_7505_0, "E7505", &intel_7505_driver },
+	{ PCI_DEVICE_ID_INTEL_7205_0, "E7205", &intel_7505_driver },
+	{ 0, NULL, NULL }
 };
 
-static int __devinit intel_gmch_probe(struct pci_dev *pdev,
-				      struct agp_bridge_data *bridge)
-{
-	int i, mask;
-
-	bridge->driver = NULL;
-
-	for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
-		if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
-		    find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
-			bridge->driver =
-				intel_agp_chipsets[i].gmch_driver;
-			break;
-		}
-	}
-
-	if (!bridge->driver)
-		return 0;
-
-	bridge->dev_private_data = &intel_private;
-	bridge->dev = pdev;
-
-	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
-
-	if (bridge->driver->mask_memory == intel_gen6_mask_memory)
-		mask = 40;
-	else if (bridge->driver->mask_memory == intel_i965_mask_memory)
-		mask = 36;
-	else
-		mask = 32;
-
-	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
-		dev_err(&intel_private.pcidev->dev,
-			"set gfx device dma mask %d-bit failed!\n", mask);
-	else
-		pci_set_consistent_dma_mask(intel_private.pcidev,
-					    DMA_BIT_MASK(mask));
-
-	return 1;
-}
-
 static int __devinit agp_intel_probe(struct pci_dev *pdev,
 				     const struct pci_device_id *ent)
 {
@@ -894,7 +760,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
 		}
 	}
 
-	if (intel_agp_chipsets[i].name == NULL) {
+	if (!bridge->driver) {
 		if (cap_ptr)
 			dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n",
 				 pdev->vendor, pdev->device);
@@ -902,14 +768,6 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
 		return -ENODEV;
 	}
 
-	if (!bridge->driver) {
-		if (cap_ptr)
-			dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n",
-				 intel_agp_chipsets[i].gmch_chip_id);
-		agp_put_bridge(bridge);
-		return -ENODEV;
-	}
-
 	bridge->dev = pdev;
 	bridge->dev_private_data = NULL;
 
@@ -961,8 +819,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
 
 	agp_remove_bridge(bridge);
 
-	if (intel_private.pcidev)
-		pci_dev_put(intel_private.pcidev);
+	intel_gmch_remove(pdev);
 
 	agp_put_bridge(bridge);
 }
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 08d4753..89e1ecf 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -244,3 +244,7 @@
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
 		IS_SNB)
+
+int intel_gmch_probe(struct pci_dev *pdev,
+		     struct agp_bridge_data *bridge);
+void intel_gmch_remove(struct pci_dev *pdev);
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index d22ffb8..e4c4448 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -15,6 +15,18 @@
  * /fairy-tale-mode off
  */
 
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+#include <linux/agp_backend.h>
+#include <linux/ratelimit.h>
+#include <asm/smp.h>
+#include "agp.h"
+#include "intel-agp.h"
+#include <drm/intel-gtt.h>
+
 /*
  * If we have Intel graphics, we're not going to have anything other than
  * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
@@ -31,9 +43,9 @@ EXPORT_SYMBOL(intel_max_stolen);
 
 static const struct aper_size_info_fixed intel_i810_sizes[] =
 {
-	{64, 16384, 4},
+	{64, 16384 - I830_CC_DANCE_PAGES, 4},
 	/* The 32M mode still requires a 64k gatt */
-	{32, 8192, 4}
+	{32, 8192 - I830_CC_DANCE_PAGES, 4}
 };
 
 #define AGP_DCACHE_MEMORY	1
@@ -61,11 +73,14 @@ static struct _intel_private {
 	 */
 	int gtt_entries;			/* i830+ */
 	int gtt_total_size;
-	union {
-		void __iomem *i9xx_flush_page;
-		void *i8xx_flush_page;
-	};
-	struct page *i8xx_page;
+	void __iomem *i9xx_flush_page;
+	void *i8xx_cpu_flush_page;
+	void *i8xx_cpu_check_page;
+	void *i8xx_cpu_canary_pages[I830_CC_CANARY_FLOCK_PAGES];
+	void __iomem *i8xx_gtt_cc_pages;
+	unsigned int i8xx_cache_flush_num;
+	unsigned int i8xx_gtt_whack_pagenum;
+	struct page *i8xx_pages[I830_CC_DANCE_PAGES + 1];
 	struct resource ifp_resource;
 	int resource_valid;
 } intel_private;
@@ -486,11 +501,11 @@ static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
 
 static struct aper_size_info_fixed intel_i830_sizes[] =
 {
-	{128, 32768, 5},
+	{128, 32768 - I830_CC_DANCE_PAGES, 5},
 	/* The 64M mode still requires a 128k gatt */
-	{64, 16384, 5},
-	{256, 65536, 6},
-	{512, 131072, 7},
+	{64, 16384 - I830_CC_DANCE_PAGES, 5},
+	{256, 65536 - I830_CC_DANCE_PAGES, 6},
+	{512, 131072 - I830_CC_DANCE_PAGES, 7},
 };
 
 static void intel_i830_init_gtt_entries(void)
@@ -737,27 +752,215 @@ static void intel_i830_fini_flush(void)
 {
-	kunmap(intel_private.i8xx_page);
-	intel_private.i8xx_flush_page = NULL;
-	unmap_page_from_agp(intel_private.i8xx_page);
+	int i;
+
+	kunmap(intel_private.i8xx_pages[0]);
+	intel_private.i8xx_cpu_flush_page = NULL;
+	kunmap(intel_private.i8xx_pages[1]);
+	intel_private.i8xx_cpu_check_page = NULL;
+
+	if (intel_private.i8xx_gtt_cc_pages)
+		iounmap(intel_private.i8xx_gtt_cc_pages);
+
+	for (i = 0; i < I830_CC_CANARY_FLOCK_PAGES; i++) {
+		kunmap(intel_private.i8xx_cpu_canary_pages[i]);
+		intel_private.i8xx_cpu_canary_pages[i] = NULL;
+	}
 
-	__free_page(intel_private.i8xx_page);
-	intel_private.i8xx_page = NULL;
+	for (i = 0; i < I830_CC_DANCE_PAGES + 1; i++) {
+		__free_page(intel_private.i8xx_pages[i]);
+		intel_private.i8xx_pages[i] = NULL;
+	}
 }
 
 static void intel_i830_setup_flush(void)
 {
+	int num_entries = A_SIZE_FIX(agp_bridge->current_size)->num_entries;
+	int i;
+
 	/* return if we've already set the flush mechanism up */
-	if (intel_private.i8xx_page)
-		return;
+	if (intel_private.i8xx_pages[0])
+		goto setup;
+
+	for (i = 0; i < I830_CC_DANCE_PAGES + 1; i++) {
+		intel_private.i8xx_pages[i]
+			= alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+		if (!intel_private.i8xx_pages[i]) {
+			intel_i830_fini_flush();
+			return;
+		}
+	}
 
-	intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
-	if (!intel_private.i8xx_page)
+	intel_private.i8xx_cpu_check_page = kmap(intel_private.i8xx_pages[1]);
+	if (!intel_private.i8xx_cpu_check_page) {
+		WARN_ON(1);
+		intel_i830_fini_flush();
 		return;
-
-	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
-	if (!intel_private.i8xx_flush_page)
+	}
+	intel_private.i8xx_cpu_flush_page = kmap(intel_private.i8xx_pages[0]);
+	if (!intel_private.i8xx_cpu_flush_page) {
+		WARN_ON(1);
 		intel_i830_fini_flush();
+		return;
+	}
+
+	for (i = 0; i < I830_CC_CANARY_FLOCK_PAGES; i++) {
+		intel_private.i8xx_cpu_canary_pages[i]
+			= kmap(intel_private.i8xx_pages[i+2]);
+		if (!intel_private.i8xx_cpu_canary_pages[i]) {
+			WARN_ON(1);
+			intel_i830_fini_flush();
+			return;
+		}
+	}
+
+	/* Map the flushing pages into the gtt as the last entries. The last
+	 * page can't be used by the gpu, anyway (prefetch might walk over the
+	 * end of the last page). */
+	intel_private.i8xx_gtt_cc_pages
+		= ioremap_wc(agp_bridge->gart_bus_addr
+				+ num_entries*4096,
+			     I830_CC_DANCE_PAGES*4096);
+
+	if (!intel_private.i8xx_gtt_cc_pages)
+		dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
+
+setup:
+	/* Don't map the first page, we only write via its physical address
+	 * into it. */
+	for (i = 0; i < I830_CC_DANCE_PAGES; i++) {
+		writel(agp_bridge->driver->mask_memory(agp_bridge,
+				page_to_phys(intel_private.i8xx_pages[i+1]), 0),
+		       intel_private.registers+I810_PTE_BASE+((num_entries+i)*4));
+	}
+
+	intel_private.i8xx_cache_flush_num = 0;
+	intel_private.i8xx_gtt_whack_pagenum = 0;
+}
+
+static void intel_whack_gtt_harder(void)
+{
+	void __iomem *whack_page = intel_private.i8xx_gtt_cc_pages
+		+ (1 + I830_CC_CANARY_FLOCK_PAGES)*4096;
+
+	whack_page += (intel_private.i8xx_gtt_whack_pagenum
+			% I830_CC_GTT_WHACK_PAGES)*4096;
+
+	memset_io(whack_page, intel_private.i8xx_gtt_whack_pagenum, 4096);
+
+	intel_private.i8xx_gtt_whack_pagenum++;
+}
+
+static void intel_flush_mch_write_buffer(void)
+{
+	memset(intel_private.i8xx_cpu_flush_page, 0,
+	       I830_MCH_WRITE_BUFFER_SIZE);
+
+	mb();
+	if (cpu_has_clflush) {
+		clflush_cache_range(intel_private.i8xx_cpu_flush_page,
+				    I830_MCH_WRITE_BUFFER_SIZE);
+	} else if (wbinvd_on_all_cpus() != 0)
+		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+	mb();
+}
+
+static void intel_write_canary_flocks(void)
+{
+	int i, j;
+
+	for (i = 0; i < 4096*I830_CC_CANARY_FLOCK_GTT_PAGES; i += sizeof(int)) {
+		unsigned int __iomem *write_pos_gtt
+			= intel_private.i8xx_gtt_cc_pages + 4096 + i;
+		writel(intel_private.i8xx_cache_flush_num, write_pos_gtt);
+	}
+
+	for (i = 0; i < I830_CC_CANARY_FLOCK_CPU_PAGES; i++) {
+		for (j = 0; j < 4096; j += sizeof(int)) {
+			unsigned int *write_pos_cpu
+				= intel_private.i8xx_cpu_canary_pages[I830_CC_CANARY_FLOCK_GTT_PAGES + i]
+				+ j;
+			*write_pos_cpu = intel_private.i8xx_cache_flush_num;
+		}
+		mb();
+		if (cpu_has_clflush) {
+			clflush_cache_range(intel_private.i8xx_cpu_canary_pages[I830_CC_CANARY_FLOCK_GTT_PAGES + i],
+					    4096);
+		} else if (wbinvd_on_all_cpus() != 0)
+			printk(KERN_ERR "Timed out waiting for cache flush.\n");
+		mb();
+	}
+}
+
+#define I830_GTT_MAX_RETRIES 100
+static void intel_wait_for_canary_flocks(void)
+{
+	int firstfail_gtt, firstfail_cpu;
+	unsigned int canary_cpu_read, canary_gtt_read;
+	int i, j, retries = 0;
+
+	firstfail_cpu = firstfail_gtt = 0;
+
+	for (i = 0; i < I830_CC_CANARY_FLOCK_GTT_PAGES; i++) {
+		mb();
+		if (cpu_has_clflush) {
+			clflush_cache_range(intel_private.i8xx_cpu_canary_pages[i],
+					    4096);
+		} else if (wbinvd_on_all_cpus() != 0)
+			printk(KERN_ERR "Timed out waiting for cache flush.\n");
+		mb();
+
+		for (j = 0; j < 4096; j += sizeof(int)) {
+			while (retries < I830_GTT_MAX_RETRIES) {
+				unsigned int *check_pos
+					= intel_private.i8xx_cpu_canary_pages[i]
+					+ j;
+				canary_cpu_read = *check_pos;
+
+				if (canary_cpu_read
+				    == intel_private.i8xx_cache_flush_num)
+					break;
+
+				mb();
+				if (cpu_has_clflush)
+					clflush(check_pos);
+				else
+					wbinvd_on_all_cpus();
+				mb();
+
+				retries++;
+				intel_whack_gtt_harder();
+			}
+
+			if (retries == I830_GTT_MAX_RETRIES && !firstfail_cpu)
+				firstfail_cpu = i*4096 + j + 1;
+		}
+	}
+
+	for (i = 0; i < 4096*I830_CC_CANARY_FLOCK_CPU_PAGES; i += sizeof(int)) {
+		while (retries < I830_GTT_MAX_RETRIES) {
+			unsigned int __iomem *check_pos
+				= intel_private.i8xx_gtt_cc_pages
+				+ (1+I830_CC_CANARY_FLOCK_GTT_PAGES)*4096 + i;
+
+			canary_gtt_read = readl(check_pos);
+
+			if (canary_gtt_read == intel_private.i8xx_cache_flush_num)
+				break;
+
+			retries++;
+			intel_whack_gtt_harder();
+		}
+
+		if (retries == I830_GTT_MAX_RETRIES && !firstfail_gtt)
+			firstfail_gtt = i+1;
+	}
+
+	WARN_ONCE(retries == I830_GTT_MAX_RETRIES, "chipset flush timed out, "
+		  "gtt_read: %u, cpu_read: %u, "
+		  "expected: %u, gtt_pos: %i, cpu_pos: %i\n",
+		  canary_gtt_read, canary_cpu_read,
+		  intel_private.i8xx_cache_flush_num, firstfail_gtt,
+		  firstfail_cpu);
 }
 
 /* The chipset_flush interface needs to get data that has already been
@@ -770,16 +973,56 @@ static void intel_i830_setup_flush(void)
  * that buffer out, we just fill 1KB and clflush it out, on the assumption
  * that it'll push whatever was in there out. It appears to work.
  */
-static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
-{
-	unsigned int *pg = intel_private.i8xx_flush_page;
 
-	memset(pg, 0, 1024);
+/* Complaining once a minute about cache incoherency is enough! */
+DEFINE_RATELIMIT_STATE(i8xx_chipset_flush_ratelimit_cpu, 60*HZ, 1);
+DEFINE_RATELIMIT_STATE(i8xx_chipset_flush_ratelimit_gtt, 60*HZ, 1);
 
-	if (cpu_has_clflush)
-		clflush_cache_range(pg, 1024);
-	else if (wbinvd_on_all_cpus() != 0)
-		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
+{
+	unsigned int offset1
+		= (intel_private.i8xx_cache_flush_num * sizeof(int)) % 4096;
+	unsigned int offset2
+		= (intel_private.i8xx_cache_flush_num * sizeof(int)
+		   + 2048) % 4096;
+	unsigned int *p_cpu_read = intel_private.i8xx_cpu_check_page + offset1;
+	unsigned int *p_cpu_write = intel_private.i8xx_cpu_check_page + offset2;
+	unsigned int gtt_read, cpu_read;
+
+	/* write check values */
+	*p_cpu_write = intel_private.i8xx_cache_flush_num;
+	mb();
+	if (cpu_has_clflush) {
+		clflush(p_cpu_write);
+		clflush(p_cpu_read);
+	} else
+		wbinvd_on_all_cpus();
+	writel(intel_private.i8xx_cache_flush_num,
+	       intel_private.i8xx_gtt_cc_pages + offset1);
+	mb();
+
+	/* start chipset flush */
+	intel_write_canary_flocks();
+
+	intel_flush_mch_write_buffer();
+
+	intel_wait_for_canary_flocks();
+
+	/* read check values */
+	mb();
+	gtt_read = readl(intel_private.i8xx_gtt_cc_pages + offset2);
+	cpu_read = *p_cpu_read;
+
+	WARN(cpu_read != intel_private.i8xx_cache_flush_num
+	     && __ratelimit(&i8xx_chipset_flush_ratelimit_cpu),
+	     "i8xx chipset flush failed, expected: %u, cpu_read: %u\n",
+	     intel_private.i8xx_cache_flush_num, cpu_read);
+	WARN(gtt_read != intel_private.i8xx_cache_flush_num
+	     && __ratelimit(&i8xx_chipset_flush_ratelimit_gtt),
+	     "i8xx chipset flush failed, expected: %u, gtt_read: %u\n",
+	     intel_private.i8xx_cache_flush_num, gtt_read);
+
+	intel_private.i8xx_cache_flush_num++;
 }
 
 /* The intel i830 automatically initializes the agp aperture during POST.
@@ -888,6 +1131,7 @@ static int intel_i830_configure(void)
 
 	global_cache_flush();
 
 	intel_i830_setup_flush();
+
 	return 0;
 }
@@ -1618,3 +1862,129 @@ static const struct agp_bridge_driver intel_g33_driver = {
 	.agp_unmap_memory	= intel_agp_unmap_memory,
 #endif
 };
+
+/* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
+ * driver and gmch_driver must be non-null, and find_gmch will determine
+ * which one should be used if a gmch_chip_id is present.
+ */
+static const struct intel_gtt_driver_description {
+	unsigned int gmch_chip_id;
+	char *name;
+	const struct agp_bridge_driver *gmch_driver;
+} intel_gtt_chipsets[] = {
+	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver },
+	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver },
+	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver },
+	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver },
+	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M", &intel_830_driver },
+	{ PCI_DEVICE_ID_INTEL_82845G_IG, "830M", &intel_830_driver },
+	{ PCI_DEVICE_ID_INTEL_82854_IG, "854", &intel_830_driver },
+	{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM", &intel_830_driver },
+	{ PCI_DEVICE_ID_INTEL_82865_IG, "865", &intel_830_driver },
+	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", &intel_915_driver },
+	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G", &intel_915_driver },
+	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", &intel_915_driver },
+	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G", &intel_915_driver },
+	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", &intel_915_driver },
+	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", &intel_915_driver },
+	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35", &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G", &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33", &intel_g33_driver },
+	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", &intel_g33_driver },
+	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", &intel_g33_driver },
+	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", &intel_g33_driver },
+	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", &intel_g33_driver },
+	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45", &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake", &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43", &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43", &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43", &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41", &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
+	    "HD Graphics", &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
+	    "HD Graphics", &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
+	    "HD Graphics", &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
+	    "HD Graphics", &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG,
+	    "Sandybridge", &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG,
+	    "Sandybridge", &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_D0_IG,
+	    "Sandybridge", &intel_i965_driver },
+	{ 0, NULL, NULL }
+};
+
+static int find_gmch(u16 device)
+{
+	struct pci_dev *gmch_device;
+
+	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
+	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
+		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
+					     device, gmch_device);
+	}
+
+	if (!gmch_device)
+		return 0;
+
+	intel_private.pcidev = gmch_device;
+	return 1;
+}
+
+int intel_gmch_probe(struct pci_dev *pdev,
+		     struct agp_bridge_data *bridge)
+{
+	int i, mask;
+	bridge->driver = NULL;
+
+	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
+		if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
+			bridge->driver =
+				intel_gtt_chipsets[i].gmch_driver;
+			break;
+		}
+	}
+
+	if (!bridge->driver)
+		return 0;
+
+	bridge->dev_private_data = &intel_private;
+	bridge->dev = pdev;
+
+	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
+
+	if (bridge->driver->mask_memory == intel_gen6_mask_memory)
+		mask = 40;
+	else if (bridge->driver->mask_memory == intel_i965_mask_memory)
+		mask = 36;
+	else
+		mask = 32;
+
+	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
+		dev_err(&intel_private.pcidev->dev,
+			"set gfx device dma mask %d-bit failed!\n", mask);
+	else
+		pci_set_consistent_dma_mask(intel_private.pcidev,
+					    DMA_BIT_MASK(mask));
+
+	return 1;
+}
+EXPORT_SYMBOL(intel_gmch_probe);
+
+void intel_gmch_remove(struct pci_dev *pdev)
+{
+	if (intel_private.pcidev)
+		pci_dev_put(intel_private.pcidev);
+}
+EXPORT_SYMBOL(intel_gmch_remove);
+
+MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a7ec93e..03d2d56 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -40,6 +40,7 @@
 #include <linux/pnp.h>
 #include <linux/vga_switcheroo.h>
 #include <linux/slab.h>
+#include <drm/intel-gtt.h>
 
 extern int intel_max_stolen; /* from AGP driver */
 
@@ -1384,7 +1385,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
 	 * at the last page of the aperture.  One page should be enough to
 	 * keep any prefetching inside of the aperture.
 	 */
-	i915_gem_do_init(dev, prealloc_size, agp_size - 4096);
+	i915_gem_do_init(dev, prealloc_size,
+			 agp_size - I830_CC_DANCE_PAGES*4096);
 
 	mutex_lock(&dev->struct_mutex);
 	ret = i915_gem_init_ringbuffer(dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index df5a713..897f281 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4845,7 +4845,10 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 	if (ret)
 		return -EFAULT;
 
+	mutex_lock(&dev->struct_mutex);
 	drm_agp_chipset_flush(dev);
+	mutex_unlock(&dev->struct_mutex);
+
 	return 0;
 }
 
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
new file mode 100644
index 0000000..10e5c5b
--- /dev/null
+++ b/include/drm/intel-gtt.h
@@ -0,0 +1,13 @@
+/* Header file to share declarations between the intel-agp module and the i915
+ * drm module
+ */
+
+/* This denotes how many pages intel-gtt steals at the end of the gart. */
+#define I830_CC_CANARY_FLOCK_CPU_PAGES 2
+#define I830_CC_CANARY_FLOCK_GTT_PAGES 16
+#define I830_CC_GTT_WHACK_PAGES 16
+#define I830_CC_CANARY_FLOCK_PAGES (I830_CC_CANARY_FLOCK_CPU_PAGES +\
+		I830_CC_CANARY_FLOCK_GTT_PAGES)
+#define I830_CC_DANCE_PAGES (1 + I830_CC_CANARY_FLOCK_PAGES \
+		+ I830_CC_GTT_WHACK_PAGES)
+#define I830_MCH_WRITE_BUFFER_SIZE 1024
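
For orientation, a minimal user-space sketch of the carve-out arithmetic implied by the new include/drm/intel-gtt.h constants: one MCH write-buffer flush page, the canary flock and the GTT "whack" pages are stolen from the end of the GTT, and i915_load_modeset_init() ends the GEM range that many bytes early. This is purely illustrative and not part of the patch; the 4 KiB page size matches what the patch uses, while the 256 MiB aperture is a made-up example value.

#include <stdio.h>

/* Mirrors the constants from include/drm/intel-gtt.h above. */
#define I830_CC_CANARY_FLOCK_CPU_PAGES 2
#define I830_CC_CANARY_FLOCK_GTT_PAGES 16
#define I830_CC_GTT_WHACK_PAGES 16
#define I830_CC_CANARY_FLOCK_PAGES (I830_CC_CANARY_FLOCK_CPU_PAGES + \
				    I830_CC_CANARY_FLOCK_GTT_PAGES)
#define I830_CC_DANCE_PAGES (1 + I830_CC_CANARY_FLOCK_PAGES + \
			     I830_CC_GTT_WHACK_PAGES)

int main(void)
{
	unsigned long agp_size = 256UL << 20;	/* hypothetical 256 MiB aperture */
	unsigned long stolen = I830_CC_DANCE_PAGES * 4096UL;

	/* 1 flush page + 18 canary pages + 16 whack pages = 35 pages */
	printf("pages stolen at the end of the GTT: %d (%lu KiB)\n",
	       I830_CC_DANCE_PAGES, stolen / 1024);
	/* This is the upper bound i915_gem_do_init() now receives. */
	printf("GEM aperture end: 0x%lx\n", agp_size - stolen);
	return 0;
}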