diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index e0ab173..8f1a0f5 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -365,11 +365,14 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		*new_type = actual_type;
 
 	is_range_ram = pat_pagerange_is_ram(start, end);
-	if (is_range_ram == 1)
+	if (is_range_ram == 1) {
+		printk("reserve_memtype: calling reserve_ram_pages_type for 0x%llx 0x%llx %lu\n", start, end, req_type);
 		return reserve_ram_pages_type(start, end, req_type,
 					      new_type);
-	else if (is_range_ram < 0)
+	} else if (is_range_ram < 0) {
+		printk("reserve_memtype: is_range_ram < 0 for 0x%llx 0x%llx %lu\n", start, end, req_type);
 		return -EINVAL;
+	}
 
 	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
 	if (!new)
@@ -641,14 +644,21 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 
 	/*
-	 * reserve_pfn_range() doesn't support RAM pages.
+	 * reserve_pfn_range() doesn't support RAM pages. Maintain the current
+	 * behavior with RAM pages by returning success.
 	 */
-	if (is_ram != 0)
-		return -EINVAL;
+	if (is_ram != 0) {
+		printk("reserve_pfn_range: is_ram is %d for 0x%llx!\n",
+			is_ram, paddr);
+		return 0;
+	}
 
 	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
-	if (ret)
+	if (ret) {
+		printk("reserve_pfn_range: reserve_memtype ret %d for 0x%llx 0x%lx 0x%lx\n",
+			ret, paddr, size, want_flags);
 		return ret;
+	}
 
 	if (flags != want_flags) {
 		if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
@@ -739,7 +749,12 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 			return -EINVAL;
 		}
 		pgprot = __pgprot(prot);
-		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
+		retval = reserve_pfn_range(paddr, vma_size, &pgprot, 1);
+		if (retval) {
+			printk("track_pfn_vma_copy linear - 0x%llx 0x%lx 0x%lx failed\n",
+				paddr, vma_size, prot);
+		}
+		return retval;
 	}
 
 	/* reserve entire vma page by page, using pfn and prot from pte */
@@ -749,8 +764,11 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 		pgprot = __pgprot(prot);
 		retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
-		if (retval)
+		if (retval) {
+			printk("track_pfn_vma_copy not linear - 0x%llx 0x%lx failed\n",
+				paddr, prot);
 			goto cleanup_ret;
+		}
 	}
 
 	return 0;
 
@@ -796,7 +814,12 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 	if (is_linear_pfn_mapping(vma)) {
 		/* reserve the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
-		return reserve_pfn_range(paddr, vma_size, prot, 0);
+		retval = reserve_pfn_range(paddr, vma_size, prot, 0);
+		if (retval) {
+			printk("track_pfn_vma_new linear - 0x%llx 0x%lx 0x%x failed\n",
+				paddr, vma_size, (int) pgprot_val(*prot));
+		}
+		return retval;
 	}
 
 	/* reserve page by page using pfn and size */
@@ -804,8 +827,11 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		paddr = base_paddr + i;
 		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
-		if (retval)
+		if (retval) {
+			printk("track_pfn_vma_new not linear - 0x%llx 0x%x failed\n",
+				paddr, (int) pgprot_val(*prot));
 			goto cleanup_ret;
+		}
 	}
 
 	return 0;
@@ -816,7 +842,7 @@ cleanup_ret:
 		free_pfn_range(paddr, PAGE_SIZE);
 	}
 
-	return retval;
+	return 0;
 }
 
 /*
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 5ead808..f234a37 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -319,6 +319,9 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 			return -EINVAL;
 		}
 		flags = new_flags;
+		vma->vm_page_prot = __pgprot(
+			(pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK) |
+			flags);
 	}
 
 	if (((vma->vm_pgoff < max_low_pfn_mapped) ||
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 88d3368..0cfd65a 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -537,12 +537,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
 	vma->vm_ops = obj->dev->driver->gem_vm_ops;
 	vma->vm_private_data = map->handle;
-	/* FIXME: use pgprot_writecombine when available */
-	prot = pgprot_val(vma->vm_page_prot);
-#ifdef CONFIG_X86
-	prot |= _PAGE_CACHE_WC;
-#endif
-	vma->vm_page_prot = __pgprot(prot);
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 
 	/* Take a ref for this mapping of the object, so that the fault
 	 * handler can dereference the mmap offset's pointer to the object.
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index aa4731b..0741da8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -197,6 +197,8 @@ typedef struct drm_i915_private {
 	u32 saveDSPACNTR;
 	u32 saveDSPBCNTR;
 	u32 saveDSPARB;
+	u32 saveFW_BLC;
+	u32 saveFW_BLC_SELF;
 	u32 saveRENDERSTANDBY;
 	u32 saveHWS;
 	u32 savePIPEACONF;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index aab5fa2..1ee2d6d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -565,6 +565,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	int ret = 0;
 	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
 
+	vmf->page = NULL;
+
 	/* We don't use vmf->pgoff since that has the fake offset */
 	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
 		PAGE_SHIFT;
@@ -602,6 +604,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	case -ENOMEM:
 	case -EAGAIN:
 		return VM_FAULT_OOM;
+	case -EINVAL:
+		DRM_ERROR("fault failed, invalid pfn\n");
 	case -EFAULT:
 		return VM_FAULT_SIGBUS;
 	default:
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index d669cc2..0705dc9 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -239,6 +239,10 @@ int i915_save_state(struct drm_device *dev)
 	/* Display arbitration control */
 	dev_priv->saveDSPARB = I915_READ(DSPARB);
 
+	/* FIFO watermarks */
+	dev_priv->saveFW_BLC = I915_READ(FW_BLC);
+	dev_priv->saveFW_BLC_SELF = I915_READ(FW_BLC_SELF);
+
 	/* Pipe & plane A info */
 	dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
 	dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
@@ -371,6 +375,10 @@ int i915_restore_state(struct drm_device *dev)
 	/* Display arbitration */
 	I915_WRITE(DSPARB, dev_priv->saveDSPARB);
 
+	/* FIFO watermarks */
+	I915_WRITE(FW_BLC, dev_priv->saveFW_BLC);
+	I915_WRITE(FW_BLC_SELF, dev_priv->saveFW_BLC_SELF);
+
 	/* Pipe & plane A info */
 	/* Prime the clock */
 	if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 065cdf8..3daa05f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -98,7 +98,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
 #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
-#define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
+#define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it. Refer note in VM_PFNMAP_AT_MMAP below */
 #define VM_ALWAYSDUMP	0x04000000	/* Always include in core dumps */
 
 #define VM_CAN_NONLINEAR 0x08000000	/* Has ->fault & does nonlinear pages */
@@ -127,6 +127,17 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
 
 /*
+ * pfnmap vmas that are fully mapped at mmap time (not mapped on fault).
+ * Used by x86 PAT to identify such PFNMAP mappings and optimize their handling.
+ * Note that the VM_INSERTPAGE flag is overloaded here, i.e.:
+ * VM_INSERTPAGE && !VM_PFNMAP implies
+ *     the vma has had "vm_insert_page()" done on it
+ * VM_INSERTPAGE && VM_PFNMAP implies
+ *     the vma is a PFNMAP with full mapping at mmap time
+ */
+#define VM_PFNMAP_AT_MMAP (VM_INSERTPAGE | VM_PFNMAP)
+
+/*
  * mapping from the currently active vm_flags protection bits (the
  * low four bits) to a page protection mask..
  */
@@ -145,7 +156,7 @@ extern pgprot_t protection_map[16];
  */
 static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
 {
-	return ((vma->vm_flags & VM_PFNMAP) && vma->vm_pgoff);
+	return ((vma->vm_flags & VM_PFNMAP_AT_MMAP) == VM_PFNMAP_AT_MMAP);
 }
 
 static inline int is_pfn_mapping(struct vm_area_struct *vma)
diff --git a/mm/memory.c b/mm/memory.c
index baa999e..d7df5ba 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1665,9 +1665,10 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 	 * behaviour that some programs depend on. We mark the "original"
 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
 	 */
-	if (addr == vma->vm_start && end == vma->vm_end)
+	if (addr == vma->vm_start && end == vma->vm_end) {
 		vma->vm_pgoff = pfn;
-	else if (is_cow_mapping(vma->vm_flags))
+		vma->vm_flags |= VM_PFNMAP_AT_MMAP;
+	} else if (is_cow_mapping(vma->vm_flags))
 		return -EINVAL;
 
 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
@@ -1679,6 +1680,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
 		 * needed from higher level routine calling unmap_vmas
 		 */
 		vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
+		vma->vm_flags &= ~VM_PFNMAP_AT_MMAP;
 		return -EINVAL;
 	}
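
A note on the mm.h change above: the masked equality test in the new
is_linear_pfn_mapping() is what makes the VM_INSERTPAGE overloading safe,
since a plain bitwise AND would also be true when only one of the two bits
is set. The user-space sketch below is illustrative and not part of the
patch; the flag values are copied from include/linux/mm.h, while the
harness and the stand-alone helper are mock-ups:

#include <stdio.h>

/* Flag values as defined in include/linux/mm.h (2.6.29 era). */
#define VM_PFNMAP		0x00000400UL	/* page ranges managed without "struct page" */
#define VM_INSERTPAGE		0x02000000UL	/* vm_insert_page() has been done on the vma */
#define VM_PFNMAP_AT_MMAP	(VM_INSERTPAGE | VM_PFNMAP)

/* Mirrors the new is_linear_pfn_mapping(): both bits must be set. */
static int is_linear_pfn_mapping(unsigned long vm_flags)
{
	return (vm_flags & VM_PFNMAP_AT_MMAP) == VM_PFNMAP_AT_MMAP;
}

int main(void)
{
	unsigned long insertpage = VM_INSERTPAGE;		/* vm_insert_page() vma */
	unsigned long at_mmap = VM_PFNMAP | VM_INSERTPAGE;	/* fully mapped at mmap time */
	unsigned long on_fault = VM_PFNMAP;			/* pfnmap populated on fault */

	printf("vm_insert_page vma: %d\n", is_linear_pfn_mapping(insertpage));	/* 0 */
	printf("pfnmap at mmap:     %d\n", is_linear_pfn_mapping(at_mmap));	/* 1 */
	printf("pfnmap on fault:    %d\n", is_linear_pfn_mapping(on_fault));	/* 0 */
	return 0;
}

This also shows why remap_pfn_range()'s error path above must clear the
flag again: track_pfn_vma_new() can fail after the full-size branch has
already set VM_PFNMAP_AT_MMAP, and a stale flag would make later code treat
the vma as a fully reserved linear pfnmap.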
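
The arch/x86/pci/i386.c hunk and the pgprot_writecombine() call in
drm_gem.c rely on the same idiom: clear the PAT cache-attribute bits of a
page protection with ~_PAGE_CACHE_MASK, then OR in the desired memory type.
A stand-alone sketch of that idiom follows; it is not part of the patch,
and it assumes the 2.6.29-era x86 bit encodings (_PAGE_PWT is bit 3,
_PAGE_PCD is bit 4), with set_cache_attr() as a mock-up of what
pgprot_writecombine() does on x86:

#include <stdio.h>

/* x86 cache-attribute bits as encoded in arch/x86/include/asm/pgtable.h. */
#define _PAGE_PWT		0x008UL		/* write-through */
#define _PAGE_PCD		0x010UL		/* cache disable */
#define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB		0x000UL		/* write-back (default) */
#define _PAGE_CACHE_WC		(_PAGE_PWT)	/* write-combining, via PAT */
#define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)	/* uncached minus */

/* Replace only the cache-attribute bits; leave permission bits alone. */
static unsigned long set_cache_attr(unsigned long prot, unsigned long type)
{
	return (prot & ~_PAGE_CACHE_MASK) | type;
}

int main(void)
{
	/* A protection value carrying UC- plus some permission bits. */
	unsigned long prot = 0x067UL | _PAGE_CACHE_UC_MINUS;

	printf("before: %#lx\n", prot);		/* UC- mapping */
	prot = set_cache_attr(prot, _PAGE_CACHE_WC);
	printf("after:  %#lx\n", prot);		/* now WC, permission bits intact */
	return 0;
}

The masking matters because vm_page_prot may already carry a cache type
(UC- in the pci_mmap_page_range() path). Simply ORing in new bits, as the
old drm_gem.c code did with _PAGE_CACHE_WC, can combine PWT with an
existing PCD and silently produce UC instead of the intended WC.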