linux/debian/patches/features/all/xen/pvops-updates.patch

Description: Xen pvops follow-up fixes for x86 page protections and
 DMA-coherent allocations in AGP/DRM.
 Adds an x86 arch_vm_get_page_prot() hook that sets _PAGE_IOMAP on VM_IO
 mappings, switches PAGE_AGP to PAGE_KERNEL_IO_NOCACHE, converts the i8xx
 AGP cursor pages and the DRM scatter-gather buffers from page-allocator /
 vmalloc allocations to dma_alloc_coherent(), threads the drm_device
 through drm_sg_cleanup() so the freeing side can use the same DMA API,
 and makes the TTM mmap paths recompute vm_page_prot from the final
 vm_flags.

diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 4953f9b..863e1c2 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -397,6 +397,9 @@ static inline unsigned long pages_to_mb(unsigned long npg)
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
+#define arch_vm_get_page_prot arch_vm_get_page_prot
+extern pgprot_t arch_vm_get_page_prot(unsigned vm_flags);
+
#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index c57a301..4e46931 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -160,7 +160,7 @@ extern void cleanup_highmap(void);
#define pgtable_cache_init() do { } while (0)
#define check_pgt_cache() do { } while (0)
-#define PAGE_AGP PAGE_KERNEL_NOCACHE
+#define PAGE_AGP PAGE_KERNEL_IO_NOCACHE
#define HAVE_PAGE_AGP 1
/* fs/proc/kcore.c */
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 25fc1df..103e324 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -17,6 +17,16 @@
gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;
+pgprot_t arch_vm_get_page_prot(unsigned vm_flags)
+{
+ pgprot_t ret = __pgprot(0);
+
+ if (vm_flags & VM_IO)
+ ret = __pgprot(_PAGE_IOMAP);
+
+ return ret;
+}
+
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
return (pte_t *)__get_free_page(PGALLOC_GFP);
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 9bca04e..399a017 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -398,15 +398,19 @@ static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
/* Exists to support ARGB cursors */
static struct page *i8xx_alloc_pages(void)
{
+ void *addr;
+ dma_addr_t _d;
struct page *page;
- page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
- if (page == NULL)
+ addr = dma_alloc_coherent(NULL, 4 * PAGE_SIZE, &_d, GFP_KERNEL);
+ if (addr == NULL)
return NULL;
+ page = virt_to_page(addr);
+
if (set_pages_uc(page, 4) < 0) {
set_pages_wb(page, 4);
- __free_pages(page, 2);
+ dma_free_coherent(NULL, 4 * PAGE_SIZE, addr, _d);
return NULL;
}
get_page(page);
@@ -416,12 +420,17 @@ static struct page *i8xx_alloc_pages(void)
static void i8xx_destroy_pages(struct page *page)
{
+ void *addr;
+
if (page == NULL)
return;
set_pages_wb(page, 4);
put_page(page);
- __free_pages(page, 2);
+
+ addr = page_address(page);
+
+ dma_free_coherent(NULL, 4 * PAGE_SIZE, addr, virt_to_bus(addr));
atomic_dec(&agp_bridge->current_memory_agp);
}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index a75ca63..bdc26b9 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -201,7 +201,7 @@ int drm_lastclose(struct drm_device * dev)
}
if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
!drm_core_check_feature(dev, DRIVER_MODESET)) {
- drm_sg_cleanup(dev->sg);
+ drm_sg_cleanup(dev, dev->sg);
dev->sg = NULL;
}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 8bf3770..dde5f66 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -539,7 +539,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
vma->vm_ops = obj->dev->driver->gem_vm_ops;
vma->vm_private_data = map->handle;
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
/* Take a ref for this mapping of the object, so that the fault
* handler can dereference the mmap offset's pointer to the object.
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index c7823c8..95ffb8a 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -32,20 +32,73 @@
*/
#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include "drmP.h"
#define DEBUG_SCATTER 0
-static inline void *drm_vmalloc_dma(unsigned long size)
+static void *drm_vmalloc_dma(struct drm_device *drmdev, unsigned long size)
{
#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
#else
- return vmalloc_32(size);
+ struct device *dev = &drmdev->pdev->dev;
+ struct page **pages;
+ void *addr;
+ const int npages = PFN_UP(size);
+ int i;
+
+ pages = kmalloc(npages * sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ goto fail;
+
+ for (i = 0; i < npages; i++) {
+ dma_addr_t phys;
+ void *addr;
+ addr = dma_alloc_coherent(dev, PAGE_SIZE, &phys, GFP_KERNEL);
+ if (addr == NULL)
+ goto out_free_pages;
+
+ pages[i] = virt_to_page(addr);
+ }
+
+ addr = vmap(pages, npages, VM_MAP | VM_IOREMAP, PAGE_KERNEL);
+
+ kfree(pages);
+
+ return addr;
+
+out_free_pages:
+ while (i > 0) {
+ void *addr = page_address(pages[--i]);
+ dma_free_coherent(dev, PAGE_SIZE, addr, virt_to_bus(addr));
+ }
+
+ kfree(pages);
+
+fail:
+ return NULL;
+#endif
+}
+
+static void drm_vfree_dma(struct drm_device *drmdev, void *addr, int npages,
+ struct page **pages)
+{
+#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
+ vfree(addr);
+#else
+ struct device *dev = &drmdev->pdev->dev;
+ int i;
+
+ for (i = 0; i < npages; i++) {
+ void *addr = page_address(pages[i]);
+ dma_free_coherent(dev, PAGE_SIZE, addr, virt_to_bus(addr));
+ }
+ vunmap(addr);
#endif
}
-void drm_sg_cleanup(struct drm_sg_mem * entry)
+void drm_sg_cleanup(struct drm_device *drmdev, struct drm_sg_mem * entry)
{
struct page *page;
int i;
@@ -56,7 +109,7 @@ void drm_sg_cleanup(struct drm_sg_mem * entry)
ClearPageReserved(page);
}
- vfree(entry->virtual);
+ drm_vfree_dma(drmdev, entry->virtual, entry->pages, entry->pagelist);
kfree(entry->busaddr);
kfree(entry->pagelist);
@@ -107,7 +160,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
}
memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
- entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
+ entry->virtual = drm_vmalloc_dma(dev, pages << PAGE_SHIFT);
if (!entry->virtual) {
kfree(entry->busaddr);
kfree(entry->pagelist);
@@ -180,7 +233,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
return 0;
failed:
- drm_sg_cleanup(entry);
+ drm_sg_cleanup(dev, entry);
return -ENOMEM;
}
EXPORT_SYMBOL(drm_sg_alloc);
@@ -212,7 +265,7 @@ int drm_sg_free(struct drm_device *dev, void *data,
DRM_DEBUG("virtual = %p\n", entry->virtual);
- drm_sg_cleanup(entry);
+ drm_sg_cleanup(dev, entry);
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 1c040d0..3dc8d6b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -272,6 +272,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
vma->vm_private_data = bo;
vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
return 0;
out_unref:
ttm_bo_unref(&bo);
@@ -287,6 +288,7 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
vma->vm_ops = &ttm_bo_vm_ops;
vma->vm_private_data = ttm_bo_reference(bo);
vma->vm_flags |= VM_RESERVED | VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 7ad3faa..cf9ddce 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1388,7 +1388,7 @@ extern int drm_vma_info(struct seq_file *m, void *data);
#endif
/* Scatter Gather Support (drm_scatter.h) */
-extern void drm_sg_cleanup(struct drm_sg_mem * entry);
+extern void drm_sg_cleanup(struct drm_device *dev, struct drm_sg_mem * entry);
extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request);