Diffstat (limited to '0004-drm-i915-Wean-off-drm_pci_alloc-drm_pci_free.patch')
-rw-r--r--  0004-drm-i915-Wean-off-drm_pci_alloc-drm_pci_free.patch | 260
1 file changed, 260 insertions(+), 0 deletions(-)
diff --git a/0004-drm-i915-Wean-off-drm_pci_alloc-drm_pci_free.patch b/0004-drm-i915-Wean-off-drm_pci_alloc-drm_pci_free.patch
new file mode 100644
index 000000000000..8be0a16bcd17
--- /dev/null
+++ b/0004-drm-i915-Wean-off-drm_pci_alloc-drm_pci_free.patch
@@ -0,0 +1,260 @@
+From 7af5e53f87d728579cba99568717a6af3625184b Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Sun, 2 Feb 2020 15:39:34 +0000
+Subject: [PATCH 4/7] drm/i915: Wean off drm_pci_alloc/drm_pci_free
+
+drm_pci_alloc and drm_pci_free are just very thin wrappers around
+dma_alloc_coherent, with a note that we should be removing them.
+Furthermore since
+
+commit de09d31dd38a50fdce106c15abd68432eebbd014
+Author: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Date: Fri Jan 15 16:51:42 2016 -0800
+
+ page-flags: define PG_reserved behavior on compound pages
+
+ As far as I can see there's no users of PG_reserved on compound pages.
+ Let's use PF_NO_COMPOUND here.
+
+drm_pci_alloc has been declared broken since it mixes GFP_COMP and
+SetPageReserved. Avoid this conflict by weaning ourselves off using the
+abstraction and using the dma functions directly.
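+
+As a rough before/after sketch of the conversion (size here stands for
+the roundup_pow_of_two(obj->base.size) alignment used below):
+
+    /* before: opaque DRM handle bundling vaddr and busaddr */
+    phys = drm_pci_alloc(obj->base.dev, size, size);
+    ...
+    drm_pci_free(obj->base.dev, phys);
+
+    /* after: the underlying DMA API, called directly */
+    vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev, size,
+                               &dma, GFP_KERNEL);
+    ...
+    dma_free_coherent(&obj->base.dev->pdev->dev, size, vaddr, dma);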
+
+Reported-by: Taketo Kabe
+Closes: https://gitlab.freedesktop.org/drm/intel/issues/1027
+Fixes: de09d31dd38a ("page-flags: define PG_reserved behavior on compound pages")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: <stable@vger.kernel.org> # v4.5+
+Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200202153934.3899472-1-chris@chris-wilson.co.uk
+---
+ drivers/gpu/drm/i915/display/intel_display.c | 2 +-
+ .../gpu/drm/i915/gem/i915_gem_object_types.h | 3 -
+ drivers/gpu/drm/i915/gem/i915_gem_phys.c | 98 ++++++++++---------
+ drivers/gpu/drm/i915/i915_gem.c | 8 +-
+ 4 files changed, 55 insertions(+), 56 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
+index 301897791627..b670239a293b 100644
+--- a/drivers/gpu/drm/i915/display/intel_display.c
++++ b/drivers/gpu/drm/i915/display/intel_display.c
+@@ -10731,7 +10731,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
+ u32 base;
+
+ if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
+- base = obj->phys_handle->busaddr;
++ base = sg_dma_address(obj->mm.pages->sgl);
+ else
+ base = intel_plane_ggtt_offset(plane_state);
+
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+index e3f3944fbd90..1078a76d6d84 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
++++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+@@ -260,9 +260,6 @@ struct drm_i915_gem_object {
+
+ void *gvt_info;
+ };
+-
+- /** for phys allocated objects */
+- struct drm_dma_handle *phys_handle;
+ };
+
+ static inline struct drm_i915_gem_object *
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+index 8043ff63d73f..5e2e0109c9ba 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+@@ -22,88 +22,87 @@
+ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
+ {
+ struct address_space *mapping = obj->base.filp->f_mapping;
+- struct drm_dma_handle *phys;
+- struct sg_table *st;
+ struct scatterlist *sg;
+- char *vaddr;
++ struct sg_table *st;
++ dma_addr_t dma;
++ void *vaddr;
++ void *dst;
+ int i;
+- int err;
+
+ if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
+ return -EINVAL;
+
+- /* Always aligning to the object size, allows a single allocation
++ /*
++ * Always aligning to the object size, allows a single allocation
+ * to handle all possible callers, and given typical object sizes,
+ * the alignment of the buddy allocation will naturally match.
+ */
+- phys = drm_pci_alloc(obj->base.dev,
+- roundup_pow_of_two(obj->base.size),
+- roundup_pow_of_two(obj->base.size));
+- if (!phys)
++ vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev,
++ roundup_pow_of_two(obj->base.size),
++ &dma, GFP_KERNEL);
++ if (!vaddr)
+ return -ENOMEM;
+
+- vaddr = phys->vaddr;
++ st = kmalloc(sizeof(*st), GFP_KERNEL);
++ if (!st)
++ goto err_pci;
++
++ if (sg_alloc_table(st, 1, GFP_KERNEL))
++ goto err_st;
++
++ sg = st->sgl;
++ sg->offset = 0;
++ sg->length = obj->base.size;
++
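++	/*
++	 * Stash the coherent kernel address in the page slot of the sg
++	 * entry; no real struct page backs this allocation, and the
++	 * put_pages path below recovers the address via sg_page().
++	 */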
++ sg_assign_page(sg, (struct page *)vaddr);
++ sg_dma_address(sg) = dma;
++ sg_dma_len(sg) = obj->base.size;
++
++ dst = vaddr;
+ for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+ struct page *page;
+- char *src;
++ void *src;
+
+ page = shmem_read_mapping_page(mapping, i);
+- if (IS_ERR(page)) {
+- err = PTR_ERR(page);
+- goto err_phys;
+- }
++ if (IS_ERR(page))
++ goto err_st;
+
+ src = kmap_atomic(page);
+- memcpy(vaddr, src, PAGE_SIZE);
+- drm_clflush_virt_range(vaddr, PAGE_SIZE);
++ memcpy(dst, src, PAGE_SIZE);
++ drm_clflush_virt_range(dst, PAGE_SIZE);
+ kunmap_atomic(src);
+
+ put_page(page);
+- vaddr += PAGE_SIZE;
++ dst += PAGE_SIZE;
+ }
+
+ intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
+
+- st = kmalloc(sizeof(*st), GFP_KERNEL);
+- if (!st) {
+- err = -ENOMEM;
+- goto err_phys;
+- }
+-
+- if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+- kfree(st);
+- err = -ENOMEM;
+- goto err_phys;
+- }
+-
+- sg = st->sgl;
+- sg->offset = 0;
+- sg->length = obj->base.size;
+-
+- sg_dma_address(sg) = phys->busaddr;
+- sg_dma_len(sg) = obj->base.size;
+-
+- obj->phys_handle = phys;
+-
+ __i915_gem_object_set_pages(obj, st, sg->length);
+
+ return 0;
+
+-err_phys:
+- drm_pci_free(obj->base.dev, phys);
+-
+- return err;
++err_st:
++ kfree(st);
++err_pci:
++ dma_free_coherent(&obj->base.dev->pdev->dev,
++ roundup_pow_of_two(obj->base.size),
++ vaddr, dma);
++ return -ENOMEM;
+ }
+
+ static void
+ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+ {
++ dma_addr_t dma = sg_dma_address(pages->sgl);
++ void *vaddr = sg_page(pages->sgl);
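++	/* not a real page: sg_page() yields the vaddr stashed at get_pages */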
++
+ __i915_gem_object_release_shmem(obj, pages, false);
+
+ if (obj->mm.dirty) {
+ struct address_space *mapping = obj->base.filp->f_mapping;
+- char *vaddr = obj->phys_handle->vaddr;
++ void *src = vaddr;
+ int i;
+
+ for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+@@ -115,15 +114,16 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
+ continue;
+
+ dst = kmap_atomic(page);
+- drm_clflush_virt_range(vaddr, PAGE_SIZE);
+- memcpy(dst, vaddr, PAGE_SIZE);
++ drm_clflush_virt_range(src, PAGE_SIZE);
++ memcpy(dst, src, PAGE_SIZE);
+ kunmap_atomic(dst);
+
+ set_page_dirty(page);
+ if (obj->mm.madv == I915_MADV_WILLNEED)
+ mark_page_accessed(page);
+ put_page(page);
+- vaddr += PAGE_SIZE;
++
++ src += PAGE_SIZE;
+ }
+ obj->mm.dirty = false;
+ }
+@@ -131,7 +131,9 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
+ sg_free_table(pages);
+ kfree(pages);
+
+- drm_pci_free(obj->base.dev, obj->phys_handle);
++ dma_free_coherent(&obj->base.dev->pdev->dev,
++ roundup_pow_of_two(obj->base.size),
++ vaddr, dma);
+ }
+
+ static void phys_release(struct drm_i915_gem_object *obj)
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 905890e3ac24..3f07948ea4da 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -154,7 +154,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_pwrite *args,
+ struct drm_file *file)
+ {
+- void *vaddr = obj->phys_handle->vaddr + args->offset;
++ void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
+ char __user *user_data = u64_to_user_ptr(args->data_ptr);
+
+ /*
+@@ -800,10 +800,10 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+ ret = i915_gem_gtt_pwrite_fast(obj, args);
+
+ if (ret == -EFAULT || ret == -ENOSPC) {
+- if (obj->phys_handle)
+- ret = i915_gem_phys_pwrite(obj, args, file);
+- else
++ if (i915_gem_object_has_struct_page(obj))
+ ret = i915_gem_shmem_pwrite(obj, args);
++ else
++ ret = i915_gem_phys_pwrite(obj, args, file);
+ }
+
+ i915_gem_object_unpin_pages(obj);
+--
+2.25.0
+