drm/i915/ttm: Introduce a TTM i915 gem object backend
The most logical place to introduce TTM buffer objects is as an i915
gem object backend. We need to add some ops to account for added
functionality like delayed delete and LRU list manipulation.

Initially we support only LMEM and SYSTEM memory, but SYSTEM
(which in this case means evicted LMEM objects) is not
visible to i915 GEM yet. The plan is to move the i915 gem system region
over to the TTM system memory type in upcoming patches.

We set up GPU bindings directly both from LMEM and from the system region,
as there is no need to use the legacy TTM_TT memory type. We reserve
that for future porting of GGTT bindings to TTM.

Remove the old lmem backend.
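
For orientation, a minimal sketch of where this lands: the TTM backend
fills in a drm_i915_gem_object_ops table wiring up the two new optional
hooks this patch adds. The table below is illustrative, not the literal
i915_gem_ttm.c contents; only the hook names and the
I915_GEM_OBJECT_HAS_IOMEM flag are taken from the patch itself.

/* Illustrative sketch only; function bodies live in i915_gem_ttm.c. */
static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj);
static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj);

static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
	.name = "i915_gem_object_ttm",
	.flags = I915_GEM_OBJECT_HAS_IOMEM,
	.adjust_lru = i915_ttm_adjust_lru,	/* re-sort the eviction LRU */
	.delayed_free = i915_ttm_delayed_free,	/* defer free until idle */
};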

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210610070152.572423-2-thomas.hellstrom@linux.intel.com
Thomas Hellström authored and mlankhorst committed Jun 11, 2021
1 parent 1bd8a7d commit 213d509
Showing 16 changed files with 730 additions and 153 deletions.
1 change: 1 addition & 0 deletions drivers/gpu/drm/i915/Makefile
@@ -155,6 +155,7 @@ gem-y += \
gem/i915_gem_stolen.o \
gem/i915_gem_throttle.o \
gem/i915_gem_tiling.o \
gem/i915_gem_ttm.o \
gem/i915_gem_userptr.o \
gem/i915_gem_wait.o \
gem/i915_gemfs.o
9 changes: 3 additions & 6 deletions drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -85,13 +85,10 @@ i915_gem_setup(struct drm_i915_gem_object *obj, u64 size)
 	return -E2BIG;

 	/*
-	 * For now resort to CPU based clearing for device local-memory, in the
-	 * near future this will use the blitter engine for accelerated, GPU
-	 * based clearing.
+	 * I915_BO_ALLOC_USER will make sure the object is cleared before
+	 * any user access.
 	 */
-	flags = 0;
-	if (mr->type == INTEL_MEMORY_LOCAL)
-		flags = I915_BO_ALLOC_CPU_CLEAR;
+	flags = I915_BO_ALLOC_USER;

 	ret = mr->ops->init_object(mr, obj, size, flags);
 	if (ret)
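The new flag is consumed by the object backend rather than here; a hedged
sketch of how a backend's get_pages hook might honor it (all example_*
names are hypothetical helpers, not part of this patch):

static int example_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = example_alloc_and_map_pages(obj);	/* hypothetical */
	if (err)
		return err;

	/* I915_BO_ALLOC_USER: zero the backing store before user access. */
	if (obj->flags & I915_BO_ALLOC_USER)
		example_clear_pages(obj);	/* hypothetical */

	return 0;
}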
84 changes: 0 additions & 84 deletions drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -4,74 +4,10 @@
*/

#include "intel_memory_region.h"
#include "intel_region_ttm.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"

static void lmem_put_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
obj->mm.dirty = false;
sg_free_table(pages);
kfree(pages);
}

static int lmem_get_pages(struct drm_i915_gem_object *obj)
{
unsigned int flags;
struct sg_table *pages;

flags = I915_ALLOC_MIN_PAGE_SIZE;
if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
flags |= I915_ALLOC_CONTIGUOUS;

obj->mm.st_mm_node = intel_region_ttm_node_alloc(obj->mm.region,
obj->base.size,
flags);
if (IS_ERR(obj->mm.st_mm_node))
return PTR_ERR(obj->mm.st_mm_node);

/* Range manager is always contiguous */
if (obj->mm.region->is_range_manager)
obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
pages = intel_region_ttm_node_to_st(obj->mm.region, obj->mm.st_mm_node);
if (IS_ERR(pages)) {
intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
return PTR_ERR(pages);
}

__i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));

if (obj->flags & I915_BO_ALLOC_CPU_CLEAR) {
void __iomem *vaddr =
i915_gem_object_lmem_io_map(obj, 0, obj->base.size);

if (!vaddr) {
struct sg_table *pages =
__i915_gem_object_unset_pages(obj);

			if (!IS_ERR_OR_NULL(pages))
				lmem_put_pages(obj, pages);

			return -ENOMEM;
		}

memset_io(vaddr, 0, obj->base.size);
io_mapping_unmap(vaddr);
}

return 0;
}

const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
.name = "i915_gem_object_lmem",
.flags = I915_GEM_OBJECT_HAS_IOMEM,

.get_pages = lmem_get_pages,
.put_pages = lmem_put_pages,
.release = i915_gem_object_release_memory_region,
};

void __iomem *
i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
unsigned long n,
@@ -103,23 +39,3 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
size, flags);
}

int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
resource_size_t size,
unsigned int flags)
{
static struct lock_class_key lock_class;
struct drm_i915_private *i915 = mem->i915;

drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class, flags);

obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;

i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);

i915_gem_object_init_memory_region(obj, mem);

return 0;
}
5 changes: 0 additions & 5 deletions drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -26,9 +26,4 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
resource_size_t size,
unsigned int flags);

int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
struct drm_i915_gem_object *obj,
resource_size_t size,
unsigned int flags);

#endif /* !__I915_GEM_LMEM_H */
125 changes: 86 additions & 39 deletions drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -172,7 +172,7 @@ static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *f
}
}

-static void __i915_gem_free_object_rcu(struct rcu_head *head)
+void __i915_gem_free_object_rcu(struct rcu_head *head)
{
struct drm_i915_gem_object *obj =
container_of(head, typeof(*obj), rcu);
@@ -208,59 +208,69 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
}
}

-static void __i915_gem_free_objects(struct drm_i915_private *i915,
-				    struct llist_node *freed)
+void __i915_gem_free_object(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj, *on;
+	trace_i915_gem_object_destroy(obj);

-	llist_for_each_entry_safe(obj, on, freed, freed) {
-		trace_i915_gem_object_destroy(obj);
+	if (!list_empty(&obj->vma.list)) {
+		struct i915_vma *vma;

-		if (!list_empty(&obj->vma.list)) {
-			struct i915_vma *vma;
+		/*
+		 * Note that the vma keeps an object reference while
+		 * it is active, so it *should* not sleep while we
+		 * destroy it. Our debug code errs and insists it *might*.
+		 * For the moment, play along.
+		 */
+		spin_lock(&obj->vma.lock);
+		while ((vma = list_first_entry_or_null(&obj->vma.list,
+						       struct i915_vma,
+						       obj_link))) {
+			GEM_BUG_ON(vma->obj != obj);
+			spin_unlock(&obj->vma.lock);

-			/*
-			 * Note that the vma keeps an object reference while
-			 * it is active, so it *should* not sleep while we
-			 * destroy it. Our debug code errs and insists it *might*.
-			 * For the moment, play along.
-			 */
-			spin_lock(&obj->vma.lock);
-			while ((vma = list_first_entry_or_null(&obj->vma.list,
-							       struct i915_vma,
-							       obj_link))) {
-				GEM_BUG_ON(vma->obj != obj);
-				spin_unlock(&obj->vma.lock);
+			__i915_vma_put(vma);

-				__i915_vma_put(vma);
+			spin_lock(&obj->vma.lock);
+		}
+		spin_unlock(&obj->vma.lock);
+	}

-				spin_lock(&obj->vma.lock);
-			}
-			spin_unlock(&obj->vma.lock);
-		}
+	__i915_gem_object_free_mmaps(obj);

-		__i915_gem_object_free_mmaps(obj);
+	GEM_BUG_ON(!list_empty(&obj->lut_list));

-		GEM_BUG_ON(!list_empty(&obj->lut_list));
+	atomic_set(&obj->mm.pages_pin_count, 0);
+	__i915_gem_object_put_pages(obj);
+	GEM_BUG_ON(i915_gem_object_has_pages(obj));
+	bitmap_free(obj->bit_17);

-		atomic_set(&obj->mm.pages_pin_count, 0);
-		__i915_gem_object_put_pages(obj);
-		GEM_BUG_ON(i915_gem_object_has_pages(obj));
-		bitmap_free(obj->bit_17);
+	if (obj->base.import_attach)
+		drm_prime_gem_destroy(&obj->base, NULL);

-		if (obj->base.import_attach)
-			drm_prime_gem_destroy(&obj->base, NULL);
+	drm_gem_free_mmap_offset(&obj->base);

-		drm_gem_free_mmap_offset(&obj->base);
+	if (obj->ops->release)
+		obj->ops->release(obj);

-		if (obj->ops->release)
-			obj->ops->release(obj);
+	if (obj->mm.n_placements > 1)
+		kfree(obj->mm.placements);

-		if (obj->mm.n_placements > 1)
-			kfree(obj->mm.placements);
+	if (obj->shares_resv_from)
+		i915_vm_resv_put(obj->shares_resv_from);
+}

-		if (obj->shares_resv_from)
-			i915_vm_resv_put(obj->shares_resv_from);
+static void __i915_gem_free_objects(struct drm_i915_private *i915,
+				    struct llist_node *freed)
+{
+	struct drm_i915_gem_object *obj, *on;
+
+	llist_for_each_entry_safe(obj, on, freed, freed) {
+		might_sleep();
+		if (obj->ops->delayed_free) {
+			obj->ops->delayed_free(obj);
+			continue;
+		}
+		__i915_gem_free_object(obj);
/* But keep the pointer alive for RCU-protected lookups */
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
@@ -318,6 +328,7 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
* worker and performing frees directly from subsequent allocations for
* crude but effective memory throttling.
*/

if (llist_add(&obj->freed, &i915->mm.free_list))
queue_work(i915->wq, &i915->mm.free_work);
}
@@ -410,6 +421,42 @@ int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset,
return 0;
}

/**
* i915_gem_object_evictable - Whether object is likely evictable after unbind.
* @obj: The object to check
*
* This function checks whether the object is likely evictable after unbind.
* If the object is not locked when checking, the result is only advisory.
* If the object is locked when checking, and the function returns true,
* then an eviction should indeed be possible. But since unlocked vma
* unpinning and unbinding is currently possible, the object can actually
* become evictable even if this function returns false.
*
* Return: true if the object may be evictable. False otherwise.
*/
bool i915_gem_object_evictable(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
int pin_count = atomic_read(&obj->mm.pages_pin_count);

if (!pin_count)
return true;

spin_lock(&obj->vma.lock);
list_for_each_entry(vma, &obj->vma.list, obj_link) {
if (i915_vma_is_pinned(vma)) {
spin_unlock(&obj->vma.lock);
return false;
}
if (atomic_read(&vma->pages_count))
pin_count--;
}
spin_unlock(&obj->vma.lock);
GEM_WARN_ON(pin_count < 0);

return pin_count == 0;
}

void i915_gem_init__objects(struct drm_i915_private *i915)
{
INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
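The delayed_free hook above lets a backend take over the entire free path.
A plausible TTM-flavoured implementation, sketched under the assumption of
an i915_gem_to_ttm() helper returning the embedded struct
ttm_buffer_object (that helper is not shown in this hunk):

static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
{
	/*
	 * Drop the TTM reference; TTM's delayed-delete machinery waits for
	 * the bo to idle and then calls back into the driver's destroy
	 * callback, which can finish up with __i915_gem_free_object().
	 */
	ttm_bo_put(i915_gem_to_ttm(obj));	/* i915_gem_to_ttm() assumed */
}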
9 changes: 9 additions & 0 deletions drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -200,6 +200,9 @@ static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
if (obj->ops->adjust_lru)
obj->ops->adjust_lru(obj);

dma_resv_unlock(obj->base.resv);
}

@@ -587,6 +590,12 @@ int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset,

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);

void __i915_gem_free_object_rcu(struct rcu_head *head);

void __i915_gem_free_object(struct drm_i915_gem_object *obj);

bool i915_gem_object_evictable(struct drm_i915_gem_object *obj);

#ifdef CONFIG_MMU_NOTIFIER
static inline bool
i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
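A hedged usage sketch for i915_gem_object_evictable(): per its kerneldoc,
only a check made under the object lock gives a trustworthy positive
answer, so a shrinker-style caller would trylock first (example_try_evict()
is hypothetical):

static bool example_try_evict(struct drm_i915_gem_object *obj)
{
	bool evictable = false;

	if (!i915_gem_object_trylock(obj))
		return false;	/* an unlocked result is advisory only */

	if (i915_gem_object_evictable(obj))
		evictable = true;	/* unbind + eviction should succeed */

	i915_gem_object_unlock(obj);
	return evictable;
}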
27 changes: 24 additions & 3 deletions drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -63,6 +63,20 @@ struct drm_i915_gem_object_ops {
const struct drm_i915_gem_pwrite *arg);

int (*dmabuf_export)(struct drm_i915_gem_object *obj);

/**
* adjust_lru - notify that the madvise value was updated
* @obj: The gem object
*
* The madvise value may have been updated, or the object may have been
* recently referenced, so act accordingly (perhaps by changing an LRU list, etc.).
*/
void (*adjust_lru)(struct drm_i915_gem_object *obj);

/**
* delayed_free - Override the default delayed free implementation
*/
void (*delayed_free)(struct drm_i915_gem_object *obj);
void (*release)(struct drm_i915_gem_object *obj);

const char *name; /* friendly name for debug, e.g. lockdep classes */
@@ -187,12 +201,14 @@ struct drm_i915_gem_object {
 #define I915_BO_ALLOC_VOLATILE BIT(1)
 #define I915_BO_ALLOC_STRUCT_PAGE BIT(2)
 #define I915_BO_ALLOC_CPU_CLEAR BIT(3)
+#define I915_BO_ALLOC_USER BIT(4)
 #define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
 			     I915_BO_ALLOC_VOLATILE | \
 			     I915_BO_ALLOC_STRUCT_PAGE | \
-			     I915_BO_ALLOC_CPU_CLEAR)
-#define I915_BO_READONLY BIT(4)
-#define I915_TILING_QUIRK_BIT 5 /* unknown swizzling; do not release! */
+			     I915_BO_ALLOC_CPU_CLEAR | \
+			     I915_BO_ALLOC_USER)
+#define I915_BO_READONLY BIT(5)
+#define I915_TILING_QUIRK_BIT 6 /* unknown swizzling; do not release! */

/*
* Is the object to be mapped as read-only to the GPU
@@ -310,6 +326,11 @@ struct drm_i915_gem_object {
bool dirty:1;
} mm;

struct {
struct sg_table *cached_io_st;
bool created:1;
} ttm;

/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;

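How a backend might respond in its adjust_lru hook, sketched with
hypothetical list helpers; only obj->mm.madv, the I915_MADV_* values and
the hook signature come from the driver:

static void example_adjust_lru(struct drm_i915_gem_object *obj)
{
	if (obj->mm.madv == I915_MADV_DONTNEED)
		example_lru_move_to_purge_list(obj);	/* hypothetical: reap first */
	else
		example_lru_move_to_tail(obj);		/* hypothetical: recently used */
}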
6 changes: 1 addition & 5 deletions drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -18,11 +18,7 @@ void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,

 	mutex_lock(&mem->objects.lock);

-	if (obj->flags & I915_BO_ALLOC_VOLATILE)
-		list_add(&obj->mm.region_link, &mem->objects.purgeable);
-	else
-		list_add(&obj->mm.region_link, &mem->objects.list);
-
+	list_add(&obj->mm.region_link, &mem->objects.list);
 	mutex_unlock(&mem->objects.lock);
 }
