drm/ttm: call ttm_bo_swapout directly when ttm shrink
Remove the extra indirection because we have only one implementation anyway.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Roger He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit a6c26af8a4
parent 44835a8627
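As a quick illustration of the refactor: the only implementation the `do_shrink` hook ever had is now called directly through the `bo_glob` back-pointer that `ttm_bo_global_init()` installs in `struct ttm_mem_global`. The sketch below is a simplified, standalone userspace version of that idea, not the kernel code itself; the struct layouts are stubs and `main()` is only there to wire things up.

```c
/*
 * Standalone sketch of the refactor, not the kernel code: the single
 * "shrink" implementation is called directly instead of through a
 * registered function pointer.
 */
#include <stdio.h>

struct ttm_bo_global;

struct ttm_mem_global {
	/* before: struct ttm_mem_shrink *shrink;  (function-pointer hook) */
	struct ttm_bo_global *bo_glob;	/* after: plain back-pointer */
};

struct ttm_bo_global {
	struct ttm_mem_global *mem_glob;
};

/* The one and only "shrink" implementation. */
static int ttm_bo_swapout(struct ttm_bo_global *glob)
{
	(void)glob;
	printf("swap out one buffer object\n");
	return 0;
}

/* Memory-accounting side: no registered callback to look up any more. */
static void ttm_shrink(struct ttm_mem_global *glob)
{
	ttm_bo_swapout(glob->bo_glob);	/* direct call, no indirection */
}

int main(void)
{
	struct ttm_mem_global mem_glob = { 0 };
	struct ttm_bo_global bo_glob = { .mem_glob = &mem_glob };

	mem_glob.bo_glob = &bo_glob;	/* what ttm_bo_global_init() now does */
	ttm_shrink(&mem_glob);
	return 0;
}
```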
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -42,7 +42,6 @@
 #include <linux/atomic.h>
 #include <linux/reservation.h>
 
-static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
 static void ttm_bo_global_kobj_release(struct kobject *kobj);
 
 static struct attribute ttm_bo_count = {
@@ -1456,7 +1455,6 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj)
 	struct ttm_bo_global *glob =
 		container_of(kobj, struct ttm_bo_global, kobj);
 
-	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
 	__free_page(glob->dummy_read_page);
 	kfree(glob);
 }
@@ -1481,6 +1479,7 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
 	mutex_init(&glob->device_list_mutex);
 	spin_lock_init(&glob->lru_lock);
 	glob->mem_glob = bo_ref->mem_glob;
+	glob->mem_glob->bo_glob = glob;
 	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
 
 	if (unlikely(glob->dummy_read_page == NULL)) {
@@ -1491,14 +1490,6 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
 		INIT_LIST_HEAD(&glob->swap_lru[i]);
 	INIT_LIST_HEAD(&glob->device_list);
-
-	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
-	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
-	if (unlikely(ret != 0)) {
-		pr_err("Could not register buffer object swapout\n");
-		goto out_no_shrink;
-	}
-
 	atomic_set(&glob->bo_count, 0);
 
 	ret = kobject_init_and_add(
@@ -1506,8 +1497,6 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
 	if (unlikely(ret != 0))
 		kobject_put(&glob->kobj);
 	return ret;
-out_no_shrink:
-	__free_page(glob->dummy_read_page);
 out_no_drp:
 	kfree(glob);
 	return ret;
@@ -1690,11 +1679,8 @@ EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
  * A buffer object shrink method that tries to swap out the first
  * buffer object on the bo_global::swap_lru list.
  */
-
-static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
+int ttm_bo_swapout(struct ttm_bo_global *glob)
 {
-	struct ttm_bo_global *glob =
-	    container_of(shrink, struct ttm_bo_global, shrink);
 	struct ttm_buffer_object *bo;
 	int ret = -EBUSY;
 	unsigned i;
@@ -1776,10 +1762,11 @@ out:
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	return ret;
 }
+EXPORT_SYMBOL(ttm_bo_swapout);
 
 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
 {
-	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
+	while (ttm_bo_swapout(bdev->glob) == 0)
 		;
 }
 EXPORT_SYMBOL(ttm_bo_swapout_all);
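For context, this is the pattern the old `ttm_bo_swapout()` needed and the new signature avoids: the callback only received the embedded `struct ttm_mem_shrink`, so it had to recover the enclosing `struct ttm_bo_global` with `container_of()`. A minimal userspace sketch of that pattern follows; the macro is re-implemented here since `<linux/kernel.h>` is not available, and the `bo_count` field and `main()` are purely illustrative.

```c
/* Illustration of the container_of() dance the removed code relied on. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ttm_mem_shrink {
	int (*do_shrink)(struct ttm_mem_shrink *);
};

struct ttm_bo_global {
	int bo_count;			/* stand-in payload */
	struct ttm_mem_shrink shrink;	/* embedded callback object */
};

/* Old-style callback: gets the embedded member, digs out the container. */
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);

	printf("bo_count = %d\n", glob->bo_count);
	return 0;
}

int main(void)
{
	struct ttm_bo_global glob = { .bo_count = 42 };

	glob.shrink.do_shrink = ttm_bo_swapout;
	glob.shrink.do_shrink(&glob.shrink);	/* indirect call */
	return 0;
}
```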
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -214,26 +214,20 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
 		       uint64_t extra)
 {
 	int ret;
-	struct ttm_mem_shrink *shrink;
 
 	spin_lock(&glob->lock);
-	if (glob->shrink == NULL)
-		goto out;
 
 	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
-		shrink = glob->shrink;
 		spin_unlock(&glob->lock);
-		ret = shrink->do_shrink(shrink);
+		ret = ttm_bo_swapout(glob->bo_glob);
 		spin_lock(&glob->lock);
 		if (unlikely(ret != 0))
-			goto out;
+			break;
 	}
-out:
+
 	spin_unlock(&glob->lock);
 }
 
-
-
 static void ttm_shrink_work(struct work_struct *work)
 {
 	struct ttm_mem_global *glob =
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -752,6 +752,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
 		  const char __user *wbuf, char __user *rbuf,
 		  size_t count, loff_t *f_pos, bool write);
 
+int ttm_bo_swapout(struct ttm_bo_global *glob);
 void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
 int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo);
 #endif
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -522,7 +522,6 @@ struct ttm_bo_global {
 	struct kobject kobj;
 	struct ttm_mem_global *mem_glob;
 	struct page *dummy_read_page;
-	struct ttm_mem_shrink shrink;
 	struct mutex device_list_mutex;
 	spinlock_t lru_lock;
 
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -36,20 +36,6 @@
 #include <linux/kobject.h>
 #include <linux/mm.h>
 
-/**
- * struct ttm_mem_shrink - callback to shrink TTM memory usage.
- *
- * @do_shrink: The callback function.
- *
- * Arguments to the do_shrink functions are intended to be passed using
- * inheritance. That is, the argument class derives from struct ttm_mem_shrink,
- * and can be accessed using container_of().
- */
-
-struct ttm_mem_shrink {
-	int (*do_shrink) (struct ttm_mem_shrink *);
-};
-
 /**
  * struct ttm_mem_global - Global memory accounting structure.
  *
@@ -76,7 +62,7 @@ struct ttm_mem_shrink {
 struct ttm_mem_zone;
 struct ttm_mem_global {
 	struct kobject kobj;
-	struct ttm_mem_shrink *shrink;
+	struct ttm_bo_global *bo_glob;
 	struct workqueue_struct *swap_queue;
 	struct work_struct work;
 	spinlock_t lock;
@@ -90,59 +76,6 @@
 #endif
 };
 
-/**
- * ttm_mem_init_shrink - initialize a struct ttm_mem_shrink object
- *
- * @shrink: The object to initialize.
- * @func: The callback function.
- */
-
-static inline void ttm_mem_init_shrink(struct ttm_mem_shrink *shrink,
-				       int (*func) (struct ttm_mem_shrink *))
-{
-	shrink->do_shrink = func;
-}
-
-/**
- * ttm_mem_register_shrink - register a struct ttm_mem_shrink object.
- *
- * @glob: The struct ttm_mem_global object to register with.
- * @shrink: An initialized struct ttm_mem_shrink object to register.
- *
- * Returns:
- * -EBUSY: There's already a callback registered. (May change).
- */
-
-static inline int ttm_mem_register_shrink(struct ttm_mem_global *glob,
-					  struct ttm_mem_shrink *shrink)
-{
-	spin_lock(&glob->lock);
-	if (glob->shrink != NULL) {
-		spin_unlock(&glob->lock);
-		return -EBUSY;
-	}
-	glob->shrink = shrink;
-	spin_unlock(&glob->lock);
-	return 0;
-}
-
-/**
- * ttm_mem_unregister_shrink - unregister a struct ttm_mem_shrink object.
- *
- * @glob: The struct ttm_mem_global object to unregister from.
- * @shrink: A previously registert struct ttm_mem_shrink object.
- *
- */
-
-static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
-					     struct ttm_mem_shrink *shrink)
-{
-	spin_lock(&glob->lock);
-	BUG_ON(glob->shrink != shrink);
-	glob->shrink = NULL;
-	spin_unlock(&glob->lock);
-}
-
 extern int ttm_mem_global_init(struct ttm_mem_global *glob);
 extern void ttm_mem_global_release(struct ttm_mem_global *glob);
 extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
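One detail worth noting in the ttm_memory.c hunk above: the accounting spinlock is still dropped around the swapout call and retaken before the zones are re-checked, since the swapout path can block; the only structural difference is that the loop now exits with `break` instead of a `goto`. Below is a rough userspace sketch of that loop shape, with a pthread mutex standing in for `glob->lock` and a counter standing in for the zone accounting; names and numbers are made up for illustration.

```c
/* Rough sketch of the ttm_shrink() loop shape after this patch -- not kernel code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int above_target = 3;	/* pretend three "zones worth" to reclaim */

static bool zones_above_swap_target(void)
{
	return above_target > 0;
}

static int swapout_one(void)	/* stand-in for ttm_bo_swapout() */
{
	if (above_target <= 0)
		return -1;	/* nothing left to swap out */
	printf("swapped out one buffer, %d to go\n", --above_target);
	return 0;
}

static void shrink(void)
{
	int ret;

	pthread_mutex_lock(&lock);
	while (zones_above_swap_target()) {
		/* drop the lock: the swapout itself may block */
		pthread_mutex_unlock(&lock);
		ret = swapout_one();
		pthread_mutex_lock(&lock);
		if (ret != 0)
			break;	/* was "goto out" before the patch */
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	shrink();
	return 0;
}
```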