drm/vmwgfx: Make use of PFN_ALIGN/PFN_UP helper macro

Refactor to make use of the PFN_ALIGN()/PFN_UP() helper macros instead of open-coding the page alignment and page-count calculations.
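
For reference, the snippet below paraphrases the helper definitions from include/linux/pfn.h (context only, not part of this patch) to show that the replacements are drop-in equivalents of the open-coded expressions:

	/* include/linux/pfn.h (paraphrased, for context only) */
	#define PFN_ALIGN(x)	(((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
	#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

	/*
	 * PAGE_ALIGN(size) >> PAGE_SHIFT, (size + PAGE_SIZE - 1) >> PAGE_SHIFT
	 * and PFN_UP(size) all yield the number of pages needed to hold size
	 * bytes, while (x + PAGE_SIZE - 1) & PAGE_MASK and PFN_ALIGN(x) both
	 * round x up to the next page boundary.
	 */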

Signed-off-by: Cai Huoqing <caihuoqing@baidu.com>
Signed-off-by: Zack Rusin <zackr@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210802033308.927-1-caihuoqing@baidu.com
Authored by Cai Huoqing 2021-08-02 11:33:08 +08:00, committed by Zack Rusin
parent 2bc5da528d
commit bc65754ca6
7 changed files with 9 additions and 14 deletions


@@ -405,7 +405,7 @@ static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
 			      bool user)
 {
 	static size_t struct_size, user_struct_size;
-	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	size_t num_pages = PFN_UP(size);
 	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
 
 	if (unlikely(struct_size == 0)) {
@@ -474,7 +474,6 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
 			 struct ttm_placement *placement,
 			 struct ttm_buffer_object **p_bo)
 {
-	unsigned npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	struct ttm_operation_ctx ctx = { false, false };
 	struct ttm_buffer_object *bo;
 	size_t acc_size;
@@ -485,7 +484,7 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
 		return -ENOMEM;
 
 	acc_size = ttm_round_pot(sizeof(*bo));
-	acc_size += ttm_round_pot(npages * sizeof(void *));
+	acc_size += ttm_round_pot(PFN_UP(size) * sizeof(void *));
 	acc_size += ttm_round_pot(sizeof(struct ttm_tt));
 
 	ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);


@@ -801,7 +801,7 @@ static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
 {
 	struct vmw_cmdbuf_alloc_info info;
 
-	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	info.page_size = PFN_UP(size);
 	info.node = node;
 	info.done = false;


@@ -607,8 +607,7 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
 	if (num_entries < co_info[type].min_initial_entries) {
 		vcotbl->res.backup_size = co_info[type].min_initial_entries *
 			co_info[type].size;
-		vcotbl->res.backup_size =
-			(vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+		vcotbl->res.backup_size = PFN_ALIGN(vcotbl->res.backup_size);
 	}
 
 	vcotbl->scrubbed = true;


@@ -100,7 +100,7 @@ static int vmw_cursor_update_bo(struct vmw_private *dev_priv,
 	int ret;
 
 	kmap_offset = 0;
-	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	kmap_num = PFN_UP(width*height*4);
 
 	ret = ttm_bo_reserve(&bo->base, true, false, NULL);
 	if (unlikely(ret != 0)) {


@@ -256,8 +256,7 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
 		if (!otables[i].enabled)
 			continue;
 
-		otables[i].size =
-			(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
+		otables[i].size = PFN_ALIGN(otables[i].size);
 		bo_size += otables[i].size;
 	}
 
@@ -385,7 +384,7 @@ static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages)
 	while (likely(data_size > PAGE_SIZE)) {
 		data_size = DIV_ROUND_UP(data_size, PAGE_SIZE);
 		data_size *= VMW_PPN_SIZE;
-		tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK;
+		tot_size += PFN_ALIGN(data_size);
 	}
 
 	return tot_size >> PAGE_SHIFT;


@@ -353,8 +353,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 static int vmw_resource_buf_alloc(struct vmw_resource *res,
 				  bool interruptible)
 {
-	unsigned long size =
-		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+	unsigned long size = PFN_ALIGN(res->backup_size);
 	struct vmw_buffer_object *backup;
 	int ret;


@@ -981,8 +981,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
 		goto no_reserve;
 
 	/* Map and copy shader bytecode. */
-	ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
-			  &map);
+	ret = ttm_bo_kmap(&buf->base, 0, PFN_UP(size), &map);
 	if (unlikely(ret != 0)) {
 		ttm_bo_unreserve(&buf->base);
 		goto no_reserve;