drm/nouveau/kms: Check framebuffer size against bo

Make sure framebuffer dimensions and tiling
parameters will not result in accesses beyond the
end of the GEM buffer they are bound to.
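
For illustration only, a minimal standalone sketch of the bounds check
applied to linear (non-block-linear) planes; the helper name
linear_plane_fits_bo and the userspace-style form are hypothetical,
not code from this patch:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical helper: a plane fits its BO only if its last byte,
     * pitch * height bytes past its start offset, still lies inside
     * the BO.
     */
    static bool
    linear_plane_fits_bo(uint32_t pitch, uint32_t height,
                         uint32_t offset, uint64_t bo_size)
    {
            uint64_t plane_size = (uint64_t)pitch * height;

            return plane_size + offset <= bo_size;
    }

For example, a 1920x1080 XRGB8888 plane with a 7680-byte pitch needs
7680 * 1080 = 8294400 bytes from its offset; a smaller BO is rejected
with -ERANGE. Block-linear (tiled) BOs instead round the footprint up
to whole GOB blocks, as done in nouveau_check_bl_size() below.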

v3: Return EINVAL when creating FB against BO with
    unsupported tiling
v5: Resolved against nouveau_framebuffer cleanup

Signed-off-by: James Jones <jajones@nvidia.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
James Jones authored on 2020-02-10 15:15:54 -08:00, committed by Ben Skeggs
parent c586f30bf7
commit 4f5746c863
1 changed file with 98 additions and 0 deletions

@@ -185,6 +185,76 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
        .create_handle = drm_gem_fb_create_handle,
};

static inline uint32_t
nouveau_get_width_in_blocks(uint32_t stride)
{
        /* GOBs per block in the x direction is always one, and GOBs are
         * 64 bytes wide
         */
        static const uint32_t log_block_width = 6;

        return (stride + (1 << log_block_width) - 1) >> log_block_width;
}
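
/* GOBs are 64 bytes wide throughout; they are 4 lines tall on pre-Fermi
 * (Tesla) GPUs and 8 lines tall on Fermi and newer.  The block height is
 * given as log2 of the number of GOBs stacked vertically per block, so a
 * block spans (1 << log_block_height_in_gobs) << log_gob_height lines.
 */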
static inline uint32_t
nouveau_get_height_in_blocks(struct nouveau_drm *drm,
                             uint32_t height,
                             uint32_t log_block_height_in_gobs)
{
        uint32_t log_gob_height;
        uint32_t log_block_height;

        BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);

        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
                log_gob_height = 2;
        else
                log_gob_height = 3;

        log_block_height = log_block_height_in_gobs + log_gob_height;

        return (height + (1 << log_block_height) - 1) >> log_block_height;
}
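
/* A block-linear image of "h" lines with "stride" bytes per line covers
 * bw = ceil(stride / 64) GOB columns and bh = ceil(h / block_height) block
 * rows, with (1 << tile_mode) GOBs of gob_size bytes per block (tile_mode
 * here being the log2 block height in GOBs, after the Fermi+ shift below):
 *
 *     bl_size = bw * bh * (1 << tile_mode) * gob_size
 *
 * For example, on Fermi and newer, tile_mode 0x40 (blocks 16 GOBs / 128
 * lines tall after the shift) with a 7680-byte stride and 1080 lines gives
 * 120 * 9 * 16 * 512 = 8847360 bytes.
 */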
static int
nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
                      uint32_t offset, uint32_t stride, uint32_t h,
                      uint32_t tile_mode)
{
        uint32_t gob_size, bw, bh;
        uint64_t bl_size;

        BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);

        if (drm->client.device.info.chipset >= 0xc0) {
                if (tile_mode & 0xF)
                        return -EINVAL;
                tile_mode >>= 4;
        }

        if (tile_mode & 0xFFFFFFF0)
                return -EINVAL;

        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
                gob_size = 256;
        else
                gob_size = 512;

        bw = nouveau_get_width_in_blocks(stride);
        bh = nouveau_get_height_in_blocks(drm, h, tile_mode);

        bl_size = bw * bh * (1 << tile_mode) * gob_size;

        DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%lu\n",
                      offset, stride, h, tile_mode, bw, bh, gob_size, bl_size,
                      nvbo->bo.mem.size);

        if (bl_size + offset > nvbo->bo.mem.size)
                return -ERANGE;

        return 0;
}
int
nouveau_framebuffer_new(struct drm_device *dev,
                        const struct drm_mode_fb_cmd2 *mode_cmd,
@@ -192,7 +262,10 @@ nouveau_framebuffer_new(struct drm_device *dev,
                        struct drm_framebuffer **pfb)
{
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct drm_framebuffer *fb;
        const struct drm_format_info *info;
        unsigned int width, height, i;
        int ret;

        /* YUV overlays have special requirements pre-NV50 */
@@ -215,6 +288,31 @@ nouveau_framebuffer_new(struct drm_device *dev,
                return -EINVAL;
        }

        info = drm_get_format_info(dev, mode_cmd);

        for (i = 0; i < info->num_planes; i++) {
                width = drm_format_info_plane_width(info,
                                                    mode_cmd->width,
                                                    i);
                height = drm_format_info_plane_height(info,
                                                      mode_cmd->height,
                                                      i);

                if (nvbo->kind) {
                        ret = nouveau_check_bl_size(drm, nvbo,
                                                    mode_cmd->offsets[i],
                                                    mode_cmd->pitches[i],
                                                    height, nvbo->mode);
                        if (ret)
                                return ret;
                } else {
                        uint32_t size = mode_cmd->pitches[i] * height;

                        if (size + mode_cmd->offsets[i] > nvbo->bo.mem.size)
                                return -ERANGE;
                }
        }

        if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
                return -ENOMEM;