drm fixes for 6.2 final
drm:
 - dynamic debug disable for now

fbdev:
 - deferred i/o device close fix

amdgpu:
 - Fix GC11.x suspend warning
 - Fix display warning

vc4:
 - YUV planes fix
 - hdmi display fix
 - crtc reduced blanking fix

ast:
 - fix start address computation

vmwgfx:
 - fix bo/handle races

i915:
 - gen11 WA fix
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmPu4vMACgkQDHTzWXnE
hr5+eg/+LcMV6Mkc2pAWc/M9vjuViXgP6GlkUgqa5qD8oiB54ifyCYHjNYXt3LdQ
/s6xxfmD+9UitHAoCmt+j5LL0UYdLh3JGUP8y8FWEsPCigx3gPIInlrU92Y53hs7
9NsO7hGl7/5WJZngt7GO2fr4DG+Di0nZ5PUMYihk2EFl3OpeuhKmoz1Gw6HRDGvj
Alydm40SYFfVA0/RqdqybNJg9fFqLO/7NaK42KaFXpgL6uAnL6lHcmxgMXfHNeQD
0QCp6dxQSF1xbrrEgnS63QxObWDcBk8FeMGjLijQR0eoCg5FJrYXcQ7Vk7zgma/c
iCQyhd9xx7tdWd3rIf6kuBDKfKl7xFSNflgvjjWc4TwD9WfQuZXNkozO1GuZAPms
vsmF4Yj+GIsaKAyvUEr5u9S/zcPJ9s50Bn2QRTLyeKkYUoE7StukMvVqgAxqlJO5
ljrUbzl/EbRFMj/oGFQXi5hpemj2jyzsi5W9vsOgWMTw/gv2QIKIqjDjUVnhZ9Rf
14THVTILfQ6fTPUp5bq+yR5N9QIjr/qrmN/1BZHNTx9ZGMo0cMf7r4EmtPvy0gLi
EvdY2MrVPOVyFTWoE7ivY1RXHuh54LObw18uCsEaXRWWVxEEV0RfcpNvK3ukC1az
WhaXa2B5gTLWYBHSViexaRuKbQScAgLPDidj8KZhKsPpLTN1cKE=
=Dii1
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2023-02-17' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Just a final collection of misc fixes, the biggest disables the
  recently added dynamic debugging support, it has a regression that
  needs some bigger fixes.

  Otherwise a bunch of fixes across the board, vc4, amdgpu and vmwgfx
  mostly, with some smaller i915 and ast fixes.

  drm:
   - dynamic debug disable for now

  fbdev:
   - deferred i/o device close fix

  amdgpu:
   - Fix GC11.x suspend warning
   - Fix display warning

  vc4:
   - YUV planes fix
   - hdmi display fix
   - crtc reduced blanking fix

  ast:
   - fix start address computation

  vmwgfx:
   - fix bo/handle races

  i915:
   - gen11 WA fix"

* tag 'drm-fixes-2023-02-17' of git://anongit.freedesktop.org/drm/drm:
  drm/amd/display: Fail atomic_check early on normalize_zpos error
  drm/amd/amdgpu: fix warning during suspend
  drm/vmwgfx: Do not drop the reference to the handle too soon
  drm/vmwgfx: Stop accessing buffer objects which failed init
  drm/i915/gen11: Wa_1408615072/Wa_1407596294 should be on GT list
  drm: Disable dynamic debug as broken
  drm/ast: Fix start address computation
  fbdev: Fix invalid page access after closing deferred I/O devices
  drm/vc4: crtc: Increase setup cost in core clock calculation to handle extreme reduced blanking
  drm/vc4: hdmi: Always enable GCP with AVMUTE cleared
  drm/vc4: Fix YUV plane handling when planes are in different buffers
commit ec35307e18
@@ -53,7 +53,8 @@ config DRM_DEBUG_MM
 config DRM_USE_DYNAMIC_DEBUG
 	bool "use dynamic debug to implement drm.debug"
-	default y
+	default n
+	depends on BROKEN
 	depends on DRM
 	depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
 	depends on JUMP_LABEL
@@ -4268,6 +4268,9 @@ exit:
 	}
 	adev->in_suspend = false;
 
+	if (adev->enable_mes)
+		amdgpu_mes_self_test(adev);
+
 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
 		DRM_WARN("smart shift update failed\n");
 
@@ -1344,7 +1344,7 @@ static int mes_v11_0_late_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* it's only intended for use in mes_self_test case, not for s0ix and reset */
-	if (!amdgpu_in_reset(adev) && !adev->in_s0ix &&
+	if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend &&
 	    (adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)))
 		amdgpu_mes_self_test(adev);
 
@@ -9658,7 +9658,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 	 * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
 	 * atomic state, so call drm helper to normalize zpos.
 	 */
-	drm_atomic_normalize_zpos(dev, state);
+	ret = drm_atomic_normalize_zpos(dev, state);
+	if (ret) {
+		drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
+		goto fail;
+	}
 
 	/* Remove exiting planes if they are modified */
 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
@@ -714,7 +714,7 @@ static int ast_primary_plane_init(struct ast_private *ast)
 	struct ast_plane *ast_primary_plane = &ast->primary_plane;
 	struct drm_plane *primary_plane = &ast_primary_plane->base;
 	void __iomem *vaddr = ast->vram;
-	u64 offset = ast->vram_base;
+	u64 offset = 0; /* with shmem, the primary plane is always at offset 0 */
 	unsigned long cursor_size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
 	unsigned long size = ast->vram_fb_available - cursor_size;
 	int ret;
@@ -972,7 +972,7 @@ static int ast_cursor_plane_init(struct ast_private *ast)
 		return -ENOMEM;
 
 	vaddr = ast->vram + ast->vram_fb_available - size;
-	offset = ast->vram_base + ast->vram_fb_available - size;
+	offset = ast->vram_fb_available - size;
 
 	ret = ast_plane_init(dev, ast_cursor_plane, vaddr, offset, size,
 			     0x01, &ast_cursor_plane_funcs,
@@ -1355,6 +1355,13 @@ icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
 		    GAMT_CHKN_BIT_REG,
 		    GAMT_CHKN_DISABLE_L3_COH_PIPE);
 
+	/*
+	 * Wa_1408615072:icl,ehl (vsunit)
+	 * Wa_1407596294:icl,ehl (hsunit)
+	 */
+	wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+		    VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
+
 	/* Wa_1407352427:icl,ehl */
 	wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
 		    PSDUNIT_CLKGATE_DIS);
@@ -2539,13 +2546,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 		wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
 			     GEN11_ENABLE_32_PLANE_MODE);
 
-		/*
-		 * Wa_1408615072:icl,ehl (vsunit)
-		 * Wa_1407596294:icl,ehl (hsunit)
-		 */
-		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
-			    VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
-
 		/*
 		 * Wa_1408767742:icl[a2..forever],ehl[all]
 		 * Wa_1605460711:icl[a0..c0]
@@ -711,7 +711,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
 		struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
 
 		if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) {
-			vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 1000,
+			vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 8000,
						  mode->clock * 9 / 10) * 1000;
 		} else {
 			vc4_state->hvs_load = mode->clock * 1000;
@@ -97,6 +97,10 @@
 #define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_SHIFT	8
 #define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK	VC4_MASK(15, 8)
 
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_MASK	VC4_MASK(7, 0)
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_SET_AVMUTE	BIT(0)
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_CLEAR_AVMUTE	BIT(4)
+
 # define VC4_HD_M_SW_RST			BIT(2)
 # define VC4_HD_M_ENABLE			BIT(0)
@@ -1306,7 +1310,6 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
 		   VC4_HDMI_VERTB_VBP));
 	unsigned long flags;
 	unsigned char gcp;
-	bool gcp_en;
 	u32 reg;
 	int idx;
 
@@ -1341,16 +1344,13 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
 	switch (vc4_state->output_bpc) {
 	case 12:
 		gcp = 6;
-		gcp_en = true;
 		break;
 	case 10:
 		gcp = 5;
-		gcp_en = true;
 		break;
 	case 8:
 	default:
-		gcp = 4;
-		gcp_en = false;
+		gcp = 0;
 		break;
 	}
 
@@ -1359,8 +1359,7 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
 	 * doesn't signal in GCP.
 	 */
 	if (vc4_state->output_format == VC4_HDMI_OUTPUT_YUV422) {
-		gcp = 4;
-		gcp_en = false;
+		gcp = 0;
 	}
 
 	reg = HDMI_READ(HDMI_DEEP_COLOR_CONFIG_1);
@@ -1373,11 +1372,12 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
 	reg = HDMI_READ(HDMI_GCP_WORD_1);
 	reg &= ~VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK;
 	reg |= VC4_SET_FIELD(gcp, VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1);
+	reg &= ~VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_MASK;
+	reg |= VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_CLEAR_AVMUTE;
 	HDMI_WRITE(HDMI_GCP_WORD_1, reg);
 
 	reg = HDMI_READ(HDMI_GCP_CONFIG);
-	reg &= ~VC5_HDMI_GCP_CONFIG_GCP_ENABLE;
-	reg |= gcp_en ? VC5_HDMI_GCP_CONFIG_GCP_ENABLE : 0;
+	reg |= VC5_HDMI_GCP_CONFIG_GCP_ENABLE;
 	HDMI_WRITE(HDMI_GCP_CONFIG, reg);
 
 	reg = HDMI_READ(HDMI_MISC_CONTROL);
@@ -340,7 +340,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
 {
 	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
 	struct drm_framebuffer *fb = state->fb;
-	struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
+	struct drm_gem_dma_object *bo;
 	int num_planes = fb->format->num_planes;
 	struct drm_crtc_state *crtc_state;
 	u32 h_subsample = fb->format->hsub;
@@ -359,8 +359,10 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
 	if (ret)
 		return ret;
 
-	for (i = 0; i < num_planes; i++)
+	for (i = 0; i < num_planes; i++) {
+		bo = drm_fb_dma_get_gem_obj(fb, i);
 		vc4_state->offsets[i] = bo->dma_addr + fb->offsets[i];
+	}
 
 	/*
 	 * We don't support subpixel source positioning for scaling,
@@ -462,6 +462,9 @@ int vmw_bo_create(struct vmw_private *vmw,
 		return -ENOMEM;
 	}
 
+	/*
+	 * vmw_bo_init will delete the *p_bo object if it fails
+	 */
 	ret = vmw_bo_init(vmw, *p_bo, size,
 			  placement, interruptible, pin,
 			  bo_free);
@@ -470,7 +473,6 @@ int vmw_bo_create(struct vmw_private *vmw,
 
 	return ret;
 out_error:
-	kfree(*p_bo);
 	*p_bo = NULL;
 	return ret;
 }
@@ -596,6 +598,7 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
 		ttm_bo_put(&vmw_bo->base);
 	}
 
+	drm_gem_object_put(&vmw_bo->base.base);
 	return ret;
 }
 
@@ -636,6 +639,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
 
 	ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
 	vmw_bo_unreference(&vbo);
+	drm_gem_object_put(&vbo->base.base);
 	if (unlikely(ret != 0)) {
 		if (ret == -ERESTARTSYS || ret == -EBUSY)
 			return -EBUSY;
@@ -693,7 +697,7 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
  * struct vmw_buffer_object should be placed.
  * Return: Zero on success, Negative error code on error.
  *
- * The vmw buffer object pointer will be refcounted.
+ * The vmw buffer object pointer will be refcounted (both ttm and gem)
  */
 int vmw_user_bo_lookup(struct drm_file *filp,
 		       uint32_t handle,
@@ -710,7 +714,6 @@ int vmw_user_bo_lookup(struct drm_file *filp,
 
 	*out = gem_to_vmw_bo(gobj);
 	ttm_bo_get(&(*out)->base);
-	drm_gem_object_put(gobj);
 
 	return 0;
 }
@@ -791,7 +794,8 @@ int vmw_dumb_create(struct drm_file *file_priv,
 	ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
 						args->size, &args->handle,
 						&vbo);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_put(&vbo->base.base);
 	return ret;
 }
 
@@ -1160,6 +1160,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 	}
 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
 	ttm_bo_put(&vmw_bo->base);
+	drm_gem_object_put(&vmw_bo->base.base);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1214,6 +1215,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	}
 	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
 	ttm_bo_put(&vmw_bo->base);
+	drm_gem_object_put(&vmw_bo->base.base);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -146,14 +146,12 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
 					    &vmw_sys_placement :
 					    &vmw_vram_sys_placement,
 			    true, false, &vmw_gem_destroy, p_vbo);
-
-	(*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
 	if (ret != 0)
 		goto out_no_bo;
 
+	(*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
+
 	ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle);
-	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_put(&(*p_vbo)->base.base);
 out_no_bo:
 	return ret;
 }
@@ -180,6 +178,8 @@ int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
 	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
 	rep->cur_gmr_id = handle;
 	rep->cur_gmr_offset = 0;
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_put(&vbo->base.base);
 out_no_bo:
 	return ret;
 }
@@ -1815,8 +1815,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 
 err_out:
 	/* vmw_user_lookup_handle takes one ref so does new_fb */
-	if (bo)
+	if (bo) {
 		vmw_bo_unreference(&bo);
+		drm_gem_object_put(&bo->base.base);
+	}
 	if (surface)
 		vmw_surface_unreference(&surface);
 
@@ -458,6 +458,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
 	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
 
 	vmw_bo_unreference(&buf);
+	drm_gem_object_put(&buf->base.base);
 
 out_unlock:
 	mutex_unlock(&overlay->mutex);
@@ -807,6 +807,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
 				 num_output_sig, tfile, shader_handle);
 out_bad_arg:
 	vmw_bo_unreference(&buffer);
+	drm_gem_object_put(&buffer->base.base);
 	return ret;
 }
 
@@ -683,7 +683,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
 		container_of(base, struct vmw_user_surface, prime.base);
 	struct vmw_resource *res = &user_srf->srf.res;
 
-	if (base->shareable && res && res->backup)
+	if (res && res->backup)
 		drm_gem_object_put(&res->backup->base.base);
 
 	*p_base = NULL;
@@ -864,7 +864,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 			goto out_unlock;
 		}
 		vmw_bo_reference(res->backup);
-		drm_gem_object_get(&res->backup->base.base);
+		/*
+		 * We don't expose the handle to the userspace and surface
+		 * already holds a gem reference
+		 */
+		drm_gem_handle_delete(file_priv, backup_handle);
 	}
 
 	tmp = vmw_resource_reference(&srf->res);
@@ -1568,8 +1572,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
 			drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
 		rep->buffer_size = res->backup->base.base.size;
 		rep->buffer_handle = backup_handle;
-		if (user_srf->prime.base.shareable)
-			drm_gem_object_get(&res->backup->base.base);
 	} else {
 		rep->buffer_map_handle = 0;
 		rep->buffer_size = 0;
@@ -313,7 +313,7 @@ void fb_deferred_io_open(struct fb_info *info,
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_open);
 
-void fb_deferred_io_cleanup(struct fb_info *info)
+void fb_deferred_io_release(struct fb_info *info)
 {
 	struct fb_deferred_io *fbdefio = info->fbdefio;
 	struct page *page;
@@ -327,6 +327,14 @@ void fb_deferred_io_cleanup(struct fb_info *info)
 		page = fb_deferred_io_page(info, i);
 		page->mapping = NULL;
 	}
+}
+EXPORT_SYMBOL_GPL(fb_deferred_io_release);
+
+void fb_deferred_io_cleanup(struct fb_info *info)
+{
+	struct fb_deferred_io *fbdefio = info->fbdefio;
+
+	fb_deferred_io_release(info);
 
 	kvfree(info->pagerefs);
 	mutex_destroy(&fbdefio->lock);
@@ -1454,6 +1454,10 @@ __releases(&info->lock)
 	struct fb_info * const info = file->private_data;
 
 	lock_fb_info(info);
+#if IS_ENABLED(CONFIG_FB_DEFERRED_IO)
+	if (info->fbdefio)
+		fb_deferred_io_release(info);
+#endif
 	if (info->fbops->fb_release)
 		info->fbops->fb_release(info,1);
 	module_put(info->fbops->owner);
@@ -662,6 +662,7 @@ extern int fb_deferred_io_init(struct fb_info *info);
 extern void fb_deferred_io_open(struct fb_info *info,
 				struct inode *inode,
 				struct file *file);
+extern void fb_deferred_io_release(struct fb_info *info);
 extern void fb_deferred_io_cleanup(struct fb_info *info);
 extern int fb_deferred_io_fsync(struct file *file, loff_t start,
 				loff_t end, int datasync);