/*
 * linux/drivers/video/fb_defio.c
 *
 * Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */
|
|
|
|
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/string.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/vmalloc.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/fb.h>
|
|
|
|
#include <linux/list.h>
|
|
|
|
|
|
|
|
/* to support deferred IO */
|
|
|
|
#include <linux/rmap.h>
|
|
|
|
#include <linux/pagemap.h>
|
|
|
|
|
2012-05-03 08:23:40 +08:00
|
|
|
static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
|
2008-12-19 14:34:32 +08:00
|
|
|
{
|
|
|
|
void *screen_base = (void __force *) info->screen_base;
|
|
|
|
struct page *page;
|
|
|
|
|
|
|
|
if (is_vmalloc_addr(screen_base + offs))
|
|
|
|
page = vmalloc_to_page(screen_base + offs);
|
|
|
|
else
|
|
|
|
page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);
|
|
|
|
|
|
|
|
return page;
|
|
|
|
}
|
|
|
|
|
2007-05-08 15:37:37 +08:00
|
|
|
/* this is to find and return the vmalloc-ed fb pages */
|
2018-04-25 00:11:21 +08:00
|
|
|
static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
|
2007-05-08 15:37:37 +08:00
|
|
|
{
|
|
|
|
unsigned long offset;
|
|
|
|
struct page *page;
|
2017-02-25 06:56:41 +08:00
|
|
|
struct fb_info *info = vmf->vma->vm_private_data;
|
2007-05-08 15:37:37 +08:00
|
|
|
|
2008-02-06 17:39:10 +08:00
|
|
|
offset = vmf->pgoff << PAGE_SHIFT;
|
2007-05-08 15:37:37 +08:00
|
|
|
if (offset >= info->fix.smem_len)
|
2008-02-06 17:39:10 +08:00
|
|
|
return VM_FAULT_SIGBUS;
|
2007-05-08 15:37:37 +08:00
|
|
|
|
2008-12-19 14:34:32 +08:00
|
|
|
page = fb_deferred_io_page(info, offset);
|
2007-05-08 15:37:37 +08:00
|
|
|
if (!page)
|
2008-02-06 17:39:10 +08:00
|
|
|
return VM_FAULT_SIGBUS;
|
2007-05-08 15:37:37 +08:00
|
|
|
|
|
|
|
get_page(page);
|
2021-06-01 22:30:30 +08:00
|
|
|
|
|
|
|
if (vmf->vma->vm_file)
|
|
|
|
page->mapping = vmf->vma->vm_file->f_mapping;
|
|
|
|
else
|
|
|
|
printk(KERN_ERR "no mapping available\n");
|
|
|
|
|
|
|
|
BUG_ON(!page->mapping);
|
2022-02-11 17:46:39 +08:00
|
|
|
INIT_LIST_HEAD(&page->lru);
|
2008-03-20 08:01:10 +08:00
|
|
|
page->index = vmf->pgoff;
|
|
|
|
|
2008-02-06 17:39:10 +08:00
|
|
|
vmf->page = page;
|
|
|
|
return 0;
|
2007-05-08 15:37:37 +08:00
|
|
|
}
|
|
|
|
|
2011-07-17 08:44:56 +08:00
|
|
|
/*
 * fsync() for an fbdev file: flush any pending deferred I/O immediately
 * instead of waiting for the delayed work to fire.
 *
 * Returns 0 on success or the error from writing back the file range.
 */
int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct fb_info *info = file->private_data;
	struct inode *inode = file_inode(file);
	int ret;

	ret = file_write_and_wait_range(file, start, end);
	if (ret)
		return ret;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	inode_lock(inode);

	/* cancel the pending delayed flush ... */
	cancel_delayed_work_sync(&info->deferred_work);
	/* ... and reschedule it with no delay so it runs right away */
	schedule_delayed_work(&info->deferred_work, 0);

	inode_unlock(inode);

	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
|
|
|
|
|
2007-05-08 15:37:37 +08:00
|
|
|
/* vm_ops->page_mkwrite handler */
|
2018-04-25 00:11:21 +08:00
|
|
|
/* vm_ops->page_mkwrite handler */
static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct fb_info *info = vmf->vma->vm_private_data;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	/* default insertion point: list head, so list_add_tail() appends */
	struct list_head *pos = &fbdefio->pagelist;

	/* this is a callback we get when userspace first tries to
	write to the page. we schedule a workqueue. that workqueue
	will eventually mkclean the touched pages and execute the
	deferred framebuffer IO. then if userspace touches a page
	again, we repeat the same scheme */

	file_update_time(vmf->vma->vm_file);

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);

	/* first write in this cycle, notify the driver */
	if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
		fbdefio->first_io(info);

	/*
	 * We want the page to remain locked from ->page_mkwrite until
	 * the PTE is marked dirty to avoid page_mkclean() being called
	 * before the PTE is updated, which would leave the page ignored
	 * by defio.
	 * Do this by locking the page here and informing the caller
	 * about it with VM_FAULT_LOCKED.
	 */
	lock_page(page);

	/*
	 * This check is to catch the case where a new process could start
	 * writing to the same page through a new PTE. This new access
	 * can cause a call to .page_mkwrite even if the original process'
	 * PTE is marked writable.
	 *
	 * TODO: The lru field is owned by the page cache; hence the name.
	 * We dequeue in fb_deferred_io_work() after flushing the
	 * page's content into video memory. Instead of lru, fbdefio
	 * should have it's own field.
	 */
	if (!list_empty(&page->lru))
		goto page_already_added;

	if (unlikely(fbdefio->sort_pagelist)) {
		/*
		 * We loop through the pagelist before adding in order to
		 * keep the pagelist sorted. This has significant overhead
		 * of O(n^2) with n being the number of written pages. If
		 * possible, drivers should try to work with unsorted page
		 * lists instead.
		 */
		struct page *cur;

		/* find the first entry with a larger index ... */
		list_for_each_entry(cur, &fbdefio->pagelist, lru) {
			if (cur->index > page->index)
				break;
		}
		/*
		 * ... and insert before it (list_add_tail on cur's node);
		 * if no such entry exists, cur's lru is the list head and
		 * the page is appended.
		 */
		pos = &cur->lru;
	}

	list_add_tail(&page->lru, pos);

page_already_added:
	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return VM_FAULT_LOCKED;
}
|
|
|
|
|
2009-09-28 02:29:37 +08:00
|
|
|
/* VMA operations installed by fb_deferred_io_mmap() on user mappings */
static const struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};
|
|
|
|
|
2021-06-01 22:30:30 +08:00
|
|
|
/*
 * Address-space ops set on the fbdev file's mapping in fb_deferred_io_open().
 * noop_dirty_folio keeps folio dirtying a no-op; dirty tracking is done by
 * defio's own page list instead of the page cache.
 */
static const struct address_space_operations fb_deferred_io_aops = {
	.dirty_folio	= noop_dirty_folio,
};
|
|
|
|
|
2016-04-28 23:18:34 +08:00
|
|
|
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
|
2007-05-08 15:37:37 +08:00
|
|
|
{
|
|
|
|
vma->vm_ops = &fb_deferred_io_vm_ops;
|
mm: kill vma flag VM_RESERVED and mm->reserved_vm counter
A long time ago, in v2.4, VM_RESERVED kept swapout process off VMA,
currently it lost original meaning but still has some effects:
| effect | alternative flags
-+------------------------+---------------------------------------------
1| account as reserved_vm | VM_IO
2| skip in core dump | VM_IO, VM_DONTDUMP
3| do not merge or expand | VM_IO, VM_DONTEXPAND, VM_HUGETLB, VM_PFNMAP
4| do not mlock | VM_IO, VM_DONTEXPAND, VM_HUGETLB, VM_PFNMAP
This patch removes reserved_vm counter from mm_struct. Seems like nobody
cares about it, it does not exported into userspace directly, it only
reduces total_vm showed in proc.
Thus VM_RESERVED can be replaced with VM_IO or pair VM_DONTEXPAND | VM_DONTDUMP.
remap_pfn_range() and io_remap_pfn_range() set VM_IO|VM_DONTEXPAND|VM_DONTDUMP.
remap_vmalloc_range() set VM_DONTEXPAND | VM_DONTDUMP.
[akpm@linux-foundation.org: drivers/vfio/pci/vfio_pci.c fixup]
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Carsten Otte <cotte@de.ibm.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Eric Paris <eparis@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Morris <james.l.morris@oracle.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Kentaro Takeda <takedakn@nttdata.co.jp>
Cc: Matt Helsley <matthltc@us.ibm.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: Venkatesh Pallipadi <venki@google.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2012-10-09 07:29:02 +08:00
|
|
|
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
|
2009-12-03 23:31:56 +08:00
|
|
|
if (!(info->flags & FBINFO_VIRTFB))
|
|
|
|
vma->vm_flags |= VM_IO;
|
2007-05-08 15:37:37 +08:00
|
|
|
vma->vm_private_data = info;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* workqueue callback */
|
|
|
|
static void fb_deferred_io_work(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct fb_info *info = container_of(work, struct fb_info,
|
|
|
|
deferred_work.work);
|
2010-06-05 05:14:56 +08:00
|
|
|
struct list_head *node, *next;
|
|
|
|
struct page *cur;
|
2007-05-08 15:37:37 +08:00
|
|
|
struct fb_deferred_io *fbdefio = info->fbdefio;
|
|
|
|
|
|
|
|
/* here we mkclean the pages, then do all deferred IO */
|
|
|
|
mutex_lock(&fbdefio->lock);
|
2010-06-05 05:14:56 +08:00
|
|
|
list_for_each_entry(cur, &fbdefio->pagelist, lru) {
|
|
|
|
lock_page(cur);
|
|
|
|
page_mkclean(cur);
|
|
|
|
unlock_page(cur);
|
2007-05-08 15:37:37 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* driver's callback with pagelist */
|
|
|
|
fbdefio->deferred_io(info, &fbdefio->pagelist);
|
|
|
|
|
2010-06-05 05:14:56 +08:00
|
|
|
/* clear the list */
|
|
|
|
list_for_each_safe(node, next, &fbdefio->pagelist) {
|
2022-02-11 17:46:39 +08:00
|
|
|
list_del_init(node);
|
2007-05-08 15:37:37 +08:00
|
|
|
}
|
|
|
|
mutex_unlock(&fbdefio->lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
void fb_deferred_io_init(struct fb_info *info)
|
|
|
|
{
|
|
|
|
struct fb_deferred_io *fbdefio = info->fbdefio;
|
|
|
|
|
|
|
|
BUG_ON(!fbdefio);
|
|
|
|
mutex_init(&fbdefio->lock);
|
|
|
|
INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
|
|
|
|
INIT_LIST_HEAD(&fbdefio->pagelist);
|
|
|
|
if (fbdefio->delay == 0) /* set a default of 1 s */
|
|
|
|
fbdefio->delay = HZ;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
|
|
|
|
|
2021-06-01 22:30:30 +08:00
|
|
|
/*
 * Called when the fbdev file is opened: install the defio address-space
 * operations on the file's mapping so folio dirtying becomes a no-op
 * (see fb_deferred_io_aops). The info and inode parameters are currently
 * unused but kept for the fbdev open-hook signature.
 */
void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);
|
|
|
|
|
2007-05-08 15:37:37 +08:00
|
|
|
void fb_deferred_io_cleanup(struct fb_info *info)
|
|
|
|
{
|
|
|
|
struct fb_deferred_io *fbdefio = info->fbdefio;
|
2021-06-01 22:30:30 +08:00
|
|
|
struct page *page;
|
|
|
|
int i;
|
2007-05-08 15:37:37 +08:00
|
|
|
|
|
|
|
BUG_ON(!fbdefio);
|
2011-06-15 22:57:21 +08:00
|
|
|
cancel_delayed_work_sync(&info->deferred_work);
|
2021-06-01 22:30:30 +08:00
|
|
|
|
|
|
|
/* clear out the mapping that we setup */
|
|
|
|
for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
|
|
|
|
page = fb_deferred_io_page(info, i);
|
|
|
|
page->mapping = NULL;
|
|
|
|
}
|
|
|
|
|
2008-12-19 14:34:23 +08:00
|
|
|
mutex_destroy(&fbdefio->lock);
|
2007-05-08 15:37:37 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
|