i915: convert shmem_sg_free_table() to use a folio_batch
Remove a few hidden compound_head() calls by converting the returned
page to a folio once and using the folio APIs.  We also only increment
the refcount on the folio once instead of once for each page.  Ideally,
we would have a for_each_sgt_folio macro, but until then this will do.

Link: https://lkml.kernel.org/r/20230621164557.3510324-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 0b62af28f2
parent bdadc6d831
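For context, a folio_batch (declared in include/linux/pagevec.h) is the
folio analogue of the pagevec it replaces: callers accumulate folios and
drain them in one bulk call once the batch fills.  A minimal sketch of
that fill-and-drain pattern using only the folio_batch API; the
release_folios() helper is hypothetical and not part of this patch:

    #include <linux/pagevec.h>

    /*
     * Hypothetical helper: drop one reference on each folio in an array,
     * batching the releases.  folio_batch_add() returns the space left in
     * the batch, so a zero return means it is full and must be drained.
     */
    static void release_folios(struct folio **folios, unsigned int n)
    {
            struct folio_batch fbatch;
            unsigned int i;

            folio_batch_init(&fbatch);
            for (i = 0; i < n; i++) {
                    if (!folio_batch_add(&fbatch, folios[i]))
                            folio_batch_release(&fbatch);
            }
            if (folio_batch_count(&fbatch))
                    folio_batch_release(&fbatch);
    }

The driver's check_release_folio_batch() below follows the same shape,
but drains through check_move_unevictable_folios() and
__folio_batch_release() so LRU placement is fixed up before the
references are dropped.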
@@ -19,13 +19,13 @@
 #include "i915_trace.h"
 
 /*
- * Move pages to appropriate lru and release the pagevec, decrementing the
- * ref count of those pages.
+ * Move folios to appropriate lru and release the batch, decrementing the
+ * ref count of those folios.
  */
-static void check_release_pagevec(struct pagevec *pvec)
+static void check_release_folio_batch(struct folio_batch *fbatch)
 {
-        check_move_unevictable_pages(pvec);
-        __pagevec_release(pvec);
+        check_move_unevictable_folios(fbatch);
+        __folio_batch_release(fbatch);
         cond_resched();
 }
 
@@ -33,24 +33,29 @@ void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping,
                          bool dirty, bool backup)
 {
         struct sgt_iter sgt_iter;
-        struct pagevec pvec;
+        struct folio_batch fbatch;
+        struct folio *last = NULL;
         struct page *page;
 
         mapping_clear_unevictable(mapping);
 
-        pagevec_init(&pvec);
+        folio_batch_init(&fbatch);
         for_each_sgt_page(page, sgt_iter, st) {
+                struct folio *folio = page_folio(page);
+
+                if (folio == last)
+                        continue;
+                last = folio;
                 if (dirty)
-                        set_page_dirty(page);
+                        folio_mark_dirty(folio);
 
                 if (backup)
-                        mark_page_accessed(page);
+                        folio_mark_accessed(folio);
 
-                if (!pagevec_add(&pvec, page))
-                        check_release_pagevec(&pvec);
+                if (!folio_batch_add(&fbatch, folio))
+                        check_release_folio_batch(&fbatch);
         }
-        if (pagevec_count(&pvec))
-                check_release_pagevec(&pvec);
+        if (fbatch.nr)
+                check_release_folio_batch(&fbatch);
 
         sg_free_table(st);
 }
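A note on the shmem_sg_free_table() hunk above: for_each_sgt_page()
still visits every page, and each page of a large folio resolves via
page_folio() to the same struct folio.  Pages of one folio appear
consecutively in the scatterlist, so comparing against the single
previously seen folio (folio == last) is enough to skip the tail pages;
the batch therefore drops one reference per folio rather than one per
page, matching the single reference now taken at allocation time.  As a
hypothetical standalone predicate:

    /* Illustrative only: true for trailing pages of the folio just handled. */
    static bool folio_already_seen(struct page *page, struct folio **last)
    {
            struct folio *folio = page_folio(page);

            if (folio == *last)
                    return true;
            *last = folio;
            return false;
    }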
@@ -63,8 +68,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
         unsigned int page_count; /* restricted by sg_alloc_table */
         unsigned long i;
         struct scatterlist *sg;
-        struct page *page;
-        unsigned long last_pfn = 0; /* suppress gcc warning */
+        unsigned long next_pfn = 0; /* suppress gcc warning */
         gfp_t noreclaim;
         int ret;
 
@@ -95,6 +99,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
         sg = st->sgl;
         st->nents = 0;
         for (i = 0; i < page_count; i++) {
+                struct folio *folio;
                 const unsigned int shrink[] = {
                         I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
                         0,
@@ -103,12 +108,12 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
 
                 do {
                         cond_resched();
-                        page = shmem_read_mapping_page_gfp(mapping, i, gfp);
-                        if (!IS_ERR(page))
+                        folio = shmem_read_folio_gfp(mapping, i, gfp);
+                        if (!IS_ERR(folio))
                                 break;
 
                         if (!*s) {
-                                ret = PTR_ERR(page);
+                                ret = PTR_ERR(folio);
                                 goto err_sg;
                         }
 
@@ -147,19 +152,21 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
 
                 if (!i ||
                     sg->length >= max_segment ||
-                    page_to_pfn(page) != last_pfn + 1) {
+                    folio_pfn(folio) != next_pfn) {
                         if (i)
                                 sg = sg_next(sg);
 
                         st->nents++;
-                        sg_set_page(sg, page, PAGE_SIZE, 0);
+                        sg_set_folio(sg, folio, folio_size(folio), 0);
                 } else {
-                        sg->length += PAGE_SIZE;
+                        /* XXX: could overflow? */
+                        sg->length += folio_size(folio);
                 }
-                last_pfn = page_to_pfn(page);
+                next_pfn = folio_pfn(folio) + folio_nr_pages(folio);
+                i += folio_nr_pages(folio) - 1;
 
                 /* Check that the i965g/gm workaround works. */
-                GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
+                GEM_BUG_ON(gfp & __GFP_DMA32 && next_pfn >= 0x00100000UL);
         }
         if (sg) /* loop terminated early; short sg table */
                 sg_mark_end(sg);
 
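The coalescing logic above changes shape with folios: the old code
remembered the pfn of the last page and merged when page_to_pfn(page)
== last_pfn + 1, while the new code remembers next_pfn, the first pfn
past the folio just added, and merges when the next folio starts exactly
there.  Because the loop index i counts pages, a multi-page folio also
advances i by folio_nr_pages(folio) - 1; e.g. a 16-page folio starting
at pfn 0x1000 sets next_pfn to 0x1010 and skips the next 15 iterations.
A hedged sketch of the same merge over a plain folio array
(sg_fill_from_folios() is illustrative only, and it omits the
max_segment cap the driver enforces):

    #include <linux/mm.h>
    #include <linux/scatterlist.h>

    /* Illustrative only: coalesce physically contiguous folios. */
    static unsigned int sg_fill_from_folios(struct scatterlist *sgl,
                                            struct folio **folios,
                                            unsigned int n)
    {
            struct scatterlist *sg = NULL;
            unsigned long next_pfn = 0;
            unsigned int nents = 0, i;

            for (i = 0; i < n; i++) {
                    struct folio *folio = folios[i];

                    if (!sg || folio_pfn(folio) != next_pfn) {
                            /* Not contiguous: start a new sg entry. */
                            sg = sg ? sg_next(sg) : sgl;
                            sg_set_folio(sg, folio, folio_size(folio), 0);
                            nents++;
                    } else {
                            /* Contiguous: extend the current entry. */
                            sg->length += folio_size(folio);
                    }
                    next_pfn = folio_pfn(folio) + folio_nr_pages(folio);
            }
            if (sg)
                    sg_mark_end(sg);
            return nents;
    }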