2012-01-10 06:51:56 +08:00
|
|
|
/*
|
|
|
|
* zsmalloc memory allocator
|
|
|
|
*
|
|
|
|
* Copyright (C) 2011 Nitin Gupta
|
2014-01-31 07:45:55 +08:00
|
|
|
* Copyright (C) 2012, 2013 Minchan Kim
|
2012-01-10 06:51:56 +08:00
|
|
|
*
|
|
|
|
* This code is released using a dual license strategy: BSD/GPL
|
|
|
|
* You can choose the license that better fits your requirements.
|
|
|
|
*
|
|
|
|
* Released under the terms of 3-clause BSD License
|
|
|
|
* Released under the terms of GNU General Public License Version 2.0
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef _ZS_MALLOC_H_
|
|
|
|
#define _ZS_MALLOC_H_
|
|
|
|
|
|
|
|
#include <linux/types.h>
|
|
|
|
|
2012-07-03 05:15:52 +08:00
|
|
|
/*
 * zsmalloc mapping modes
 *
 * NOTE: These only make a difference when a mapped object spans pages.
 */
enum zs_mapmode {
	ZS_MM_RW, /* normal read-write mapping */
	ZS_MM_RO, /* read-only (no copy-out at unmap time) */
	ZS_MM_WO /* write-only (no copy-in at map time) */
	/*
	 * NOTE: ZS_MM_WO should only be used for initializing new
	 * (uninitialized) allocations.  Partial writes to already
	 * initialized allocations should use ZS_MM_RW to preserve the
	 * existing data.
	 */
};
|
|
|
|
|
2015-09-09 06:04:35 +08:00
|
|
|
/* Per-pool statistics, filled in by zs_pool_stats(). */
struct zs_pool_stats {
	/* How many pages were migrated (freed) */
	atomic_long_t pages_compacted;
};
|
|
|
|
|
2012-01-10 06:51:56 +08:00
|
|
|
/* Opaque pool handle; the struct definition is private to zsmalloc. */
struct zs_pool;

/*
 * Create/destroy a zsmalloc pool.  @name labels the pool (presumably for
 * sysfs/debug stat reporting — confirm against the implementation).
 */
struct zs_pool *zs_create_pool(const char *name);
void zs_destroy_pool(struct zs_pool *pool);

/*
 * Allocate @size bytes from @pool using @flags for any page allocations.
 * Returns an opaque object handle usable with zs_free()/zs_map_object();
 * NOTE(review): the failure sentinel (likely 0) is not visible here —
 * confirm against the implementation before relying on it.
 */
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags);
void zs_free(struct zs_pool *pool, unsigned long obj);
|
2012-01-10 06:51:56 +08:00
|
|
|
|
zsmalloc: introduce zs_huge_class_size()
Patch series "zsmalloc/zram: drop zram's max_zpage_size", v3.
ZRAM's max_zpage_size is a bad thing. It forces zsmalloc to store
normal objects as huge ones, which results in bigger zsmalloc memory
usage. Drop it and use actual zsmalloc huge-class value when decide if
the object is huge or not.
This patch (of 2):
Not every object can be share its zspage with other objects, e.g. when
the object is as big as zspage or nearly as big a zspage. For such
objects zsmalloc has a so called huge class - every object which belongs
to huge class consumes the entire zspage (which consists of a physical
page). On x86_64, PAGE_SHIFT 12 box, the first non-huge class size is
3264, so starting down from size 3264, objects can share page(-s) and
thus minimize memory wastage.
ZRAM, however, has its own statically defined watermark for huge
objects, namely "3 * PAGE_SIZE / 4 = 3072", and forcibly stores every
object larger than this watermark (3072) as a PAGE_SIZE object, in other
words, to a huge class, while zsmalloc can keep some of those objects in
non-huge classes. This results in increased memory consumption.
zsmalloc knows better if the object is huge or not. Introduce
zs_huge_class_size() function which tells if the given object can be
stored in one of non-huge classes or not. This will let us to drop
ZRAM's huge object watermark and fully rely on zsmalloc when we decide
if the object is huge.
[sergey.senozhatsky.work@gmail.com: add pool param to zs_huge_class_size()]
Link: http://lkml.kernel.org/r/20180314081833.1096-2-sergey.senozhatsky@gmail.com
Link: http://lkml.kernel.org/r/20180306070639.7389-2-sergey.senozhatsky@gmail.com
Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2018-04-06 07:24:43 +08:00
|
|
|
/*
 * Smallest object size that zsmalloc must store in a "huge" class (one
 * object per zspage); objects below this can share pages with others.
 */
size_t zs_huge_class_size(struct zs_pool *pool);

/*
 * Map/unmap the object identified by @handle so the caller can access its
 * contents; @mm selects the copy-in/copy-out behavior (see enum zs_mapmode).
 * Every zs_map_object() must be paired with zs_unmap_object().
 */
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
|
2012-01-10 06:51:56 +08:00
|
|
|
|
2014-10-10 06:29:50 +08:00
|
|
|
/* Total number of pages currently backing @pool. */
unsigned long zs_get_total_pages(struct zs_pool *pool);

/*
 * Trigger pool compaction; returns a page count — presumably the number
 * of pages freed by the pass (confirm against the implementation).
 */
unsigned long zs_compact(struct zs_pool *pool);

/* Copy @pool's statistics into caller-provided @stats. */
void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats);
|
2012-01-10 06:51:56 +08:00
|
|
|
#endif
|