// SPDX-License-Identifier: GPL-2.0
/*
 * fs/ext4/mballoc.h
 *
 * Written by: Alex Tomas <alex@clusterfs.com>
 *
 */
#ifndef _EXT4_MBALLOC_H
#define _EXT4_MBALLOC_H

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include "ext4_jbd2.h"
#include "ext4.h"

/*
 * mb_debug() dynamic printk msgs could be used to debug mballoc code.
 */
#ifdef CONFIG_EXT4_DEBUG
#define mb_debug(sb, fmt, ...)						\
	pr_debug("[%s/%d] EXT4-fs (%s): (%s, %d): %s: " fmt,		\
		 current->comm, task_pid_nr(current), sb->s_id,		\
		 __FILE__, __LINE__, __func__, ##__VA_ARGS__)
#else
#define mb_debug(sb, fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
#endif
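
/*
 * Illustrative example (not an in-tree call site): with CONFIG_EXT4_DEBUG
 * enabled, a call such as
 *
 *	mb_debug(sb, "discarding preallocations in group %u\n", group);
 *
 * emits a debug message prefixed with the current task, device and source
 * location; without CONFIG_EXT4_DEBUG the macro compiles to no_printk(),
 * so the arguments are still type-checked but nothing is printed. "group"
 * here is just a hypothetical local variable.
 */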

#define EXT4_MB_HISTORY_ALLOC		1	/* allocation */
#define EXT4_MB_HISTORY_PREALLOC	2	/* preallocated blocks used */

/*
 * How long mballoc can look for a best extent (in found extents)
 */
#define MB_DEFAULT_MAX_TO_SCAN		200

/*
 * How long mballoc must look for a best extent
 */
#define MB_DEFAULT_MIN_TO_SCAN		10

/*
 * With 's_mb_stats' enabled, the allocator will collect stats that are
 * shown at umount. Collecting them has a cost, though!
 */
#define MB_DEFAULT_STATS		0

/*
 * files smaller than MB_DEFAULT_STREAM_THRESHOLD are served
 * by the stream allocator, whose purpose is to pack requests
 * as close to each other as possible to produce smooth I/O traffic.
 * We use locality group prealloc space for stream request.
 * We can tune the same via /proc/fs/ext4/<partition>/stream_req
 */
#define MB_DEFAULT_STREAM_THRESHOLD	16	/* 64K */
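
/*
 * Note: the "64K" annotation above assumes the common 4 KiB block size
 * (16 blocks * 4 KiB = 64 KiB); with other block sizes the byte value of
 * the threshold scales accordingly.
 */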

/*
 * requests with an order of at least this value use the 2^N buddy search
 */
#define MB_DEFAULT_ORDER2_REQS		2

/*
 * default group prealloc size 512 blocks
 */
#define MB_DEFAULT_GROUP_PREALLOC	512

/*
 * Number of groups to search linearly before performing group scanning
 * optimization.
 */
#define MB_DEFAULT_LINEAR_LIMIT		4

/*
 * Minimum number of groups that should be present in the file system to perform
 * group scanning optimizations.
 */
#define MB_DEFAULT_LINEAR_SCAN_THRESHOLD	16

/*
 * The maximum order up to which CR_BEST_AVAIL_LEN can trim a particular
 * allocation request. For example, if we have an order 7 request and a max
 * trim order of 3, we can trim this request down to order 4.
 */
#define MB_DEFAULT_BEST_AVAIL_TRIM_ORDER	3

/*
 * Number of valid buddy orders
 */
#define MB_NUM_ORDERS(sb)		((sb)->s_blocksize_bits + 2)
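
/*
 * For example, with the common 4 KiB block size (s_blocksize_bits == 12),
 * MB_NUM_ORDERS(sb) evaluates to 14, i.e. buddy orders 0 through 13.
 */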

struct ext4_free_data {
	/* this links the free block information from sb_info */
	struct list_head		efd_list;

	/* this links the free block information from group_info */
	struct rb_node			efd_node;

	/* group which free block extent belongs */
	ext4_group_t			efd_group;

	/* free block extent */
	ext4_grpblk_t			efd_start_cluster;
	ext4_grpblk_t			efd_count;

	/* transaction which freed this extent */
	tid_t				efd_tid;
};

struct ext4_prealloc_space {
	union {
		struct rb_node		inode_node;	/* for inode PA rbtree */
		struct list_head	lg_list;	/* for lg PAs */
	} pa_node;
	struct list_head	pa_group_list;
	union {
		struct list_head pa_tmp_list;
		struct rcu_head	pa_rcu;
	} u;
	spinlock_t		pa_lock;
	atomic_t		pa_count;
	unsigned		pa_deleted;
	ext4_fsblk_t		pa_pstart;	/* phys. block */
	ext4_lblk_t		pa_lstart;	/* log. block */
	ext4_grpblk_t		pa_len;		/* len of preallocated chunk */
	ext4_grpblk_t		pa_free;	/* how many blocks are free */
	unsigned short		pa_type;	/* pa type. inode or group */
	union {
		rwlock_t		*inode_lock;	/* locks the rbtree holding this PA */
		spinlock_t		*lg_lock;	/* locks the lg list holding this PA */
	} pa_node_lock;
	struct inode		*pa_inode;	/* used to get the inode during group discard */
};

enum {
	MB_INODE_PA = 0,
	MB_GROUP_PA = 1
};

struct ext4_free_extent {
	ext4_lblk_t fe_logical;
	ext4_grpblk_t fe_start;	/* In cluster units */
	ext4_group_t fe_group;
	ext4_grpblk_t fe_len;	/* In cluster units */
};

/*
 * Locality group:
 *   we try to group all related changes together
 *   so that writeback can flush/allocate them together as well.
 * Size of lg_prealloc_list hash is determined by MB_DEFAULT_GROUP_PREALLOC
 * (512). We store prealloc space into the hash based on the pa_free blocks
 * order value, i.e. fls(pa_free) - 1.
 */
#define PREALLOC_TB_SIZE 10
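
/*
 * Illustrative example: a locality-group PA with pa_free == 100 hashes to
 * list index fls(100) - 1 == 6, since 100 is a 7-bit value. Indexes are
 * expected to be capped at PREALLOC_TB_SIZE - 1 by the code filing the PA.
 */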
struct ext4_locality_group {
	/* for allocator */
	/* to serialize allocates */
	struct mutex		lg_mutex;
	/* list of preallocations */
	struct list_head	lg_prealloc_list[PREALLOC_TB_SIZE];
	spinlock_t		lg_prealloc_lock;
};

struct ext4_allocation_context {
	struct inode *ac_inode;
	struct super_block *ac_sb;

	/* original request */
	struct ext4_free_extent ac_o_ex;

	/* goal request (normalized ac_o_ex) */
	struct ext4_free_extent ac_g_ex;

	/* the best found extent */
	struct ext4_free_extent ac_b_ex;

	/* copy of the best found extent taken before preallocation efforts */
	struct ext4_free_extent ac_f_ex;

	/*
	 * goal len can change in CR1.5, so save the original len. This is
	 * used while adjusting the PA window and for accounting.
	 */
	ext4_grpblk_t	ac_orig_goal_len;
	__u32 ac_groups_considered;
	__u32 ac_flags;		/* allocation hints */
	__u16 ac_groups_scanned;
	__u16 ac_groups_linear_remaining;
	__u16 ac_found;
	__u16 ac_cX_found[EXT4_MB_NUM_CRS];
	__u16 ac_tail;
	__u16 ac_buddy;
	__u8 ac_status;
	__u8 ac_criteria;
	__u8 ac_2order;		/* if request is to allocate 2^N blocks and
				 * N > 0, the field stores N, otherwise 0 */
	__u8 ac_op;		/* operation, for history only */
	struct page *ac_bitmap_page;
	struct page *ac_buddy_page;
	struct ext4_prealloc_space *ac_pa;
	struct ext4_locality_group *ac_lg;
};

#define AC_STATUS_CONTINUE	1
#define AC_STATUS_FOUND		2
#define AC_STATUS_BREAK		3

struct ext4_buddy {
	struct page *bd_buddy_page;
	void *bd_buddy;
	struct page *bd_bitmap_page;
	void *bd_bitmap;
	struct ext4_group_info *bd_info;
	struct super_block *bd_sb;
	__u16 bd_blkbits;
	ext4_group_t bd_group;
};

static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
					struct ext4_free_extent *fex)
{
	return ext4_group_first_block_no(sb, fex->fe_group) +
		(fex->fe_start << EXT4_SB(sb)->s_cluster_bits);
}
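
/*
 * Worked example (assuming a 4 KiB block size, 32768 blocks per group,
 * first data block 0 and no bigalloc, i.e. s_cluster_bits == 0): for
 * fe_group == 3 and fe_start == 100 this returns 3 * 32768 + 100 = 98404.
 */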

typedef int (*ext4_mballoc_query_range_fn)(
	struct super_block		*sb,
	ext4_group_t			agno,
	ext4_grpblk_t			start,
	ext4_grpblk_t			len,
	void				*priv);

int
ext4_mballoc_query_range(
	struct super_block		*sb,
	ext4_group_t			agno,
	ext4_grpblk_t			start,
	ext4_grpblk_t			end,
	ext4_mballoc_query_range_fn	formatter,
	void				*priv);
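
/*
 * A minimal sketch of a caller (hypothetical, not an in-tree user): the
 * callback is invoked once per free extent found in the queried range of
 * the group, so summing the reported lengths counts the free clusters:
 *
 *	static int count_free_fn(struct super_block *sb, ext4_group_t agno,
 *				 ext4_grpblk_t start, ext4_grpblk_t len,
 *				 void *priv)
 *	{
 *		*(ext4_grpblk_t *)priv += len;
 *		return 0;
 *	}
 *
 *	ext4_grpblk_t total = 0;
 *	int err = ext4_mballoc_query_range(sb, group, 0,
 *					   EXT4_CLUSTERS_PER_GROUP(sb) - 1,
 *					   count_free_fn, &total);
 */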

#endif