// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <asm/unaligned.h>
#include "messages.h"
#include "ctree.h"
#include "accessors.h"
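
/*
 * Verify that the member at @ptr + @off with the given @size lies within
 * the extent buffer. A single comparison is enough: if the end of the
 * member fits within eb->len, its start necessarily does too (the %s in
 * the warning below tells the two failure modes apart). The check runs
 * under ASSERT() on every eb member access, so it is kept to one branch;
 * on release builds the helper is fully inlined, so the function call
 * overhead is gone as well.
 */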
static bool check_setget_bounds(const struct extent_buffer *eb,
				const void *ptr, unsigned off, int size)
{
	const unsigned long member_offset = (unsigned long)ptr + off;

	if (unlikely(member_offset + size > eb->len)) {
		btrfs_warn(eb->fs_info,
			"bad eb member %s: ptr 0x%lx start %llu member offset %lu size %d",
			(member_offset > eb->len ? "start" : "end"),
			(unsigned long)ptr, eb->start, member_offset, size);
		return false;
	}

	return true;
}

void btrfs_init_map_token(struct btrfs_map_token *token, struct extent_buffer *eb)
{
	token->eb = eb;
	token->kaddr = page_address(eb->pages[0]);
	token->offset = 0;
}
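
/*
 * Illustrative sketch (not code from this file): a caller reading several
 * members of the same on-disk structure can cache the mapped page in a
 * token so consecutive accesses stay on the fast path:
 *
 *	struct btrfs_map_token token;
 *	u64 gen, isize;
 *
 *	btrfs_init_map_token(&token, eb);
 *	gen   = btrfs_get_token_64(&token, inode_item,
 *				   offsetof(struct btrfs_inode_item, generation));
 *	isize = btrfs_get_token_64(&token, inode_item,
 *				   offsetof(struct btrfs_inode_item, size));
 *
 * Real callers normally go through the specialized BTRFS_SETGET_*_FUNCS
 * helpers rather than open-coding the offsets like this.
 */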

/*
 * Macro templates that define helpers to read/write extent buffer data of a
 * given size, that are also used via ctree.h for access to item members by
 * specialized helpers.
 *
 * Generic helpers:
 * - btrfs_set_8 (for 8/16/32/64)
 * - btrfs_get_8 (for 8/16/32/64)
 *
 * Generic helpers with a token (cached address of the most recently accessed
 * page):
 * - btrfs_set_token_8 (for 8/16/32/64)
 * - btrfs_get_token_8 (for 8/16/32/64)
 *
 * The set/get functions handle data spanning two pages transparently, in case
 * the metadata block size is larger than a page. Every pointer to metadata
 * items is an offset into the extent buffer pages array, cast to a specific
 * type. This gives us all the type checking.
 *
 * The extent buffer pages stored in the pages array do not form a contiguous
 * physical range, but the API functions assume a linear offset within the
 * range from 0 to the metadata node size.
 */
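
/*
 * For illustration, DEFINE_BTRFS_SETGET_BITS(32) below emits btrfs_get_32,
 * btrfs_set_32, btrfs_get_token_32 and btrfs_set_token_32. A specialized
 * item member accessor then boils down to something like the following
 * (hypothetical expansion; the real ones come from the BTRFS_SETGET_FUNCS
 * template):
 *
 *	u32 btrfs_item_size(const struct extent_buffer *eb,
 *			    struct btrfs_item *s)
 *	{
 *		return btrfs_get_32(eb, s,
 *				    offsetof(struct btrfs_item, size));
 *	}
 */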

#define DEFINE_BTRFS_SETGET_BITS(bits)					\
u##bits btrfs_get_token_##bits(struct btrfs_map_token *token,		\
			       const void *ptr, unsigned long off)	\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long idx = get_eb_page_index(member_offset);	\
	const unsigned long oip = get_eb_offset_in_page(token->eb,	\
							member_offset);	\
	const int size = sizeof(u##bits);				\
	u8 lebytes[sizeof(u##bits)];					\
	const int part = PAGE_SIZE - oip;				\
									\
	ASSERT(token);							\
	ASSERT(token->kaddr);						\
	ASSERT(check_setget_bounds(token->eb, ptr, off, size));		\
	if (token->offset <= member_offset &&				\
	    member_offset + size <= token->offset + PAGE_SIZE) {	\
		return get_unaligned_le##bits(token->kaddr + oip);	\
	}								\
	token->kaddr = page_address(token->eb->pages[idx]);		\
	token->offset = idx << PAGE_SHIFT;				\
	if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE)	\
		return get_unaligned_le##bits(token->kaddr + oip);	\
									\
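	/* Value straddles two pages: read both parts into lebytes. */	\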
	memcpy(lebytes, token->kaddr + oip, part);			\
	token->kaddr = page_address(token->eb->pages[idx + 1]);	\
	token->offset = (idx + 1) << PAGE_SHIFT;			\
	memcpy(lebytes + part, token->kaddr, size - part);		\
	return get_unaligned_le##bits(lebytes);				\
}									\
u##bits btrfs_get_##bits(const struct extent_buffer *eb,		\
			 const void *ptr, unsigned long off)		\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long oip = get_eb_offset_in_page(eb, member_offset); \
	const unsigned long idx = get_eb_page_index(member_offset);	\
	char *kaddr = page_address(eb->pages[idx]);			\
	const int size = sizeof(u##bits);				\
	const int part = PAGE_SIZE - oip;				\
	u8 lebytes[sizeof(u##bits)];					\
									\
	ASSERT(check_setget_bounds(eb, ptr, off, size));		\
	if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE)	\
		return get_unaligned_le##bits(kaddr + oip);		\
									\
	memcpy(lebytes, kaddr + oip, part);				\
	kaddr = page_address(eb->pages[idx + 1]);			\
	memcpy(lebytes + part, kaddr, size - part);			\
	return get_unaligned_le##bits(lebytes);				\
}									\
void btrfs_set_token_##bits(struct btrfs_map_token *token,		\
			    const void *ptr, unsigned long off,		\
			    u##bits val)				\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long idx = get_eb_page_index(member_offset);	\
	const unsigned long oip = get_eb_offset_in_page(token->eb,	\
							member_offset);	\
	const int size = sizeof(u##bits);				\
	u8 lebytes[sizeof(u##bits)];					\
	const int part = PAGE_SIZE - oip;				\
									\
	ASSERT(token);							\
	ASSERT(token->kaddr);						\
	ASSERT(check_setget_bounds(token->eb, ptr, off, size));		\
	if (token->offset <= member_offset &&				\
	    member_offset + size <= token->offset + PAGE_SIZE) {	\
		put_unaligned_le##bits(val, token->kaddr + oip);	\
		return;							\
	}								\
	token->kaddr = page_address(token->eb->pages[idx]);		\
	token->offset = idx << PAGE_SHIFT;				\
	if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE) { \
		put_unaligned_le##bits(val, token->kaddr + oip);	\
		return;							\
	}								\
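	/* Value straddles two pages: encode once, then split the copy. */ \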
	put_unaligned_le##bits(val, lebytes);				\
	memcpy(token->kaddr + oip, lebytes, part);			\
	token->kaddr = page_address(token->eb->pages[idx + 1]);	\
	token->offset = (idx + 1) << PAGE_SHIFT;			\
	memcpy(token->kaddr, lebytes + part, size - part);		\
}									\
void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr,	\
		      unsigned long off, u##bits val)			\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long oip = get_eb_offset_in_page(eb, member_offset); \
	const unsigned long idx = get_eb_page_index(member_offset);	\
	char *kaddr = page_address(eb->pages[idx]);			\
	const int size = sizeof(u##bits);				\
	const int part = PAGE_SIZE - oip;				\
	u8 lebytes[sizeof(u##bits)];					\
									\
	ASSERT(check_setget_bounds(eb, ptr, off, size));		\
	if (INLINE_EXTENT_BUFFER_PAGES == 1 || oip + size <= PAGE_SIZE) { \
		put_unaligned_le##bits(val, kaddr + oip);		\
		return;							\
	}								\
									\
	put_unaligned_le##bits(val, lebytes);				\
	memcpy(kaddr + oip, lebytes, part);				\
	kaddr = page_address(eb->pages[idx + 1]);			\
	memcpy(kaddr, lebytes + part, size - part);			\
}

DEFINE_BTRFS_SETGET_BITS(8)
DEFINE_BTRFS_SETGET_BITS(16)
DEFINE_BTRFS_SETGET_BITS(32)
DEFINE_BTRFS_SETGET_BITS(64)
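
/*
 * Worked example of the straddling case (assuming 4K pages and a nodesize
 * larger than one page): a u64 member at linear offset 4092 has idx == 0,
 * oip == 4092 and part == 4, so the first four little-endian bytes sit at
 * the end of page 0 and the remaining four at the start of page 1. The
 * helpers above stitch them back together through the lebytes buffer.
 */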
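
/*
 * Copy the key of the nr-th key/block pointer pair in a node into
 * @disk_key. read_eb_member() does a raw copy of the member and handles
 * a possible page boundary inside the extent buffer.
 */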
void btrfs_node_key(const struct extent_buffer *eb,
		    struct btrfs_disk_key *disk_key, int nr)
{
	unsigned long ptr = btrfs_node_key_ptr_offset(eb, nr);
	read_eb_member(eb, (struct btrfs_key_ptr *)ptr,
		       struct btrfs_key_ptr, key, disk_key);
}