Merge remote-tracking branch 'efi/urgent' into x86/urgent
There have been reports of EFI crashes since -rc1. The following two commits fix known issues:

 * Fix boot failure on 32-bit EFI due to the recent EFI memmap changes
   merged during the merge window - Borislav Petkov

 * Avoid a crash during efi_bgrt_init() by detecting invalid BGRT
   headers based on the 'status' field.

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
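For reference, the second fix boils down to tightening the sanity checks at the top of efi_bgrt_init() before the BGRT image address is ever dereferenced. The following is only an abbreviated sketch of how those checks read once the efi-bgrt.c hunk below is applied; the ACPI table lookup and the rest of the function are omitted:

	/* Sketch of the post-patch validation in efi_bgrt_init(): a BGRT
	 * table with a bogus length, version or status field now makes
	 * the function bail out early instead of mapping its image. */
	if (bgrt_tab->header.length < sizeof(*bgrt_tab))
		return;
	if (bgrt_tab->version != 1 || bgrt_tab->status != 1)
		return;
	if (bgrt_tab->image_type != 0 || !bgrt_tab->image_address)
		return;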
commit 31fce91e7a

Makefile | 2
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
@@ -132,6 +132,8 @@ extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
 extern void efi_sync_low_kernel_mappings(void);
 extern void efi_setup_page_tables(void);
 extern void __init old_map_region(efi_memory_desc_t *md);
+extern void __init runtime_code_page_mkexec(void);
+extern void __init efi_runtime_mkexec(void);
 
 struct efi_setup_data {
 	u64 fw_vendor;
@@ -42,7 +42,7 @@ void __init efi_bgrt_init(void)
 
 	if (bgrt_tab->header.length < sizeof(*bgrt_tab))
 		return;
-	if (bgrt_tab->version != 1)
+	if (bgrt_tab->version != 1 || bgrt_tab->status != 1)
 		return;
 	if (bgrt_tab->image_type != 0 || !bgrt_tab->image_address)
 		return;
@@ -792,7 +792,7 @@ void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
 		set_memory_nx(addr, npages);
 }
 
-static void __init runtime_code_page_mkexec(void)
+void __init runtime_code_page_mkexec(void)
 {
 	efi_memory_desc_t *md;
 	void *p;
@@ -1069,8 +1069,7 @@ void __init efi_enter_virtual_mode(void)
 	efi.update_capsule = virt_efi_update_capsule;
 	efi.query_capsule_caps = virt_efi_query_capsule_caps;
 
-	if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
-		runtime_code_page_mkexec();
+	efi_runtime_mkexec();
 
 	kfree(new_memmap);
 
@@ -77,3 +77,9 @@ void efi_call_phys_epilog(void)
 
 	local_irq_restore(efi_rt_eflags);
 }
+
+void __init efi_runtime_mkexec(void)
+{
+	if (__supported_pte_mask & _PAGE_NX)
+		runtime_code_page_mkexec();
+}
@@ -233,3 +233,12 @@ void __init parse_efi_setup(u64 phys_addr, u32 data_len)
 {
 	efi_setup = phys_addr + sizeof(struct setup_data);
 }
+
+void __init efi_runtime_mkexec(void)
+{
+	if (!efi_enabled(EFI_OLD_MEMMAP))
+		return;
+
+	if (__supported_pte_mask & _PAGE_NX)
+		runtime_code_page_mkexec();
+}
@@ -890,12 +890,10 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 	} else {
 		/* Failback to copying a page */
 		struct page *page = alloc_page(GFP_KERNEL);
-		char *src = buf->ops->map(pipe, buf, 1);
-		char *dst;
+		char *src;
 
 		if (!page)
 			return -ENOMEM;
-		dst = kmap(page);
 
 		offset = sd->pos & ~PAGE_MASK;
 
@@ -903,9 +901,8 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 		if (len + offset > PAGE_SIZE)
 			len = PAGE_SIZE - offset;
 
-		memcpy(dst + offset, src + buf->offset, len);
-
-		kunmap(page);
+		src = buf->ops->map(pipe, buf, 1);
+		memcpy(page_address(page) + offset, src + buf->offset, len);
 		buf->ops->unmap(pipe, buf, src);
 
 		sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
@@ -1010,6 +1010,8 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 		bytes = min(bytes, working_bytes);
 		kaddr = kmap_atomic(page_out);
 		memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
+		if (*pg_index == (vcnt - 1) && *pg_offset == 0)
+			memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
 		kunmap_atomic(kaddr);
 		flush_dcache_page(page_out);
 
@@ -2385,6 +2385,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 			spin_unlock(&delayed_refs->lock);
 			locked_ref = NULL;
 			cond_resched();
+			count++;
 			continue;
 		}
 
@@ -4525,7 +4525,7 @@ static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
 	spin_lock(&root->fs_info->super_lock);
 	strcpy(super_block->label, label);
 	spin_unlock(&root->fs_info->super_lock);
-	ret = btrfs_end_transaction(trans, root);
+	ret = btrfs_commit_transaction(trans, root);
 
 out_unlock:
 	mnt_drop_write_file(file);
@@ -4668,7 +4668,7 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
 	if (ret)
 		return ret;
 
-	trans = btrfs_start_transaction(root, 1);
+	trans = btrfs_start_transaction(root, 0);
 	if (IS_ERR(trans))
 		return PTR_ERR(trans);
 
@@ -4689,7 +4689,7 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
 	btrfs_set_super_incompat_flags(super_block, newflags);
 	spin_unlock(&root->fs_info->super_lock);
 
-	return btrfs_end_transaction(trans, root);
+	return btrfs_commit_transaction(trans, root);
 }
 
 long btrfs_ioctl(struct file *file, unsigned int
@@ -2774,8 +2774,6 @@ static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino)
 	return 0;
 }
 
-#ifdef CONFIG_BTRFS_ASSERT
-
 static int del_waiting_dir_move(struct send_ctx *sctx, u64 ino)
 {
 	struct rb_node *n = sctx->waiting_dir_moves.rb_node;
@@ -2796,8 +2794,6 @@ static int del_waiting_dir_move(struct send_ctx *sctx, u64 ino)
 	return -ENOENT;
 }
 
-#endif
-
 static int add_pending_dir_move(struct send_ctx *sctx, u64 parent_ino)
 {
 	struct rb_node **p = &sctx->pending_dir_moves.rb_node;
@@ -2902,7 +2898,9 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
 	}
 
 	sctx->send_progress = sctx->cur_ino + 1;
-	ASSERT(del_waiting_dir_move(sctx, pm->ino) == 0);
+	ret = del_waiting_dir_move(sctx, pm->ino);
+	ASSERT(ret == 0);
+
 	ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
 	if (ret < 0)
 		goto out;
@@ -2559,8 +2559,8 @@ cifs_writev(struct kiocb *iocb, const struct iovec *iov,
 	if (rc > 0) {
 		ssize_t err;
 
-		err = generic_write_sync(file, pos, rc);
-		if (err < 0 && rc > 0)
+		err = generic_write_sync(file, iocb->ki_pos - rc, rc);
+		if (err < 0)
 			rc = err;
 	}
 
@@ -152,7 +152,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
 	if (ret > 0) {
 		ssize_t err;
 
-		err = generic_write_sync(file, pos, ret);
+		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
 		if (err < 0 && ret > 0)
 			ret = err;
 	}
@@ -2134,7 +2134,7 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
 	mutex_unlock(&inode->i_mutex);
 	if (ret > 0) {
-		int err = generic_write_sync(file, pos, ret);
+		int err = generic_write_sync(file, iocb->ki_pos - ret, ret);
 		if (err < 0)
 			ret = err;
 	}
fs/sync.c | 17
@@ -222,23 +222,6 @@ SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
 	return do_fsync(fd, 1);
 }
 
-/**
- * generic_write_sync - perform syncing after a write if file / inode is sync
- * @file:	file to which the write happened
- * @pos:	offset where the write started
- * @count:	length of the write
- *
- * This is just a simple wrapper about our general syncing function.
- */
-int generic_write_sync(struct file *file, loff_t pos, loff_t count)
-{
-	if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
-		return 0;
-	return vfs_fsync_range(file, pos, pos + count - 1,
-			       (file->f_flags & __O_SYNC) ? 0 : 1);
-}
-EXPORT_SYMBOL(generic_write_sync);
-
 /*
  * sys_sync_file_range() permits finely controlled syncing over a segment of
  * a file in the range offset .. (offset+nbytes-1) inclusive. If nbytes is
@@ -799,7 +799,7 @@ xfs_file_aio_write(
 		XFS_STATS_ADD(xs_write_bytes, ret);
 
 		/* Handle various SYNC-type writes */
-		err = generic_write_sync(file, pos, ret);
+		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
 		if (err < 0)
 			ret = err;
 	}
@@ -2274,7 +2274,13 @@ extern int filemap_fdatawrite_range(struct address_space *mapping,
 extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
 			   int datasync);
 extern int vfs_fsync(struct file *file, int datasync);
-extern int generic_write_sync(struct file *file, loff_t pos, loff_t count);
+static inline int generic_write_sync(struct file *file, loff_t pos, loff_t count)
+{
+	if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
+		return 0;
+	return vfs_fsync_range(file, pos, pos + count - 1,
+			       (file->f_flags & __O_SYNC) ? 0 : 1);
+}
 extern void emergency_sync(void);
 extern void emergency_remount(void);
 #ifdef CONFIG_BLOCK
@@ -2553,8 +2553,8 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	if (ret > 0) {
 		ssize_t err;
 
-		err = generic_write_sync(file, pos, ret);
-		if (err < 0 && ret > 0)
+		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
+		if (err < 0)
 			ret = err;
 	}
 	return ret;
@@ -17,6 +17,7 @@
 #include <linux/inet_diag.h>
 #include <linux/xfrm.h>
 #include <linux/audit.h>
+#include <linux/sock_diag.h>
 
 #include "flask.h"
 #include "av_permissions.h"
@@ -78,6 +79,7 @@ static struct nlmsg_perm nlmsg_tcpdiag_perms[] =
 {
 	{ TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
 	{ DCCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+	{ SOCK_DIAG_BY_FAMILY, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
 };
 
 static struct nlmsg_perm nlmsg_xfrm_perms[] =
@@ -1232,6 +1232,10 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
 	struct context context;
 	int rc = 0;
 
+	/* An empty security context is never valid. */
+	if (!scontext_len)
+		return -EINVAL;
+
 	if (!ss_initialized) {
 		int i;
 
@@ -63,11 +63,35 @@ static int build_id_cache__kcore_dir(char *dir, size_t sz)
 	return 0;
 }
 
+static bool same_kallsyms_reloc(const char *from_dir, char *to_dir)
+{
+	char from[PATH_MAX];
+	char to[PATH_MAX];
+	const char *name;
+	u64 addr1 = 0, addr2 = 0;
+	int i;
+
+	scnprintf(from, sizeof(from), "%s/kallsyms", from_dir);
+	scnprintf(to, sizeof(to), "%s/kallsyms", to_dir);
+
+	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+		addr1 = kallsyms__get_function_start(from, name);
+		if (addr1)
+			break;
+	}
+
+	if (name)
+		addr2 = kallsyms__get_function_start(to, name);
+
+	return addr1 == addr2;
+}
+
 static int build_id_cache__kcore_existing(const char *from_dir, char *to_dir,
 					  size_t to_dir_sz)
 {
 	char from[PATH_MAX];
 	char to[PATH_MAX];
+	char to_subdir[PATH_MAX];
 	struct dirent *dent;
 	int ret = -1;
 	DIR *d;
@@ -86,10 +110,11 @@ static int build_id_cache__kcore_existing(const char *from_dir, char *to_dir,
 			continue;
 		scnprintf(to, sizeof(to), "%s/%s/modules", to_dir,
 			  dent->d_name);
-		if (!compare_proc_modules(from, to)) {
-			scnprintf(to, sizeof(to), "%s/%s", to_dir,
-				  dent->d_name);
-			strlcpy(to_dir, to, to_dir_sz);
+		scnprintf(to_subdir, sizeof(to_subdir), "%s/%s",
+			  to_dir, dent->d_name);
+		if (!compare_proc_modules(from, to) &&
+		    same_kallsyms_reloc(from_dir, to_subdir)) {
+			strlcpy(to_dir, to_subdir, to_dir_sz);
 			ret = 0;
 			break;
 		}
@@ -287,10 +287,7 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
 	 * have no _text sometimes.
 	 */
 	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-						 machine, "_text");
-	if (err < 0)
-		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-							 machine, "_stext");
+						 machine);
 	if (err < 0)
 		pr_err("Couldn't record guest kernel [%d]'s reference"
 		       " relocation symbol.\n", machine->pid);
@@ -457,10 +454,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
 	}
 
 	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-						 machine, "_text");
-	if (err < 0)
-		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-							 machine, "_stext");
+						 machine);
 	if (err < 0)
 		pr_err("Couldn't record kernel reference relocation symbol\n"
 		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
@@ -454,7 +454,6 @@ So to start with, in order to add HAVE_PERF_EVENTS to your Kconfig, you
 will need at least this:
 	- asm/perf_event.h - a basic stub will suffice at first
 	- support for atomic64 types (and associated helper functions)
-	- set_perf_event_pending() implemented
 
 If your architecture does have hardware capabilities, you can override the
 weak stub hw_perf_event_init() to register hardware counters.
@@ -100,8 +100,8 @@
 
 #ifdef __aarch64__
 #define mb()		asm volatile("dmb ish" ::: "memory")
-#define wmb()		asm volatile("dmb ishld" ::: "memory")
-#define rmb()		asm volatile("dmb ishst" ::: "memory")
+#define wmb()		asm volatile("dmb ishst" ::: "memory")
+#define rmb()		asm volatile("dmb ishld" ::: "memory")
 #define cpu_relax()	asm volatile("yield" ::: "memory")
 #endif
 
@@ -26,7 +26,6 @@ int test__vmlinux_matches_kallsyms(void)
 	struct map *kallsyms_map, *vmlinux_map;
 	struct machine kallsyms, vmlinux;
 	enum map_type type = MAP__FUNCTION;
-	struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };
 	u64 mem_start, mem_end;
 
 	/*
@@ -70,14 +69,6 @@ int test__vmlinux_matches_kallsyms(void)
 	 */
 	kallsyms_map = machine__kernel_map(&kallsyms, type);
 
-	sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
-	if (sym == NULL) {
-		pr_debug("dso__find_symbol_by_name ");
-		goto out;
-	}
-
-	ref_reloc_sym.addr = UM(sym->start);
-
 	/*
 	 * Step 5:
 	 *
@@ -89,7 +80,6 @@ int test__vmlinux_matches_kallsyms(void)
 	}
 
 	vmlinux_map = machine__kernel_map(&vmlinux, type);
-	map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;
 
 	/*
 	 * Step 6:
@@ -470,23 +470,32 @@ static int find_symbol_cb(void *arg, const char *name, char type,
 	return 1;
 }
 
+u64 kallsyms__get_function_start(const char *kallsyms_filename,
+				 const char *symbol_name)
+{
+	struct process_symbol_args args = { .name = symbol_name, };
+
+	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
+		return 0;
+
+	return args.start;
+}
+
 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
 				       perf_event__handler_t process,
-				       struct machine *machine,
-				       const char *symbol_name)
+				       struct machine *machine)
 {
 	size_t size;
-	const char *filename, *mmap_name;
-	char path[PATH_MAX];
+	const char *mmap_name;
 	char name_buff[PATH_MAX];
 	struct map *map;
+	struct kmap *kmap;
 	int err;
 	/*
 	 * We should get this from /sys/kernel/sections/.text, but till that is
 	 * available use this, and after it is use this as a fallback for older
 	 * kernels.
 	 */
-	struct process_symbol_args args = { .name = symbol_name, };
 	union perf_event *event = zalloc((sizeof(event->mmap) +
 					  machine->id_hdr_size));
 	if (event == NULL) {
@@ -502,30 +511,19 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
 		 * see kernel/perf_event.c __perf_event_mmap
 		 */
 		event->header.misc = PERF_RECORD_MISC_KERNEL;
-		filename = "/proc/kallsyms";
 	} else {
 		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
-		if (machine__is_default_guest(machine))
-			filename = (char *) symbol_conf.default_guest_kallsyms;
-		else {
-			sprintf(path, "%s/proc/kallsyms", machine->root_dir);
-			filename = path;
-		}
 	}
 
-	if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) {
-		free(event);
-		return -ENOENT;
-	}
-
 	map = machine->vmlinux_maps[MAP__FUNCTION];
+	kmap = map__kmap(map);
 	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
-			"%s%s", mmap_name, symbol_name) + 1;
+			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
 	size = PERF_ALIGN(size, sizeof(u64));
 	event->mmap.header.type = PERF_RECORD_MMAP;
 	event->mmap.header.size = (sizeof(event->mmap) -
 			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
-	event->mmap.pgoff = args.start;
+	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
 	event->mmap.start = map->start;
 	event->mmap.len = map->end - event->mmap.start;
 	event->mmap.pid = machine->pid;
@@ -214,8 +214,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
 				   struct machine *machine, bool mmap_data);
 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
 				       perf_event__handler_t process,
-				       struct machine *machine,
-				       const char *symbol_name);
+				       struct machine *machine);
 
 int perf_event__synthesize_modules(struct perf_tool *tool,
 				   perf_event__handler_t process,
@@ -279,4 +278,7 @@ size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf(union perf_event *event, FILE *fp);
 
+u64 kallsyms__get_function_start(const char *kallsyms_filename,
+				 const char *symbol_name);
+
 #endif /* __PERF_RECORD_H */
@@ -0,0 +1,6 @@
+#ifndef __ASM_GENERIC_HASH_H
+#define __ASM_GENERIC_HASH_H
+
+/* Stub */
+
+#endif /* __ASM_GENERIC_HASH_H */
@@ -496,19 +496,22 @@ static int symbol__in_kernel(void *arg, const char *name,
 	return 1;
 }
 
+static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
+					   size_t bufsz)
+{
+	if (machine__is_default_guest(machine))
+		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
+	else
+		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
+}
+
 /* Figure out the start address of kernel map from /proc/kallsyms */
 static u64 machine__get_kernel_start_addr(struct machine *machine)
 {
-	const char *filename;
-	char path[PATH_MAX];
+	char filename[PATH_MAX];
 	struct process_args args;
 
-	if (machine__is_default_guest(machine))
-		filename = (char *)symbol_conf.default_guest_kallsyms;
-	else {
-		sprintf(path, "%s/proc/kallsyms", machine->root_dir);
-		filename = path;
-	}
+	machine__get_kallsyms_filename(machine, filename, PATH_MAX);
 
 	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
 		return 0;
@@ -829,9 +832,25 @@ static int machine__create_modules(struct machine *machine)
 	return 0;
 }
 
+const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
+
 int machine__create_kernel_maps(struct machine *machine)
 {
 	struct dso *kernel = machine__get_kernel(machine);
+	char filename[PATH_MAX];
+	const char *name;
+	u64 addr = 0;
+	int i;
+
+	machine__get_kallsyms_filename(machine, filename, PATH_MAX);
+
+	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+		addr = kallsyms__get_function_start(filename, name);
+		if (addr)
+			break;
+	}
+	if (!addr)
+		return -1;
 
 	if (kernel == NULL ||
 	    __machine__create_kernel_maps(machine, kernel) < 0)
@@ -850,6 +869,13 @@ int machine__create_kernel_maps(struct machine *machine)
 	 * Now that we have all the maps created, just set the ->end of them:
 	 */
 	map_groups__fixup_end(&machine->kmaps);
+
+	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
+					     addr)) {
+		machine__destroy_kernel_maps(machine);
+		return -1;
+	}
+
 	return 0;
 }
 
@@ -18,6 +18,8 @@ union perf_event;
 #define HOST_KERNEL_ID (-1)
 #define DEFAULT_GUEST_KERNEL_ID (0)
 
+extern const char *ref_reloc_sym_names[];
+
 struct machine {
 	struct rb_node rb_node;
 	pid_t pid;
@@ -39,6 +39,7 @@ void map__init(struct map *map, enum map_type type,
 	map->start = start;
 	map->end = end;
 	map->pgoff = pgoff;
+	map->reloc = 0;
 	map->dso = dso;
 	map->map_ip = map__map_ip;
 	map->unmap_ip = map__unmap_ip;
@@ -288,7 +289,7 @@ u64 map__rip_2objdump(struct map *map, u64 rip)
 	if (map->dso->rel)
 		return rip - map->pgoff;
 
-	return map->unmap_ip(map, rip);
+	return map->unmap_ip(map, rip) - map->reloc;
 }
 
 /**
@@ -311,7 +312,7 @@ u64 map__objdump_2mem(struct map *map, u64 ip)
 	if (map->dso->rel)
 		return map->unmap_ip(map, ip + map->pgoff);
 
-	return ip;
+	return ip + map->reloc;
 }
 
 void map_groups__init(struct map_groups *mg)
@@ -36,6 +36,7 @@ struct map {
 	bool erange_warned;
 	u32 priv;
 	u64 pgoff;
+	u64 reloc;
 	u32 maj, min; /* only valid for MMAP2 record */
 	u64 ino; /* only valid for MMAP2 record */
 	u64 ino_generation;/* only valid for MMAP2 record */
@@ -751,6 +751,8 @@ int dso__load_sym(struct dso *dso, struct map *map,
 			if (strcmp(elf_name, kmap->ref_reloc_sym->name))
 				continue;
 			kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
+			map->reloc = kmap->ref_reloc_sym->addr -
+				     kmap->ref_reloc_sym->unrelocated_addr;
 			break;
 		}
 	}
@@ -922,6 +924,7 @@ int dso__load_sym(struct dso *dso, struct map *map,
 					(u64)shdr.sh_offset);
 			sym.st_value -= shdr.sh_addr - shdr.sh_offset;
 		}
+new_symbol:
 		/*
 		 * We need to figure out if the object was created from C++ sources
 		 * DWARF DW_compile_unit has this, but we don't always have access
@@ -933,7 +936,6 @@ int dso__load_sym(struct dso *dso, struct map *map,
 			if (demangled != NULL)
 				elf_name = demangled;
 		}
-new_symbol:
 		f = symbol__new(sym.st_value, sym.st_size,
 				GELF_ST_BIND(sym.st_info), elf_name);
 		free(demangled);
@@ -627,7 +627,7 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
  * kernel range is broken in several maps, named [kernel].N, as we don't have
  * the original ELF section names vmlinux have.
  */
-static int dso__split_kallsyms(struct dso *dso, struct map *map,
+static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
			       symbol_filter_t filter)
 {
 	struct map_groups *kmaps = map__kmap(map)->kmaps;
@@ -692,6 +692,12 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map,
 		char dso_name[PATH_MAX];
 		struct dso *ndso;
 
+		if (delta) {
+			/* Kernel was relocated at boot time */
+			pos->start -= delta;
+			pos->end -= delta;
+		}
+
 		if (count == 0) {
 			curr_map = map;
 			goto filter_symbol;
@@ -721,6 +727,10 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map,
 			curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
 			map_groups__insert(kmaps, curr_map);
 			++kernel_range;
+		} else if (delta) {
+			/* Kernel was relocated at boot time */
+			pos->start -= delta;
+			pos->end -= delta;
 		}
filter_symbol:
 		if (filter && filter(curr_map, pos)) {
@@ -976,6 +986,23 @@ static int validate_kcore_modules(const char *kallsyms_filename,
 	return 0;
 }
 
+static int validate_kcore_addresses(const char *kallsyms_filename,
+				    struct map *map)
+{
+	struct kmap *kmap = map__kmap(map);
+
+	if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
+		u64 start;
+
+		start = kallsyms__get_function_start(kallsyms_filename,
+						     kmap->ref_reloc_sym->name);
+		if (start != kmap->ref_reloc_sym->addr)
+			return -EINVAL;
+	}
+
+	return validate_kcore_modules(kallsyms_filename, map);
+}
+
 struct kcore_mapfn_data {
 	struct dso *dso;
 	enum map_type type;
@@ -1019,8 +1046,8 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
 					 kallsyms_filename))
 		return -EINVAL;
 
-	/* All modules must be present at their original addresses */
-	if (validate_kcore_modules(kallsyms_filename, map))
+	/* Modules and kernel must be present at their original addresses */
+	if (validate_kcore_addresses(kallsyms_filename, map))
 		return -EINVAL;
 
 	md.dso = dso;
@@ -1113,15 +1140,41 @@ out_err:
 	return -EINVAL;
 }
 
+/*
+ * If the kernel is relocated at boot time, kallsyms won't match. Compute the
+ * delta based on the relocation reference symbol.
+ */
+static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
+{
+	struct kmap *kmap = map__kmap(map);
+	u64 addr;
+
+	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
+		return 0;
+
+	addr = kallsyms__get_function_start(filename,
+					    kmap->ref_reloc_sym->name);
+	if (!addr)
+		return -1;
+
+	*delta = addr - kmap->ref_reloc_sym->addr;
+	return 0;
+}
+
 int dso__load_kallsyms(struct dso *dso, const char *filename,
 		       struct map *map, symbol_filter_t filter)
 {
+	u64 delta = 0;
+
 	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
 		return -1;
 
 	if (dso__load_all_kallsyms(dso, filename, map) < 0)
 		return -1;
 
+	if (kallsyms__delta(map, filename, &delta))
+		return -1;
+
 	symbols__fixup_duplicate(&dso->symbols[map->type]);
 	symbols__fixup_end(&dso->symbols[map->type]);
 
@@ -1133,7 +1186,7 @@ int dso__load_kallsyms(struct dso *dso, const char *filename,
 	if (!dso__load_kcore(dso, map, filename))
 		return dso__split_kallsyms_for_kcore(dso, map, filter);
 	else
-		return dso__split_kallsyms(dso, map, filter);
+		return dso__split_kallsyms(dso, map, delta, filter);
 }
 
 static int dso__load_perf_map(struct dso *dso, struct map *map,
@@ -1424,7 +1477,7 @@ static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
 			continue;
 		scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
 			  "%s/%s/kallsyms", dir, dent->d_name);
-		if (!validate_kcore_modules(kallsyms_filename, map)) {
+		if (!validate_kcore_addresses(kallsyms_filename, map)) {
 			strlcpy(dir, kallsyms_filename, dir_sz);
 			ret = 0;
 			break;
@@ -1479,7 +1532,7 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
 	if (fd != -1) {
 		close(fd);
 		/* If module maps match go with /proc/kallsyms */
-		if (!validate_kcore_modules("/proc/kallsyms", map))
+		if (!validate_kcore_addresses("/proc/kallsyms", map))
 			goto proc_kallsyms;
 	}
 