// SPDX-License-Identifier: GPL-2.0-only
/*
 * sorttable.c: Sort the kernel's table
 *
 * Added ORC unwind tables sort support and other updates:
 * Copyright (C) 1999-2019 Alibaba Group Holding Limited. by:
 * Shile Zhang <shile.zhang@linux.alibaba.com>
 *
 * Copyright 2011 - 2012 Cavium, Inc.
 *
 * Based on code taken from recordmcount.c which is:
 *
 * Copyright 2009 John F. Reiser <jreiser@BitWagon.com>. All rights reserved.
 *
 * Restructured to fit Linux format, as well as other updates:
 * Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
 */

/*
 * Strategy: alter the vmlinux file in-place.
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <getopt.h>
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <pthread.h>

#include <tools/be_byteshift.h>
#include <tools/le_byteshift.h>

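/*
 * e_machine values that may be missing from older system <elf.h>
 * headers; define them here so those architectures are still recognized.
 */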
#ifndef EM_ARCOMPACT
#define EM_ARCOMPACT	93
#endif

#ifndef EM_XTENSA
#define EM_XTENSA	94
#endif

#ifndef EM_AARCH64
#define EM_AARCH64	183
#endif

#ifndef EM_MICROBLAZE
#define EM_MICROBLAZE	189
#endif

#ifndef EM_ARCV2
#define EM_ARCV2	195
#endif

#ifndef EM_RISCV
#define EM_RISCV	243
#endif

#ifndef EM_LOONGARCH
#define EM_LOONGARCH	258
#endif

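/*
 * Unaligned, endian-aware accessors (32/16/64-bit reads and writes),
 * installed in do_file() according to the ELF file's EI_DATA encoding.
 */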
static uint32_t (*r)(const uint32_t *);
static uint16_t (*r2)(const uint16_t *);
static uint64_t (*r8)(const uint64_t *);
static void (*w)(uint32_t, uint32_t *);
static void (*w2)(uint16_t, uint16_t *);
static void (*w8)(uint64_t, uint64_t *);
typedef void (*table_sort_t)(char *, int);

/*
 * Get the whole file as a programming convenience in order to avoid
 * malloc+lseek+read+free of many pieces.  If successful, then mmap
 * avoids copying unused pieces; on failure the file is simply not
 * processed (mmap_file() returns NULL).
 * Open for both read and write.
 */
static void *mmap_file(char const *fname, size_t *size)
{
	int fd;
	struct stat sb;
	void *addr = NULL;

	fd = open(fname, O_RDWR);
	if (fd < 0) {
		perror(fname);
		return NULL;
	}
	if (fstat(fd, &sb) < 0) {
		perror(fname);
		goto out;
	}
	if (!S_ISREG(sb.st_mode)) {
		fprintf(stderr, "not a regular file: %s\n", fname);
		goto out;
	}

	addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED) {
		fprintf(stderr, "Could not mmap file: %s\n", fname);
		goto out;
	}

	*size = sb.st_size;
out:
	close(fd);
	return addr;
}

static uint32_t rbe(const uint32_t *x)
{
	return get_unaligned_be32(x);
}

static uint16_t r2be(const uint16_t *x)
{
	return get_unaligned_be16(x);
}

static uint64_t r8be(const uint64_t *x)
{
	return get_unaligned_be64(x);
}

static uint32_t rle(const uint32_t *x)
{
	return get_unaligned_le32(x);
}

static uint16_t r2le(const uint16_t *x)
{
	return get_unaligned_le16(x);
}

static uint64_t r8le(const uint64_t *x)
{
	return get_unaligned_le64(x);
}

static void wbe(uint32_t val, uint32_t *x)
{
	put_unaligned_be32(val, x);
}

static void w2be(uint16_t val, uint16_t *x)
{
	put_unaligned_be16(val, x);
}

static void w8be(uint64_t val, uint64_t *x)
{
	put_unaligned_be64(val, x);
}

static void wle(uint32_t val, uint32_t *x)
{
	put_unaligned_le32(val, x);
}

static void w2le(uint16_t val, uint16_t *x)
{
	put_unaligned_le16(val, x);
}

static void w8le(uint64_t val, uint64_t *x)
{
	put_unaligned_le64(val, x);
}

/*
 * Move reserved section indices SHN_LORESERVE..SHN_HIRESERVE out of
 * the way to -256..-1, to avoid conflicting with real section
 * indices.
 */
#define SPECIAL(i) ((i) - (SHN_HIRESERVE + 1))

static inline int is_shndx_special(unsigned int i)
{
	return i != SHN_XINDEX && i >= SHN_LORESERVE && i <= SHN_HIRESERVE;
}

/* Accessor for sym->st_shndx, hides ugliness of "64k sections" */
static inline unsigned int get_secindex(unsigned int shndx,
					unsigned int sym_offs,
					const Elf32_Word *symtab_shndx_start)
{
	if (is_shndx_special(shndx))
		return SPECIAL(shndx);
	if (shndx != SHN_XINDEX)
		return shndx;
	return r(&symtab_shndx_start[sym_offs]);
}

/* 32 bit and 64 bit are very similar */
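/*
 * sorttable.h is included twice, the second time with SORTTABLE_64
 * defined, to generate both the ELF32 and ELF64 variants of the sort
 * routine (do_sort_32()/do_sort_64() used by do_file() below).
 */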
#include "sorttable.h"
|
|
|
|
#define SORTTABLE_64
|
|
|
|
#include "sorttable.h"
|
2012-04-20 05:59:55 +08:00
|
|
|
|
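/*
 * qsort() comparator: orders table entries by their first 32-bit field,
 * read through the endian-aware r() helper.
 */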
static int compare_relative_table(const void *a, const void *b)
{
	int32_t av = (int32_t)r(a);
	int32_t bv = (int32_t)r(b);

	if (av < bv)
		return -1;
	if (av > bv)
		return 1;
	return 0;
}

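/*
 * Sort a table whose entries are two 32-bit PC-relative offsets
 * (8 bytes per entry): normalize each offset to be section-relative,
 * sort, then convert back.
 */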
static void sort_relative_table(char *extab_image, int image_size)
{
	int i = 0;

	/*
	 * Do the same thing the runtime sort does, first normalize to
	 * being relative to the start of the section.
	 */
	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);
		w(r(loc) + i, loc);
		i += 4;
	}

	qsort(extab_image, image_size / 8, 8, compare_relative_table);

	/* Now denormalize. */
	i = 0;
	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);
		w(r(loc) - i, loc);
		i += 4;
	}
}

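/*
 * Like sort_relative_table(), but for tables whose entries carry an
 * extra 32-bit word of fixup type/data after the two relative offsets,
 * i.e. 12 bytes per entry.
 */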
static void sort_relative_table_with_data(char *extab_image, int image_size)
{
	int i = 0;

	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);

		w(r(loc) + i, loc);
		w(r(loc + 1) + i + 4, loc + 1);
		/* Don't touch the fixup type or data */

		i += sizeof(uint32_t) * 3;
	}

	qsort(extab_image, image_size / 12, 12, compare_relative_table);

	i = 0;
	while (i < image_size) {
		uint32_t *loc = (uint32_t *)(extab_image + i);

		w(r(loc) - i, loc);
		w(r(loc + 1) - (i + 4), loc + 1);
		/* Don't touch the fixup type or data */

		i += sizeof(uint32_t) * 3;
	}
}

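/*
 * Validate the ELF header, install the endian-specific accessors, pick a
 * custom sort routine based on e_machine, then hand off to the
 * class-specific do_sort_32()/do_sort_64() from sorttable.h.
 */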
static int do_file(char const *const fname, void *addr)
{
	int rc = -1;
	Elf32_Ehdr *ehdr = addr;
	table_sort_t custom_sort = NULL;

	switch (ehdr->e_ident[EI_DATA]) {
	case ELFDATA2LSB:
		r = rle;
		r2 = r2le;
		r8 = r8le;
		w = wle;
		w2 = w2le;
		w8 = w8le;
		break;
	case ELFDATA2MSB:
		r = rbe;
		r2 = r2be;
		r8 = r8be;
		w = wbe;
		w2 = w2be;
		w8 = w8be;
		break;
	default:
		fprintf(stderr, "unrecognized ELF data encoding %d: %s\n",
			ehdr->e_ident[EI_DATA], fname);
		return -1;
	}

	if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0 ||
	    (r2(&ehdr->e_type) != ET_EXEC && r2(&ehdr->e_type) != ET_DYN) ||
	    ehdr->e_ident[EI_VERSION] != EV_CURRENT) {
		fprintf(stderr, "unrecognized ET_EXEC/ET_DYN file %s\n", fname);
		return -1;
	}

	switch (r2(&ehdr->e_machine)) {
	case EM_386:
	case EM_AARCH64:
	case EM_RISCV:
	case EM_S390:
	case EM_X86_64:
		custom_sort = sort_relative_table_with_data;
		break;
	case EM_PARISC:
	case EM_PPC:
	case EM_PPC64:
		custom_sort = sort_relative_table;
		break;
	case EM_ARCOMPACT:
	case EM_ARCV2:
	case EM_ARM:
	case EM_LOONGARCH:
	case EM_MICROBLAZE:
	case EM_MIPS:
	case EM_XTENSA:
		break;
	default:
		fprintf(stderr, "unrecognized e_machine %d %s\n",
			r2(&ehdr->e_machine), fname);
		return -1;
	}

	switch (ehdr->e_ident[EI_CLASS]) {
	case ELFCLASS32:
		if (r2(&ehdr->e_ehsize) != sizeof(Elf32_Ehdr) ||
		    r2(&ehdr->e_shentsize) != sizeof(Elf32_Shdr)) {
			fprintf(stderr,
				"unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
			break;
		}
		rc = do_sort_32(ehdr, fname, custom_sort);
		break;
	case ELFCLASS64:
		{
		Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr;
		if (r2(&ghdr->e_ehsize) != sizeof(Elf64_Ehdr) ||
		    r2(&ghdr->e_shentsize) != sizeof(Elf64_Shdr)) {
			fprintf(stderr,
				"unrecognized ET_EXEC/ET_DYN file: %s\n",
				fname);
			break;
		}
		rc = do_sort_64(ghdr, fname, custom_sort);
		}
		break;
	default:
		fprintf(stderr, "unrecognized ELF class %d %s\n",
			ehdr->e_ident[EI_CLASS], fname);
		break;
	}

	return rc;
}

int main(int argc, char *argv[])
{
	int i, n_error = 0;	/* gcc-4.3.0 false positive complaint */
	size_t size = 0;
	void *addr = NULL;

	if (argc < 2) {
		fprintf(stderr, "usage: sorttable vmlinux...\n");
		return 0;
	}

	/* Process each file in turn, allowing deep failure. */
	for (i = 1; i < argc; i++) {
		addr = mmap_file(argv[i], &size);
		if (!addr) {
			++n_error;
			continue;
		}

		if (do_file(argv[i], addr))
			++n_error;

		munmap(addr, size);
	}

	return !!n_error;
}