tools: rename bitmap_alloc() to bitmap_zalloc()

Rename bitmap_alloc() to bitmap_zalloc() in tools to follow the bitmap API
in the kernel.

No functional changes intended.

Link: https://lkml.kernel.org/r/20210814211713.180533-14-yury.norov@gmail.com
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Yury Norov <yury.norov@gmail.com>
Suggested-by: Yury Norov <yury.norov@gmail.com>
Acked-by: Yury Norov <yury.norov@gmail.com>
Tested-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Cc: Alexander Lobakin <alobakin@pm.me>
Cc: Alexey Klimov <aklimov@redhat.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Ulf Hansson <ulf.hansson@linaro.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Andy Shevchenko 2021-09-07 19:59:35 -07:00 committed by Linus Torvalds
parent 44e5599775
commit 7fc5b57132
13 changed files with 20 additions and 20 deletions

View File

@@ -111,10 +111,10 @@ static inline int test_and_clear_bit(int nr, unsigned long *addr)
} }
/** /**
* bitmap_alloc - Allocate bitmap * bitmap_zalloc - Allocate bitmap
* @nbits: Number of bits * @nbits: Number of bits
*/ */
static inline unsigned long *bitmap_alloc(int nbits) static inline unsigned long *bitmap_zalloc(int nbits)
{ {
return calloc(1, BITS_TO_LONGS(nbits) * sizeof(unsigned long)); return calloc(1, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
} }

View File

@@ -54,7 +54,7 @@ static bool asm_test_bit(long nr, const unsigned long *addr)
static int do_for_each_set_bit(unsigned int num_bits) static int do_for_each_set_bit(unsigned int num_bits)
{ {
unsigned long *to_test = bitmap_alloc(num_bits); unsigned long *to_test = bitmap_zalloc(num_bits);
struct timeval start, end, diff; struct timeval start, end, diff;
u64 runtime_us; u64 runtime_us;
struct stats fb_time_stats, tb_time_stats; struct stats fb_time_stats, tb_time_stats;

View File

@@ -139,11 +139,11 @@ static void *c2c_he_zalloc(size_t size)
if (!c2c_he) if (!c2c_he)
return NULL; return NULL;
c2c_he->cpuset = bitmap_alloc(c2c.cpus_cnt); c2c_he->cpuset = bitmap_zalloc(c2c.cpus_cnt);
if (!c2c_he->cpuset) if (!c2c_he->cpuset)
return NULL; return NULL;
c2c_he->nodeset = bitmap_alloc(c2c.nodes_cnt); c2c_he->nodeset = bitmap_zalloc(c2c.nodes_cnt);
if (!c2c_he->nodeset) if (!c2c_he->nodeset)
return NULL; return NULL;
@@ -2047,7 +2047,7 @@ static int setup_nodes(struct perf_session *session)
struct perf_cpu_map *map = n[node].map; struct perf_cpu_map *map = n[node].map;
unsigned long *set; unsigned long *set;
set = bitmap_alloc(c2c.cpus_cnt); set = bitmap_zalloc(c2c.cpus_cnt);
if (!set) if (!set)
return -ENOMEM; return -ENOMEM;

View File

@@ -2786,7 +2786,7 @@ int cmd_record(int argc, const char **argv)
if (rec->opts.affinity != PERF_AFFINITY_SYS) { if (rec->opts.affinity != PERF_AFFINITY_SYS) {
rec->affinity_mask.nbits = cpu__max_cpu(); rec->affinity_mask.nbits = cpu__max_cpu();
rec->affinity_mask.bits = bitmap_alloc(rec->affinity_mask.nbits); rec->affinity_mask.bits = bitmap_zalloc(rec->affinity_mask.nbits);
if (!rec->affinity_mask.bits) { if (!rec->affinity_mask.bits) {
pr_err("Failed to allocate thread mask for %zd cpus\n", rec->affinity_mask.nbits); pr_err("Failed to allocate thread mask for %zd cpus\n", rec->affinity_mask.nbits);
err = -ENOMEM; err = -ENOMEM;

View File

@@ -14,7 +14,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
unsigned long *bm = NULL; unsigned long *bm = NULL;
int i; int i;
bm = bitmap_alloc(nbits); bm = bitmap_zalloc(nbits);
if (map && bm) { if (map && bm) {
for (i = 0; i < map->nr; i++) for (i = 0; i < map->nr; i++)

View File

@@ -27,7 +27,7 @@ static unsigned long *get_bitmap(const char *str, int nbits)
unsigned long *bm = NULL; unsigned long *bm = NULL;
int i; int i;
bm = bitmap_alloc(nbits); bm = bitmap_zalloc(nbits);
if (map && bm) { if (map && bm) {
for (i = 0; i < map->nr; i++) { for (i = 0; i < map->nr; i++) {

View File

@@ -25,11 +25,11 @@ int affinity__setup(struct affinity *a)
{ {
int cpu_set_size = get_cpu_set_size(); int cpu_set_size = get_cpu_set_size();
a->orig_cpus = bitmap_alloc(cpu_set_size * 8); a->orig_cpus = bitmap_zalloc(cpu_set_size * 8);
if (!a->orig_cpus) if (!a->orig_cpus)
return -1; return -1;
sched_getaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus); sched_getaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
a->sched_cpus = bitmap_alloc(cpu_set_size * 8); a->sched_cpus = bitmap_zalloc(cpu_set_size * 8);
if (!a->sched_cpus) { if (!a->sched_cpus) {
zfree(&a->orig_cpus); zfree(&a->orig_cpus);
return -1; return -1;

View File

@@ -278,7 +278,7 @@ static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
if (ret) if (ret)
return ret; return ret;
set = bitmap_alloc(size); set = bitmap_zalloc(size);
if (!set) if (!set)
return -ENOMEM; return -ENOMEM;
@@ -1294,7 +1294,7 @@ static int memory_node__read(struct memory_node *n, unsigned long idx)
size++; size++;
n->set = bitmap_alloc(size); n->set = bitmap_zalloc(size);
if (!n->set) { if (!n->set) {
closedir(dir); closedir(dir);
return -ENOMEM; return -ENOMEM;

View File

@@ -313,7 +313,7 @@ static int metricgroup__setup_events(struct list_head *groups,
struct evsel *evsel, *tmp; struct evsel *evsel, *tmp;
unsigned long *evlist_used; unsigned long *evlist_used;
evlist_used = bitmap_alloc(perf_evlist->core.nr_entries); evlist_used = bitmap_zalloc(perf_evlist->core.nr_entries);
if (!evlist_used) if (!evlist_used)
return -ENOMEM; return -ENOMEM;

View File

@@ -106,7 +106,7 @@ static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
data = map->aio.data[idx]; data = map->aio.data[idx];
mmap_len = mmap__mmap_len(map); mmap_len = mmap__mmap_len(map);
node_index = cpu__get_node(cpu); node_index = cpu__get_node(cpu);
node_mask = bitmap_alloc(node_index + 1); node_mask = bitmap_zalloc(node_index + 1);
if (!node_mask) { if (!node_mask) {
pr_err("Failed to allocate node mask for mbind: error %m\n"); pr_err("Failed to allocate node mask for mbind: error %m\n");
return -1; return -1;
@@ -258,7 +258,7 @@ static void build_node_mask(int node, struct mmap_cpu_mask *mask)
static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp) static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
{ {
map->affinity_mask.nbits = cpu__max_cpu(); map->affinity_mask.nbits = cpu__max_cpu();
map->affinity_mask.bits = bitmap_alloc(map->affinity_mask.nbits); map->affinity_mask.bits = bitmap_zalloc(map->affinity_mask.nbits);
if (!map->affinity_mask.bits) if (!map->affinity_mask.bits)
return -1; return -1;

View File

@@ -121,7 +121,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm); guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages); guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
host_num_pages = vm_num_host_pages(mode, guest_num_pages); host_num_pages = vm_num_host_pages(mode, guest_num_pages);
bmap = bitmap_alloc(host_num_pages); bmap = bitmap_zalloc(host_num_pages);
if (dirty_log_manual_caps) { if (dirty_log_manual_caps) {
cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2; cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;

View File

@@ -749,8 +749,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem); pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
bmap = bitmap_alloc(host_num_pages); bmap = bitmap_zalloc(host_num_pages);
host_bmap_track = bitmap_alloc(host_num_pages); host_bmap_track = bitmap_zalloc(host_num_pages);
/* Add an extra memory slot for testing dirty logging */ /* Add an extra memory slot for testing dirty logging */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,

View File

@@ -111,7 +111,7 @@ int main(int argc, char *argv[])
nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096); nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096); nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096);
bmap = bitmap_alloc(TEST_MEM_PAGES); bmap = bitmap_zalloc(TEST_MEM_PAGES);
host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM); host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);
while (!done) { while (!done) {