@@ -112,10 +112,10 @@ static inline int test_and_clear_bit(int
}
/**
- * bitmap_alloc - Allocate bitmap
+ * bitmap_zalloc - Allocate bitmap
* @nbits: Number of bits
*/
-static inline unsigned long *bitmap_alloc(int nbits)
+static inline unsigned long *bitmap_zalloc(int nbits)
{
return calloc(1, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
}
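The rename above works because the tools/ helper hands back calloc()'d storage, so a freshly allocated bitmap is already all-zeroes, which is what the kernel's bitmap_zalloc() name implies. Below is a minimal stand-alone sketch of that zero-fill contract; the main() driver, the 128-bit size, and freeing with plain free() are illustrative assumptions and not part of this patch.

```c
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* Mirrors the tools/include helpers touched by the hunk above. */
#define BITS_PER_LONG    (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static inline unsigned long *bitmap_zalloc(int nbits)
{
	/* calloc() zeroes the backing storage, hence the "z" in the name. */
	return calloc(1, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
}

int main(void)
{
	int nbits = 128;                       /* arbitrary example size */
	unsigned long *bm = bitmap_zalloc(nbits);

	if (!bm)
		return 1;                      /* callers in the patch return -ENOMEM/-1 here */

	printf("word 0 after zalloc: %#lx\n", bm[0]);  /* prints 0: every bit starts clear */

	free(bm);                              /* illustrative; perf code frees via free()/zfree() */
	return 0;
}
```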
@@ -54,7 +54,7 @@ static bool asm_test_bit(long nr, const
static int do_for_each_set_bit(unsigned int num_bits)
{
- unsigned long *to_test = bitmap_alloc(num_bits);
+ unsigned long *to_test = bitmap_zalloc(num_bits);
struct timeval start, end, diff;
u64 runtime_us;
struct stats fb_time_stats, tb_time_stats;
@@ -139,11 +139,11 @@ static void *c2c_he_zalloc(size_t size)
if (!c2c_he)
return NULL;
- c2c_he->cpuset = bitmap_alloc(c2c.cpus_cnt);
+ c2c_he->cpuset = bitmap_zalloc(c2c.cpus_cnt);
if (!c2c_he->cpuset)
return NULL;
- c2c_he->nodeset = bitmap_alloc(c2c.nodes_cnt);
+ c2c_he->nodeset = bitmap_zalloc(c2c.nodes_cnt);
if (!c2c_he->nodeset)
return NULL;
@@ -2047,7 +2047,7 @@ static int setup_nodes(struct perf_sessi
struct perf_cpu_map *map = n[node].map;
unsigned long *set;
- set = bitmap_alloc(c2c.cpus_cnt);
+ set = bitmap_zalloc(c2c.cpus_cnt);
if (!set)
return -ENOMEM;
@@ -2786,7 +2786,7 @@ int cmd_record(int argc, const char **ar
if (rec->opts.affinity != PERF_AFFINITY_SYS) {
rec->affinity_mask.nbits = cpu__max_cpu();
- rec->affinity_mask.bits = bitmap_alloc(rec->affinity_mask.nbits);
+ rec->affinity_mask.bits = bitmap_zalloc(rec->affinity_mask.nbits);
if (!rec->affinity_mask.bits) {
pr_err("Failed to allocate thread mask for %zd cpus\n", rec->affinity_mask.nbits);
err = -ENOMEM;
@@ -14,7 +14,7 @@ static unsigned long *get_bitmap(const c
unsigned long *bm = NULL;
int i;
- bm = bitmap_alloc(nbits);
+ bm = bitmap_zalloc(nbits);
if (map && bm) {
for (i = 0; i < map->nr; i++)
@@ -27,7 +27,7 @@ static unsigned long *get_bitmap(const c
unsigned long *bm = NULL;
int i;
- bm = bitmap_alloc(nbits);
+ bm = bitmap_zalloc(nbits);
if (map && bm) {
for (i = 0; i < map->nr; i++) {
@@ -25,11 +25,11 @@ int affinity__setup(struct affinity *a)
{
int cpu_set_size = get_cpu_set_size();
- a->orig_cpus = bitmap_alloc(cpu_set_size * 8);
+ a->orig_cpus = bitmap_zalloc(cpu_set_size * 8);
if (!a->orig_cpus)
return -1;
sched_getaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
- a->sched_cpus = bitmap_alloc(cpu_set_size * 8);
+ a->sched_cpus = bitmap_zalloc(cpu_set_size * 8);
if (!a->sched_cpus) {
zfree(&a->orig_cpus);
return -1;
@@ -278,7 +278,7 @@ static int do_read_bitmap(struct feat_fd
if (ret)
return ret;
- set = bitmap_alloc(size);
+ set = bitmap_zalloc(size);
if (!set)
return -ENOMEM;
@@ -1294,7 +1294,7 @@ static int memory_node__read(struct memo
size++;
- n->set = bitmap_alloc(size);
+ n->set = bitmap_zalloc(size);
if (!n->set) {
closedir(dir);
return -ENOMEM;
@@ -313,7 +313,7 @@ static int metricgroup__setup_events(str
struct evsel *evsel, *tmp;
unsigned long *evlist_used;
- evlist_used = bitmap_alloc(perf_evlist->core.nr_entries);
+ evlist_used = bitmap_zalloc(perf_evlist->core.nr_entries);
if (!evlist_used)
return -ENOMEM;
@@ -106,7 +106,7 @@ static int perf_mmap__aio_bind(struct mm
data = map->aio.data[idx];
mmap_len = mmap__mmap_len(map);
node_index = cpu__get_node(cpu);
- node_mask = bitmap_alloc(node_index + 1);
+ node_mask = bitmap_zalloc(node_index + 1);
if (!node_mask) {
pr_err("Failed to allocate node mask for mbind: error %m\n");
return -1;
@@ -258,7 +258,7 @@ static void build_node_mask(int node, st
static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
{
map->affinity_mask.nbits = cpu__max_cpu();
- map->affinity_mask.bits = bitmap_alloc(map->affinity_mask.nbits);
+ map->affinity_mask.bits = bitmap_zalloc(map->affinity_mask.nbits);
if (!map->affinity_mask.bits)
return -1;
@@ -121,7 +121,7 @@ static void run_test(enum vm_guest_mode
guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
host_num_pages = vm_num_host_pages(mode, guest_num_pages);
- bmap = bitmap_alloc(host_num_pages);
+ bmap = bitmap_zalloc(host_num_pages);
if (dirty_log_manual_caps) {
cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
@@ -749,8 +749,8 @@ static void run_test(enum vm_guest_mode
pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
- bmap = bitmap_alloc(host_num_pages);
- host_bmap_track = bitmap_alloc(host_num_pages);
+ bmap = bitmap_zalloc(host_num_pages);
+ host_bmap_track = bitmap_zalloc(host_num_pages);
/* Add an extra memory slot for testing dirty logging */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
@@ -111,7 +111,7 @@ int main(int argc, char *argv[])
nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096);
nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096);
- bmap = bitmap_alloc(TEST_MEM_PAGES);
+ bmap = bitmap_zalloc(TEST_MEM_PAGES);
host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);
while (!done) {