// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>

/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate or %NUMA_NO_NODE
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * Return: TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	/* Extra diagnostics under DEBUG_PER_CPU_MAPS: log and dump the
	 * stack on allocation failure before reporting it to the caller. */
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);

/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_alloc_or_panic(cpumask_size(), SMP_CACHE_BYTES);
}

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	/* kfree(NULL) is a no-op, hence NULL-safety. */
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free(mask, cpumask_size());
}
#endif /* CONFIG_CPUMASK_OFFSTACK */

/**
 * cpumask_local_spread - select the i'th cpu based on NUMA distances
 * @i: index number
 * @node: local numa_node
 *
 * Return: online CPU according to a numa aware policy; local cpus are returned
 * first, followed by non-local ones, then it wraps around.
 *
 * For those who wants to enumerate all CPUs based on their NUMA distances,
 * i.e. call this function in a loop, like:
 *
 * for (i = 0; i < num_online_cpus(); i++) {
 *	cpu = cpumask_local_spread(i, node);
 *	do_something(cpu);
 * }
 *
 * There's a better alternative based on for_each()-like iterators:
 *
 *	for_each_numa_hop_mask(mask, node) {
 *		for_each_cpu_andnot(cpu, mask, prev)
 *			do_something(cpu);
 *		prev = mask;
 *	}
 *
 * It's simpler and more verbose than above. Complexity of iterator-based
 * enumeration is O(sched_domains_numa_levels * nr_cpu_ids), while
 * cpumask_local_spread() when called for each cpu is
 * O(sched_domains_numa_levels * nr_cpu_ids * log(nr_cpu_ids)).
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	unsigned int cpu;

	/* Wrap: we always want a cpu. */
	i %= num_online_cpus();

	cpu = sched_numa_find_nth_cpu(cpu_online_mask, i, node);

	/* After the modulo above, i indexes an online CPU, so the search
	 * is expected to succeed; warn if it somehow did not. */
	WARN_ON(cpu >= nr_cpu_ids);
	return cpu;
}
EXPORT_SYMBOL(cpumask_local_spread);

/* Per-cpu round-robin cursor shared by cpumask_any_and_distribute() and
 * cpumask_any_distribute() below. */
static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - Return an arbitrary cpu within src1p & src2p.
 * @src1p: first &cpumask for intersection
 * @src2p: second &cpumask for intersection
 *
 * Iterated calls using the same srcp1 and srcp2 will be distributed within
 * their intersection.
 *
 * Return: >= nr_cpu_ids if the intersection is empty.
 */
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
					const struct cpumask *src2p)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = cpumask_next_and_wrap(prev, src1p, src2p);
	/* Only advance the cursor when a cpu was actually found. */
	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);

/**
 * cpumask_any_distribute - Return an arbitrary cpu from srcp
 * @srcp: &cpumask for selection
 *
 * Return: >= nr_cpu_ids if the intersection is empty.
 */
unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);
	next = cpumask_next_wrap(prev, srcp);
	/* Only advance the cursor when a cpu was actually found. */
	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_distribute);