--- a/xen/arch/arm/gic-vgic.c
+++ b/xen/arch/arm/gic-vgic.c
@@ -111,7 +111,7 @@ static unsigned int gic_find_unused_lr(struct vcpu *v,
{
unsigned int used_lr;
- for_each_set_bit(used_lr, lr_mask, nr_lrs)
+ bitmap_for_each(used_lr, lr_mask, nr_lrs)
{
gic_hw_ops->read_lr(used_lr, &lr_val);
if ( lr_val.virq == p->irq )
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -429,7 +429,7 @@ void vgic_set_irqs_pending(struct vcpu *v, uint32_t r, unsigned int rank)
/* LPIs will never be set pending via this function */
ASSERT(!is_lpi(32 * rank + 31));
- for_each_set_bit( i, &mask, 32 )
+ bitmap_for_each( i, &mask, 32 )
{
unsigned int irq = i + 32 * rank;
@@ -483,7 +483,7 @@ bool vgic_to_sgi(struct vcpu *v, register_t sgir, enum gic_sgi_mode irqmode,
perfc_incr(vgic_sgi_list);
base = target->aff1 << 4;
bitmap = target->list;
- for_each_set_bit( i, &bitmap, sizeof(target->list) * 8 )
+ bitmap_for_each( i, &bitmap, sizeof(target->list) * 8 )
{
vcpuid = base + i;
if ( vcpuid >= d->max_vcpus || d->vcpu[vcpuid] == NULL ||
@@ -728,7 +728,7 @@ void vgic_check_inflight_irqs_pending(struct domain *d, struct vcpu *v,
const unsigned long mask = r;
unsigned int i;
- for_each_set_bit( i, &mask, 32 )
+ bitmap_for_each( i, &mask, 32 )
{
struct pending_irq *p;
struct vcpu *v_target;
--- a/xen/arch/arm/vgic/vgic-mmio-v2.c
+++ b/xen/arch/arm/vgic/vgic-mmio-v2.c
@@ -108,7 +108,7 @@ static void vgic_mmio_write_sgir(struct vcpu *source_vcpu,
return;
}
- for_each_set_bit( vcpu_id, &targets, 8 )
+ bitmap_for_each( vcpu_id, &targets, 8 )
{
struct vcpu *vcpu = d->vcpu[vcpu_id];
struct vgic_irq *irq = vgic_get_irq(d, vcpu, intid);
--- a/xen/arch/arm/vgic/vgic-mmio.c
+++ b/xen/arch/arm/vgic/vgic-mmio.c
@@ -71,7 +71,7 @@ void vgic_mmio_write_senable(struct vcpu *vcpu,
uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1);
unsigned int i;
- for_each_set_bit( i, &val, len * 8 )
+ bitmap_for_each( i, &val, len * 8 )
{
struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
unsigned long flags;
@@ -116,7 +116,7 @@ void vgic_mmio_write_cenable(struct vcpu *vcpu,
uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1);
unsigned int i;
- for_each_set_bit( i, &val, len * 8 )
+ bitmap_for_each( i, &val, len * 8 )
{
struct vgic_irq *irq;
unsigned long flags;
@@ -186,7 +186,7 @@ void vgic_mmio_write_spending(struct vcpu *vcpu,
unsigned long flags;
irq_desc_t *desc;
- for_each_set_bit( i, &val, len * 8 )
+ bitmap_for_each( i, &val, len * 8 )
{
struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
@@ -234,7 +234,7 @@ void vgic_mmio_write_cpending(struct vcpu *vcpu,
unsigned long flags;
irq_desc_t *desc;
- for_each_set_bit( i, &val, len * 8 )
+ bitmap_for_each( i, &val, len * 8 )
{
struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
@@ -328,7 +328,7 @@ void vgic_mmio_write_cactive(struct vcpu *vcpu,
uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1);
unsigned int i;
- for_each_set_bit( i, &val, len * 8 )
+ bitmap_for_each( i, &val, len * 8 )
{
struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
@@ -358,7 +358,7 @@ void vgic_mmio_write_sactive(struct vcpu *vcpu,
uint32_t intid = VGIC_ADDR_TO_INTID(addr, 1);
unsigned int i;
- for_each_set_bit( i, &val, len * 8 )
+ bitmap_for_each( i, &val, len * 8 )
{
struct vgic_irq *irq = vgic_get_irq(vcpu->domain, vcpu, intid + i);
--- a/xen/arch/x86/cpu-policy.c
+++ b/xen/arch/x86/cpu-policy.c
@@ -157,7 +157,7 @@ static void zero_leaves(struct cpuid_leaf *l,
static void sanitise_featureset(uint32_t *fs)
{
- /* for_each_set_bit() uses unsigned longs. Extend with zeroes. */
+ /* bitmap_for_each() uses unsigned longs. Extend with zeroes. */
uint32_t disabled_features[
ROUNDUP(FSCAPINTS, sizeof(unsigned long)/sizeof(uint32_t))] = {};
unsigned int i;
@@ -174,8 +174,8 @@ static void sanitise_featureset(uint32_t *fs)
disabled_features[i] = ~fs[i] & deep_features[i];
}
- for_each_set_bit(i, (void *)disabled_features,
- sizeof(disabled_features) * 8)
+ bitmap_for_each(i, (void *)disabled_features,
+ sizeof(disabled_features) * 8)
{
const uint32_t *dfs = x86_cpu_policy_lookup_deep_deps(i);
unsigned int j;
@@ -237,7 +237,7 @@ static void recalculate_xstate(struct cpu_policy *p)
/* Subleafs 2+ */
xstates &= ~XSTATE_FP_SSE;
BUILD_BUG_ON(ARRAY_SIZE(p->xstate.comp) < 63);
- for_each_set_bit ( i, &xstates, 63 )
+ bitmap_for_each ( i, &xstates, 63 )
{
/*
* Pass through size (eax) and offset (ebx) directly. Visibility of
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -606,7 +606,7 @@ unsigned int xstate_uncompressed_size(uint64_t xcr0)
* with respect their index.
*/
xcr0 &= ~(X86_XCR0_SSE | X86_XCR0_X87);
- for_each_set_bit ( i, &xcr0, 63 )
+ bitmap_for_each ( i, &xcr0, 63 )
{
const struct xstate_component *c = &raw_cpu_policy.xstate.comp[i];
unsigned int s = c->offset + c->size;
@@ -634,7 +634,7 @@ unsigned int xstate_compressed_size(uint64_t xstates)
* components require aligning to 64 first.
*/
xstates &= ~(X86_XCR0_SSE | X86_XCR0_X87);
- for_each_set_bit ( i, &xstates, 63 )
+ bitmap_for_each ( i, &xstates, 63 )
{
const struct xstate_component *c = &raw_cpu_policy.xstate.comp[i];
--- a/xen/include/xen/bitmap.h
+++ b/xen/include/xen/bitmap.h
@@ -271,6 +271,18 @@ static inline void bitmap_clear(unsigned long *map, unsigned int start,
#undef bitmap_switch
#undef bitmap_bytes
+/**
+ * bitmap_for_each - iterate over every set bit in a memory region
+ * @iter: The integer iterator
+ * @addr: The address to base the search on
+ * @size: The maximum size to search
+ */
+#define bitmap_for_each(iter, addr, size) \
+ for ( (iter) = find_first_bit(addr, size); \
+ (iter) < (size); \
+ (iter) = find_next_bit(addr, size, (iter) + 1) )
+
+
struct xenctl_bitmap;
int xenctl_bitmap_to_bitmap(unsigned long *bitmap,
const struct xenctl_bitmap *xenctl_bitmap,
--- a/xen/include/xen/bitops.h
+++ b/xen/include/xen/bitops.h
@@ -248,17 +248,6 @@ static inline __u32 ror32(__u32 word, unsigned int shift)
#define __L16(x) (((x) & 0x0000ff00U) ? ( 8 + __L8( (x) >> 8)) : __L8( x))
#define ilog2(x) (((x) & 0xffff0000U) ? (16 + __L16((x) >> 16)) : __L16(x))
-/**
- * for_each_set_bit - iterate over every set bit in a memory region
- * @bit: The integer iterator
- * @addr: The address to base the search on
- * @size: The maximum size to search
- */
-#define for_each_set_bit(bit, addr, size) \
- for ( (bit) = find_first_bit(addr, size); \
- (bit) < (size); \
- (bit) = find_next_bit(addr, size, (bit) + 1) )
-
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#endif /* XEN_BITOPS_H */
The current implementation wants to take an in-memory bitmap.  However,
all ARM callers and all-but-one x86 callers spill a scalar to the stack
in order to use the "generic arbitrary bitmap" helpers under the hood.
This works, but it's far from ideal.

Rename the construct and move it into bitmap.h, because having an
iterator for an arbitrary bitmap is a useful thing.  This will allow us
to re-implement for_each_set_bit() to be more appropriate for scalar
values.

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Stefano Stabellini <sstabellini@kernel.org>
CC: Julien Grall <julien@xen.org>
CC: Volodymyr Babchuk <Volodymyr_Babchuk@epam.com>
CC: Bertrand Marquis <bertrand.marquis@arm.com>
CC: Michal Orzel <michal.orzel@amd.com>
CC: Oleksii Kurochko <oleksii.kurochko@gmail.com>
---
 xen/arch/arm/gic-vgic.c          |  2 +-
 xen/arch/arm/vgic.c              |  6 +++---
 xen/arch/arm/vgic/vgic-mmio-v2.c |  2 +-
 xen/arch/arm/vgic/vgic-mmio.c    | 12 ++++++------
 xen/arch/x86/cpu-policy.c        |  8 ++++----
 xen/arch/x86/xstate.c            |  4 ++--
 xen/include/xen/bitmap.h         | 12 ++++++++++++
 xen/include/xen/bitops.h         | 11 -----------
 8 files changed, 29 insertions(+), 28 deletions(-)
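As an aside for readers following along, the stack spill described above
is easiest to see in a standalone sketch.  The find_first_bit() /
find_next_bit() stand-ins below are naive bit-at-a-time loops written
only so the example compiles outside the Xen tree (the real helpers scan
a word at a time); the macro itself has the same shape as the one this
patch adds to xen/include/xen/bitmap.h, and the 0x8041 register value is
an arbitrary example.

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/*
 * Naive bit-at-a-time scan, only so this sketch is self-contained;
 * Xen's real find_next_bit() works a word at a time.
 */
static unsigned int find_next_bit(const unsigned long *addr,
                                  unsigned int size, unsigned int offset)
{
    for ( ; offset < size; offset++ )
        if ( addr[offset / BITS_PER_LONG] &
             (1UL << (offset % BITS_PER_LONG)) )
            break;

    return offset;                 /* == size when no further bit is set */
}

static unsigned int find_first_bit(const unsigned long *addr,
                                   unsigned int size)
{
    return find_next_bit(addr, size, 0);
}

/* Same shape as the macro added to xen/include/xen/bitmap.h above. */
#define bitmap_for_each(iter, addr, size)                 \
    for ( (iter) = find_first_bit(addr, size);            \
          (iter) < (size);                                \
          (iter) = find_next_bit(addr, size, (iter) + 1) )

int main(void)
{
    uint32_t r = 0x8041;           /* stand-in for a 32-bit register read */
    const unsigned long mask = r;  /* the stack spill discussed above */
    unsigned int i;

    bitmap_for_each ( i, &mask, 32 )
        printf("bit %u is set\n", i);   /* prints bits 0, 6 and 15 */

    return 0;
}

The "const unsigned long mask = r;" line is the same pattern visible in
the vgic_check_inflight_irqs_pending() hunk; the scalar-oriented
for_each_set_bit() re-implementation that the message says will follow
is what makes that copy unnecessary.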