diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -2209,53 +2209,53 @@ static Int128 do_ld_whole_be16(CPUState *cpu, uintptr_t ra,
/*
* Wrapper for the above.
*/
static uint64_t do_ld_beN(CPUState *cpu, MMULookupPageData *p,
uint64_t ret_be, int mmu_idx, MMUAccessType type,
MemOp mop, uintptr_t ra)
{
MemOp atom;
unsigned tmp, half_size;
if (unlikely(p->flags & TLB_MMIO)) {
return do_ld_mmio_beN(cpu, p->full, ret_be, p->addr, p->size,
mmu_idx, type, ra);
}
/*
* It is a given that we cross a page and therefore there is no
* atomicity for the load as a whole, but subobjects may need attention.
*/
atom = mop & MO_ATOM_MASK;
switch (atom) {
case MO_ATOM_SUBALIGN:
return do_ld_parts_beN(p, ret_be);
case MO_ATOM_IFALIGN_PAIR:
case MO_ATOM_WITHIN16_PAIR:
tmp = mop & MO_SIZE;
tmp = tmp ? tmp - 1 : 0;
half_size = 1 << tmp;
if (atom == MO_ATOM_IFALIGN_PAIR
? p->size == half_size
: p->size >= half_size) {
if (!HAVE_al8_fast && p->size < 4) {
return do_ld_whole_be4(p, ret_be);
} else {
return do_ld_whole_be8(cpu, ra, p, ret_be);
}
}
- /* fall through */
+ fallthrough;
case MO_ATOM_IFALIGN:
case MO_ATOM_WITHIN16:
case MO_ATOM_NONE:
return do_ld_bytes_beN(p, ret_be);
default:
g_assert_not_reached();
}
}
/*
* Wrapper for the above, for 8 < size < 16.
*/
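
The IFALIGN_PAIR/WITHIN16_PAIR branch in do_ld_beN() above is easier to follow with a concrete case. Below is a minimal standalone sketch of the half_size arithmetic, assuming a hypothetical 8-byte (MO_64) load that crosses a page with 5 bytes landing in the first page; the constants are illustrative, not taken from memop.h:

  /* Sketch of the pair/half-size check in do_ld_beN(). */
  #include <stdio.h>

  int main(void)
  {
      unsigned mo_size = 3;               /* MO_64: log2(8 bytes) = 3          */
      unsigned tmp = mo_size ? mo_size - 1 : 0;
      unsigned half_size = 1u << tmp;     /* 4 bytes: one half of the pair     */
      unsigned page1_bytes = 5;           /* bytes of the access in this page  */

      /* MO_ATOM_IFALIGN_PAIR takes the whole-word path only when the page
       * split falls exactly on the pair boundary (p->size == half_size).     */
      printf("ifalign_pair whole-load:  %d\n", page1_bytes == half_size);

      /* MO_ATOM_WITHIN16_PAIR only needs one complete half in this page
       * (p->size >= half_size).                                               */
      printf("within16_pair whole-load: %d\n", page1_bytes >= half_size);
      return 0;
  }
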
@@ -2625,57 +2625,57 @@ static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
/*
* Wrapper for the above.
*/
static uint64_t do_st_leN(CPUState *cpu, MMULookupPageData *p,
uint64_t val_le, int mmu_idx,
MemOp mop, uintptr_t ra)
{
MemOp atom;
unsigned tmp, half_size;
if (unlikely(p->flags & TLB_MMIO)) {
return do_st_mmio_leN(cpu, p->full, val_le, p->addr,
p->size, mmu_idx, ra);
} else if (unlikely(p->flags & TLB_DISCARD_WRITE)) {
return val_le >> (p->size * 8);
}
/*
* It is a given that we cross a page and therefore there is no atomicity
* for the store as a whole, but subobjects may need attention.
*/
atom = mop & MO_ATOM_MASK;
switch (atom) {
case MO_ATOM_SUBALIGN:
return store_parts_leN(p->haddr, p->size, val_le);
case MO_ATOM_IFALIGN_PAIR:
case MO_ATOM_WITHIN16_PAIR:
tmp = mop & MO_SIZE;
tmp = tmp ? tmp - 1 : 0;
half_size = 1 << tmp;
if (atom == MO_ATOM_IFALIGN_PAIR
? p->size == half_size
: p->size >= half_size) {
if (!HAVE_al8_fast && p->size <= 4) {
return store_whole_le4(p->haddr, p->size, val_le);
} else if (HAVE_al8) {
return store_whole_le8(p->haddr, p->size, val_le);
} else {
cpu_loop_exit_atomic(cpu, ra);
}
}
- /* fall through */
+ fallthrough;
case MO_ATOM_IFALIGN:
case MO_ATOM_WITHIN16:
case MO_ATOM_NONE:
return store_bytes_leN(p->haddr, p->size, val_le);
default:
g_assert_not_reached();
}
}
/*
* Wrapper for the above, for 8 < size < 16.
*/
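
One convention in do_st_leN() worth spelling out is its return value: the low p->size bytes of val_le are the ones consumed by this page (or discarded for TLB_DISCARD_WRITE), and the shifted remainder is handed back for the second page. A small sketch with invented values; only the shift mirrors the real code:

  /* "Consume the low bytes, return the rest", as used by the LE store helpers. */
  #include <inttypes.h>
  #include <stdio.h>

  int main(void)
  {
      uint64_t val_le = 0x8877665544332211ull; /* full 8-byte value, LE order */
      unsigned size = 3;                       /* bytes written to page 1     */

      /* After bytes 0x11 0x22 0x33 go to the first page, the caller gets the
       * value shifted down so the second page's bytes start at bit 0.        */
      uint64_t rest = val_le >> (size * 8);
      printf("value left for page 2: 0x%016" PRIx64 "\n", rest); /* ...8877665544 */
      return 0;
  }
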
diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc
--- a/accel/tcg/ldst_atomicity.c.inc
+++ b/accel/tcg/ldst_atomicity.c.inc
@@ -22,86 +22,86 @@
/**
* required_atomicity:
*
* Return the lg2 bytes of atomicity required by @memop for @p.
* If the operation must be split into two operations to be
* examined separately for atomicity, return -lg2.
*/
static int required_atomicity(CPUState *cpu, uintptr_t p, MemOp memop)
{
MemOp atom = memop & MO_ATOM_MASK;
MemOp size = memop & MO_SIZE;
MemOp half = size ? size - 1 : 0;
unsigned tmp;
int atmax;
switch (atom) {
case MO_ATOM_NONE:
atmax = MO_8;
break;
case MO_ATOM_IFALIGN_PAIR:
size = half;
- /* fall through */
+ fallthrough;
case MO_ATOM_IFALIGN:
tmp = (1 << size) - 1;
atmax = p & tmp ? MO_8 : size;
break;
case MO_ATOM_WITHIN16:
tmp = p & 15;
atmax = (tmp + (1 << size) <= 16 ? size : MO_8);
break;
case MO_ATOM_WITHIN16_PAIR:
tmp = p & 15;
if (tmp + (1 << size) <= 16) {
atmax = size;
} else if (tmp + (1 << half) == 16) {
/*
* The pair exactly straddles the boundary.
* Both halves are naturally aligned and atomic.
*/
atmax = half;
} else {
/*
* One of the pair crosses the boundary, and is non-atomic.
* The other of the pair does not cross, and is atomic.
*/
atmax = -half;
}
break;
case MO_ATOM_SUBALIGN:
/*
* Examine the alignment of p to determine if there are subobjects
* that must be aligned. Note that we only really need ctz4() --
* any more significant bits are discarded by the immediately
* following comparison.
*/
tmp = ctz32(p);
atmax = MIN(size, tmp);
break;
default:
g_assert_not_reached();
}
/*
* Here we have the architectural atomicity of the operation.
* However, when executing in a serial context, we need no extra
* host atomicity in order to avoid racing. This reduction
* avoids looping with cpu_loop_exit_atomic.
*/
if (cpu_in_serial_context(cpu)) {
return MO_8;
}
return atmax;
}
/**
* load_atomic2:
* @pv: host address
*
* Atomically load 2 aligned bytes from @pv.
*/
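
Of the cases in required_atomicity(), MO_ATOM_WITHIN16_PAIR is the subtle one: whether it yields size, half or -half depends only on the low four bits of the address. The following standalone restatement of that branch, with a few worked addresses for a 16-byte access (size = MO_128 = 4, half = MO_64 = 3 in the usual log2-byte MemOp encoding), may help:

  /* Restatement of the MO_ATOM_WITHIN16_PAIR branch of required_atomicity(). */
  #include <stdint.h>
  #include <stdio.h>

  static int within16_pair_atmax(uintptr_t p, int size, int half)
  {
      unsigned tmp = p & 15;
      if (tmp + (1u << size) <= 16) {
          return size;      /* whole access fits in one 16-byte granule        */
      } else if (tmp + (1u << half) == 16) {
          return half;      /* exact straddle: both halves aligned and atomic  */
      }
      return -half;         /* one half crosses the boundary and is non-atomic */
  }

  int main(void)
  {
      printf("%d\n", within16_pair_atmax(0x1000, 4, 3)); /*  4: fully inside     */
      printf("%d\n", within16_pair_atmax(0x1008, 4, 3)); /*  3: exact straddle   */
      printf("%d\n", within16_pair_atmax(0x1004, 4, 3)); /* -3: one half crosses */
      return 0;
  }
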
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -179,23 +179,23 @@ static void gen_wrapped(enum plugin_gen_from from,
static void plugin_gen_empty_callback(enum plugin_gen_from from)
{
switch (from) {
case PLUGIN_GEN_AFTER_INSN:
gen_wrapped(from, PLUGIN_GEN_DISABLE_MEM_HELPER,
gen_empty_mem_helper);
break;
case PLUGIN_GEN_FROM_INSN:
/*
* Note: plugin_gen_inject() relies on ENABLE_MEM_HELPER being
* the first callback of an instruction
*/
gen_wrapped(from, PLUGIN_GEN_ENABLE_MEM_HELPER,
gen_empty_mem_helper);
- /* fall through */
+ fallthrough;
case PLUGIN_GEN_FROM_TB:
gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb);
gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb);
break;
default:
g_assert_not_reached();
}
}
In preparation for raising -Wimplicit-fallthrough to 5, replace all
fall-through comments with the fallthrough attribute pseudo-keyword.

Signed-off-by: Emmanouil Pitsidianakis <manos.pitsidianakis@linaro.org>
---
 accel/tcg/cputlb.c             | 4 ++--
 accel/tcg/ldst_atomicity.c.inc | 2 +-
 accel/tcg/plugin-gen.c         | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)
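
For readers seeing only this part of the series: the fallthrough pseudo-keyword used in the hunks above is defined once in a shared compiler header rather than per file, and its exact definition is not part of this patch. A typical shape for such a macro, modeled on the Linux kernel's equivalent and shown here only as a hedged sketch, is:

  /* Illustrative only; the definition introduced elsewhere in this series
   * may differ in detail.                                                  */
  #if defined(__has_attribute)
  # if __has_attribute(__fallthrough__)
  #  define fallthrough __attribute__((__fallthrough__))
  # endif
  #endif
  #ifndef fallthrough
  # define fallthrough do {} while (0) /* fallthrough */
  #endif

At -Wimplicit-fallthrough=5, GCC no longer accepts any comment as a fall-through marker, so the attribute form is the only way to annotate an intentional fall through; that is what motivates the mechanical replacement in this patch.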