@@ -63,68 +63,68 @@ static void do_tr_or_bp(CPUMIPSState *env, unsigned int code, bool trap)
void cpu_loop(CPUMIPSState *env)
{
CPUState *cs = env_cpu(env);
int trapnr, si_code;
unsigned int code;
abi_long ret;
# ifdef TARGET_ABI_MIPSO32
unsigned int syscall_num;
# endif
for(;;) {
cpu_exec_start(cs);
trapnr = cpu_exec(cs);
cpu_exec_end(cs);
process_queued_cpu_work(cs);
switch(trapnr) {
case EXCP_SYSCALL:
env->active_tc.PC += 4;
# ifdef TARGET_ABI_MIPSO32
syscall_num = env->active_tc.gpr[2] - 4000;
if (syscall_num >= sizeof(mips_syscall_args)) {
/* syscall_num is larger than any defined for MIPS O32 */
ret = -TARGET_ENOSYS;
} else if (mips_syscall_args[syscall_num] ==
MIPS_SYSCALL_NUMBER_UNUSED) {
/* syscall_num belongs to the range not defined for MIPS O32 */
ret = -TARGET_ENOSYS;
} else {
/* syscall_num is valid */
int nb_args;
abi_ulong sp_reg;
abi_ulong arg5 = 0, arg6 = 0, arg7 = 0, arg8 = 0;
nb_args = mips_syscall_args[syscall_num];
sp_reg = env->active_tc.gpr[29];
switch (nb_args) {
/* these arguments are taken from the stack */
case 8:
if ((ret = get_user_ual(arg8, sp_reg + 28)) != 0) {
goto done_syscall;
}
- /* fall through */
+ fallthrough;
case 7:
if ((ret = get_user_ual(arg7, sp_reg + 24)) != 0) {
goto done_syscall;
}
- /* fall through */
+ fallthrough;
case 6:
if ((ret = get_user_ual(arg6, sp_reg + 20)) != 0) {
goto done_syscall;
}
- /* fall through */
+ fallthrough;
case 5:
if ((ret = get_user_ual(arg5, sp_reg + 16)) != 0) {
goto done_syscall;
}
- /* fall through */
+ fallthrough;
default:
break;
}
ret = do_syscall(env, env->active_tc.gpr[2],
env->active_tc.gpr[4],
env->active_tc.gpr[5],
env->active_tc.gpr[6],
env->active_tc.gpr[7],
arg5, arg6, arg7, arg8);
}
@@ -960,84 +960,84 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice)
{
abi_ulong len;
int ret = 0;
if (start & ~TARGET_PAGE_MASK) {
return -TARGET_EINVAL;
}
if (len_in == 0) {
return 0;
}
len = TARGET_PAGE_ALIGN(len_in);
if (len == 0 || !guest_range_valid_untagged(start, len)) {
return -TARGET_EINVAL;
}
/* Translate for some architectures which have different MADV_xxx values */
switch (advice) {
case TARGET_MADV_DONTNEED: /* alpha */
advice = MADV_DONTNEED;
break;
case TARGET_MADV_WIPEONFORK: /* parisc */
advice = MADV_WIPEONFORK;
break;
case TARGET_MADV_KEEPONFORK: /* parisc */
advice = MADV_KEEPONFORK;
break;
/* we do not care about the other MADV_xxx values yet */
}
/*
* Most advice values are hints, so ignoring and returning success is ok.
*
* However, some advice values such as MADV_DONTNEED, MADV_WIPEONFORK and
* MADV_KEEPONFORK are not hints and need to be emulated.
*
* A straight passthrough for those may not be safe because qemu sometimes
* turns private file-backed mappings into anonymous mappings.
* If all guest pages have PAGE_PASSTHROUGH set, mappings have the
* same semantics for the host as for the guest.
*
* We pass through MADV_WIPEONFORK and MADV_KEEPONFORK if possible and
* return failure if not.
*
* MADV_DONTNEED is passed through as well, if possible.
* If passthrough isn't possible, we nevertheless (wrongly!) return
* success, which is broken but some userspace programs fail to work
* otherwise. Completely implementing such emulation is quite complicated
* though.
*/
mmap_lock();
switch (advice) {
case MADV_WIPEONFORK:
case MADV_KEEPONFORK:
ret = -EINVAL;
- /* fall through */
+ fallthrough;
case MADV_DONTNEED:
if (page_check_range(start, len, PAGE_PASSTHROUGH)) {
ret = get_errno(madvise(g2h_untagged(start), len, advice));
if ((advice == MADV_DONTNEED) && (ret == 0)) {
page_reset_target_data(start, start + len - 1);
}
}
}
mmap_unlock();
return ret;
}
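Illustration (not part of this patch): MADV_DONTNEED and MADV_WIPEONFORK have guest-observable semantics, which is why the comment above treats them differently from ordinary hints. A small standalone Linux program, sketched here purely for illustration (it assumes a host kernel >= 4.14 with MADV_WIPEONFORK and uses nothing from QEMU), shows the effect the emulation has to preserve:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    long pagesize = sysconf(_SC_PAGESIZE);
    char *p = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (p == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    strcpy(p, "secret");
    if (madvise(p, pagesize, MADV_WIPEONFORK) != 0) {
        perror("madvise");                        /* needs Linux >= 4.14 */
        return 1;
    }
    if (fork() == 0) {
        /* The advice is not a hint: the child must see the page zero-filled. */
        printf("child sees:  \"%s\"\n", p);       /* prints "" */
        _exit(0);
    }
    wait(NULL);
    printf("parent sees: \"%s\"\n", p);           /* still "secret" */
    return 0;
}

Under qemu-user the same program should behave identically, which is what the passthrough path above tries to guarantee; the comment's caveat about MADV_DONTNEED is the one place where the emulation knowingly falls short.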
#ifndef TARGET_FORCE_SHMLBA
/*
* For most architectures, SHMLBA is the same as the page size;
* some architectures have larger values, in which case they should
* define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
* This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
* and defining its own value for SHMLBA.
*
* The kernel also permits SHMLBA to be set by the architecture to a
* value larger than the page size without setting __ARCH_FORCE_SHMLBA;
* this means that addresses are rounded to the large size if
* SHM_RND is set but addresses not aligned to that size are not rejected
* as long as they are at least page-aligned. Since the only architecture
* which uses this is ia64 this code doesn't provide for that oddity.
*/
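Illustration (not part of this patch): a target whose SHMLBA exceeds the page size opts out of the default described above roughly as sketched below, typically in its linux-user/<arch>/target_syscall.h header (that location, the CPUArchState-based signature, and the 0x40000 value, which is MIPS's SHMLBA, are all given here only as assumptions for the example):

/* Sketch only: per-target override for an architecture with a large SHMLBA. */
#define TARGET_FORCE_SHMLBA 1

static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return 0x40000;    /* e.g. MIPS aligns shmat() addresses to 256 KiB */
}

The shmat() emulation then rounds and checks attach addresses against that value instead of the page size.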
@@ -7740,74 +7740,74 @@ static int do_safe_futex(int *uaddr, int op, int val,
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things. However, implementing
   futexes locally would make futexes shared between multiple processes
   tricky. Then again, they're probably useless because guest atomic
   operations won't work either. */
#if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
int op, int val, target_ulong timeout,
target_ulong uaddr2, int val3)
{
struct timespec ts, *pts = NULL;
void *haddr2 = NULL;
int base_op;
/* We assume FUTEX_* constants are the same on both host and target. */
#ifdef FUTEX_CMD_MASK
base_op = op & FUTEX_CMD_MASK;
#else
base_op = op;
#endif
switch (base_op) {
case FUTEX_WAIT:
case FUTEX_WAIT_BITSET:
val = tswap32(val);
break;
case FUTEX_WAIT_REQUEUE_PI:
val = tswap32(val);
haddr2 = g2h(cpu, uaddr2);
break;
case FUTEX_LOCK_PI:
case FUTEX_LOCK_PI2:
break;
case FUTEX_WAKE:
case FUTEX_WAKE_BITSET:
case FUTEX_TRYLOCK_PI:
case FUTEX_UNLOCK_PI:
timeout = 0;
break;
case FUTEX_FD:
val = target_to_host_signal(val);
timeout = 0;
break;
case FUTEX_CMP_REQUEUE:
case FUTEX_CMP_REQUEUE_PI:
val3 = tswap32(val3);
- /* fall through */
+ fallthrough;
case FUTEX_REQUEUE:
case FUTEX_WAKE_OP:
/*
* For these, the 4th argument is not TIMEOUT, but VAL2.
* But the prototype of do_safe_futex takes a pointer, so
* insert casts to satisfy the compiler. We do not need
* to tswap VAL2 since it's not compared to guest memory.
*/
pts = (struct timespec *)(uintptr_t)timeout;
timeout = 0;
haddr2 = g2h(cpu, uaddr2);
break;
default:
return -TARGET_ENOSYS;
}
if (timeout) {
pts = &ts;
if (time64
? target_to_host_timespec64(pts, timeout)
: target_to_host_timespec(pts, timeout)) {
return -TARGET_EFAULT;
}
}
return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
}
#endif
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
In preparation for raising -Wimplicit-fallthrough to 5, replace all
fall-through comments with the fallthrough attribute pseudo-keyword.

Signed-off-by: Emmanouil Pitsidianakis <manos.pitsidianakis@linaro.org>
---
 linux-user/mips/cpu_loop.c | 8 ++++----
 linux-user/mmap.c          | 2 +-
 linux-user/syscall.c       | 2 +-
 3 files changed, 6 insertions(+), 6 deletions(-)
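For context, a minimal sketch of the pseudo-keyword this series relies on, modelled on the Linux kernel's include/linux/compiler_attributes.h; the definition actually used by QEMU is introduced in a separate patch of the series and may differ in detail. At -Wimplicit-fallthrough=5 the compiler no longer honours any comment form, so only the statement attribute suppresses the warning:

/* Sketch of a "fallthrough" pseudo-keyword (kernel-style; illustrative only). */
#include <stdio.h>

#ifndef __has_attribute
# define __has_attribute(x) 0       /* fallback for very old compilers */
#endif

#if __has_attribute(__fallthrough__)
# define fallthrough __attribute__((__fallthrough__))
#else
# define fallthrough do {} while (0) /* fallthrough */
#endif

/* Built with e.g. "cc -Wimplicit-fallthrough=5 -c", the attribute below is
 * the only accepted way to mark the deliberate case 2 -> case 1 fall-through;
 * a bare comment would still trigger the warning at level 5. */
static int steps_from(int n)
{
    int steps = 0;

    switch (n) {
    case 2:
        steps++;
        fallthrough;
    case 1:
        steps++;
        break;
    default:
        break;
    }
    return steps;
}

int main(void)
{
    printf("%d %d\n", steps_from(2), steps_from(1)); /* prints "2 1" */
    return 0;
}

The do {} while (0) fallback keeps the keyword a valid statement on compilers without the attribute, at the cost of losing the diagnostic there.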