@@ -528,12 +528,12 @@ int futex_cmpxchg_value_locked(u32 *curv
 	return ret;
 }
 
-int futex_get_value_locked(u32 *dest, u32 __user *from)
+int futex_get_value_locked(u32 *dest, u32 __user *from, unsigned int flags)
 {
 	int ret;
 
 	pagefault_disable();
-	ret = __get_user(*dest, from);
+	ret = futex_get_value(dest, from, flags);
 	pagefault_enable();
 
 	return ret ? -EFAULT : 0;
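
For readers without the rest of the series in front of them: futex_get_value() is the new flags-aware read helper that this hunk switches to, and its definition is not part of this excerpt. The sketch below is only an illustration of the likely shape, a user-access-width switch driven by the size bits in flags; the FLAGS_SIZE_MASK, FLAGS_SIZE_8 and FLAGS_SIZE_16 names are assumptions made for the example, since only FLAGS_SIZE_32 is visible in these hunks.

/*
 * Illustrative sketch only, not the in-tree helper.  Assumes the low
 * bits of @flags encode the operand size (FLAGS_SIZE_MASK and the
 * FLAGS_SIZE_8/16 cases are assumed names) and that narrower reads
 * are zero-extended into the u32 result.
 */
static __always_inline int futex_get_value(u32 *dest, u32 __user *from,
					   unsigned int flags)
{
	switch (flags & FLAGS_SIZE_MASK) {
	case FLAGS_SIZE_8: {
		u8 val;

		if (__get_user(val, (u8 __user *)from))
			return -EFAULT;
		*dest = val;
		return 0;
	}
	case FLAGS_SIZE_16: {
		u16 val;

		if (__get_user(val, (u16 __user *)from))
			return -EFAULT;
		*dest = val;
		return 0;
	}
	case FLAGS_SIZE_32:
		/* 32-bit futexes keep the old single __get_user() path. */
		return __get_user(*dest, from);
	default:
		WARN_ON_ONCE(1);
		return -EFAULT;
	}
}

Whatever the real helper looks like, the hunk above keeps futex_get_value_locked() as a thin pagefault_disable()/pagefault_enable() wrapper that maps any nonzero return to -EFAULT and simply forwards the size information supplied by its callers.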
@@ -239,7 +239,7 @@ extern void futex_wake_mark(struct wake_
 
 extern int fault_in_user_writeable(u32 __user *uaddr);
 extern int futex_cmpxchg_value_locked(u32 *curval, u32 __user *uaddr, u32 uval, u32 newval);
-extern int futex_get_value_locked(u32 *dest, u32 __user *from);
+extern int futex_get_value_locked(u32 *dest, u32 __user *from, unsigned int flags);
 extern struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key);
 
 extern void __futex_unqueue(struct futex_q *q);
@@ -240,7 +240,7 @@ static int attach_to_pi_state(u32 __user
 	 * still is what we expect it to be, otherwise retry the entire
 	 * operation.
 	 */
-	if (futex_get_value_locked(&uval2, uaddr))
+	if (futex_get_value_locked(&uval2, uaddr, FLAGS_SIZE_32))
 		goto out_efault;
 
 	if (uval != uval2)
@@ -359,7 +359,7 @@ static int handle_exit_race(u32 __user *
 	 * The same logic applies to the case where the exiting task is
 	 * already gone.
 	 */
-	if (futex_get_value_locked(&uval2, uaddr))
+	if (futex_get_value_locked(&uval2, uaddr, FLAGS_SIZE_32))
 		return -EFAULT;
 
 	/* If the user space value has changed, try again. */
@@ -527,7 +527,7 @@ int futex_lock_pi_atomic(u32 __user *uad
 	 * Read the user space value first so we can validate a few
 	 * things before proceeding further.
 	 */
-	if (futex_get_value_locked(&uval, uaddr))
+	if (futex_get_value_locked(&uval, uaddr, FLAGS_SIZE_32))
 		return -EFAULT;
 
 	if (unlikely(should_fail_futex(true)))
@@ -750,7 +750,7 @@ static int __fixup_pi_state_owner(u32 __
 	if (!pi_state->owner)
 		newtid |= FUTEX_OWNER_DIED;
 
-	err = futex_get_value_locked(&uval, uaddr);
+	err = futex_get_value_locked(&uval, uaddr, FLAGS_SIZE_32);
 	if (err)
 		goto handle_err;
 
@@ -275,7 +275,7 @@ futex_proxy_trylock_atomic(u32 __user *p
 	u32 curval;
 	int ret;
 
-	if (futex_get_value_locked(&curval, pifutex))
+	if (futex_get_value_locked(&curval, pifutex, FLAGS_SIZE_32))
 		return -EFAULT;
 
 	if (unlikely(should_fail_futex(true)))
@@ -453,7 +453,7 @@ int futex_requeue(u32 __user *uaddr1, un
 	if (likely(cmpval != NULL)) {
 		u32 curval;
 
-		ret = futex_get_value_locked(&curval, uaddr1);
+		ret = futex_get_value_locked(&curval, uaddr1, FLAGS_SIZE_32);
 
 		if (unlikely(ret)) {
 			double_unlock_hb(hb1, hb2);
@@ -453,7 +453,7 @@ int futex_wait_multiple_setup(struct fut
 		u32 val = vs[i].w.val;
 
 		hb = futex_q_lock(q);
-		ret = futex_get_value_locked(&uval, uaddr);
+		ret = futex_get_value_locked(&uval, uaddr, FLAGS_SIZE_32);
 
 		if (!ret && uval == val) {
 			/*
@@ -621,7 +621,7 @@ int futex_wait_setup(u32 __user *uaddr,
 retry_private:
 	*hb = futex_q_lock(q);
 
-	ret = futex_get_value_locked(&uval, uaddr);
+	ret = futex_get_value_locked(&uval, uaddr, FLAGS_SIZE_32);
 
 	if (ret) {
 		futex_q_unlock(*hb);