@@ -108,14 +108,13 @@ static inline pte_t native_ptep_get_and_
#ifdef CONFIG_SMP
static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
{
- pmd_t res;
+ pmd_t old;
- /* xchg acts as a barrier before setting of the high bits */
- res.pmd_low = xchg(&pmdp->pmd_low, 0);
- res.pmd_high = READ_ONCE(pmdp->pmd_high);
- WRITE_ONCE(pmdp->pmd_high, 0);
+ do {
+ old = *pmdp;
+ } while (cmpxchg64(&pmdp->pmd, old.pmd, 0ULL) != old.pmd);
- return res;
+ return old;
}
#else
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
@@ -151,28 +150,15 @@ static inline pmd_t pmdp_establish(struc
#endif
#ifdef CONFIG_SMP
-union split_pud {
- struct {
- u32 pud_low;
- u32 pud_high;
- };
- pud_t pud;
-};
-
static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
{
- union split_pud res, *orig = (union split_pud *)pudp;
+ pud_t old;
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
- pti_set_user_pgtbl(&pudp->p4d.pgd, __pgd(0));
-#endif
-
- /* xchg acts as a barrier before setting of the high bits */
- res.pud_low = xchg(&orig->pud_low, 0);
- res.pud_high = orig->pud_high;
- orig->pud_high = 0;
+ do {
+ old = *pudp;
+ } while (cmpxchg64(&pudp->pud, old.pud, 0ULL) != old.pud);
- return res.pud;
+ return old;
}
#else
#define native_pudp_get_and_clear(xp) native_local_pudp_get_and_clear(xp)
Given that ptep_get_and_clear() uses cmpxchg8b, and that should be by
far the most common case, there's no point in having an optimized
variant for pmd/pud.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/x86/include/asm/pgtable-3level.h | 34 ++++++++++------------------------
 1 file changed, 10 insertions(+), 24 deletions(-)