@@ -171,9 +171,7 @@ out:
return (void *)MAPCACHE_VIRT_START + pfn_to_paddr(idx);
pmap:
- pmap_lock();
ret = pmap_map(mfn);
- pmap_unlock();
flush_tlb_one_local(ret);
return ret;
}
@@ -188,9 +186,7 @@ void unmap_domain_page(const void *ptr)
if ( va >= FIXADDR_START && va < FIXADDR_TOP )
{
- pmap_lock();
pmap_unmap((void *)ptr);
- pmap_unlock();
return;
}
@@ -15,18 +15,44 @@
* used anywhere else other than the stated purpose above.
*/
-static DEFINE_SPINLOCK(lock);
+static DEFINE_SPINLOCK(lock_cpu);
/* Bitmap to track which slot is used */
static unsigned long inuse;
-void pmap_lock(void)
+static unsigned int lock_cpu_id = ~0;
+static unsigned int lock_count;
+
+/*
+ * Only one pCPU is allowed to use the PMAP at a time. Another pCPU may take
+ * it over only once the current owner has unmapped all of its entries.
+ */
+static void pmap_cpu_up(void)
{
- spin_lock(&lock);
+ int ret = -1;
+ unsigned int cpu_id = smp_processor_id();
+
+ do
+ {
+        while ( cpu_id != lock_cpu_id && lock_count != 0 )
+            cpu_relax();
+ spin_lock(&lock_cpu);
+ if ( cpu_id == lock_cpu_id || lock_count == 0 )
+ {
+ lock_cpu_id = cpu_id;
+ lock_count++;
+ ret = 0;
+ }
+ spin_unlock(&lock_cpu);
+ } while ( ret == -1 );
}
-void pmap_unlock(void)
+static void pmap_cpu_down(void)
{
- spin_unlock(&lock);
+ spin_lock(&lock_cpu);
+ ASSERT(smp_processor_id() == lock_cpu_id);
+ ASSERT(lock_count);
+ lock_count--;
+ spin_unlock(&lock_cpu);
}
void *pmap_map(mfn_t mfn)
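
The ownership rule above amounts to a counted, single-owner lock: the owning pCPU may call pmap_cpu_up() repeatedly (once per mapped entry), while any other pCPU spins until lock_count falls back to zero. The stand-alone sketch below models that behaviour in user space with pthreads (build with `cc -pthread`); it is illustrative only, not part of the patch, and names such as model_cpu_up()/model_cpu_down() are invented here.

#include <assert.h>
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t lock_cpu = PTHREAD_MUTEX_INITIALIZER;
static pthread_t lock_owner;        /* valid only while lock_count > 0 */
static unsigned int lock_count;     /* entries currently held by the owner */

static void model_cpu_up(void)
{
    for ( ;; )
    {
        int taken = 0;

        pthread_mutex_lock(&lock_cpu);
        /* Same test as pmap_cpu_up(): the owner may nest, others need 0. */
        if ( lock_count == 0 || pthread_equal(lock_owner, pthread_self()) )
        {
            lock_owner = pthread_self();
            lock_count++;
            taken = 1;
        }
        pthread_mutex_unlock(&lock_cpu);

        if ( taken )
            return;
        sched_yield();              /* plays the role of the busy wait */
    }
}

static void model_cpu_down(void)
{
    pthread_mutex_lock(&lock_cpu);
    assert(lock_count > 0 && pthread_equal(lock_owner, pthread_self()));
    lock_count--;
    pthread_mutex_unlock(&lock_cpu);
}

static void *worker(void *name)
{
    /* Nested "maps": the same owner may hold several entries at once. */
    model_cpu_up();
    model_cpu_up();
    printf("%s owns the PMAP, count 2\n", (const char *)name);
    model_cpu_down();
    model_cpu_down();
    return NULL;
}

int main(void)
{
    pthread_t a, b;

    pthread_create(&a, NULL, worker, "A");
    pthread_create(&b, NULL, worker, "B");
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    return 0;
}
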
@@ -37,7 +63,13 @@ void *pmap_map(mfn_t mfn)
l1_pgentry_t *pl1e;
ASSERT(!in_irq());
- ASSERT(spin_is_locked(&lock));
+
+    /*
+     * This semaphore-like locking allows only one pCPU to use the PMAP at a
+     * time, which also implies the PMAP should only be used to bootstrap
+     * other structures. Any general-purpose use of the PMAP is a mistake.
+     */
+ pmap_cpu_up();
idx = find_first_zero_bit(&inuse, NUM_FIX_PMAP);
if ( idx == NUM_FIX_PMAP )
@@ -63,13 +95,15 @@ void pmap_unmap(void *p)
ASSERT(!in_irq());
ASSERT(slot >= FIX_PMAP_BEGIN && slot <= FIX_PMAP_END);
- ASSERT(spin_is_locked(&lock));
+
idx = slot - FIX_PMAP_BEGIN;
__clear_bit(idx, &inuse);
pl1e = &l1_fixmap[L1_PAGETABLE_ENTRIES - 1 - slot];
l1e_write_atomic(pl1e, l1e_from_mfn(_mfn(0), 0));
+
+ pmap_cpu_down();
}
static void __maybe_unused build_assertions(void)
@@ -4,8 +4,6 @@
/* Large enough for mapping 5 levels of page tables with some headroom */
#define NUM_FIX_PMAP 8
-void pmap_lock(void);
-void pmap_unlock(void);
void *pmap_map(mfn_t mfn);
void pmap_unmap(void *p);
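
With pmap_lock()/pmap_unlock() dropped from the public interface, a caller simply brackets its access with pmap_map() and pmap_unmap(); PMAP ownership is taken and released internally. A minimal, hypothetical boot-time caller might look like the sketch below; init_new_root() and root_mfn are illustrative names only, while clear_page() stands in for whatever the caller does with the mapping.

/* Hypothetical early-boot caller; only pmap_map()/pmap_unmap() are real. */
static void __init init_new_root(mfn_t root_mfn)
{
    void *root = pmap_map(root_mfn);    /* takes PMAP ownership internally */

    clear_page(root);                   /* this pCPU owns the PMAP until... */
    pmap_unmap(root);                   /* ...the entry is released here */
}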