@@ -100,6 +100,8 @@ struct kvmppc_vcpu_book3s {
#define CONTEXT_GUEST 1
#define CONTEXT_GUEST_END 2
+#ifdef CONFIG_PPC_BOOK3S_64
+
#define VSID_REAL_DR 0x7ffffffffff00000ULL
#define VSID_REAL_IR 0x7fffffffffe00000ULL
#define VSID_SPLIT_MASK 0x7fffffffffe00000ULL
@@ -107,9 +109,20 @@ struct kvmppc_vcpu_book3s {
#define VSID_BAT 0x7fffffffffb00000ULL
#define VSID_PR 0x8000000000000000ULL
-extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 ea, u64 ea_mask);
+#else /* Only have 32-bit VSIDs */
+
+#define VSID_REAL_DR 0x7ff00000
+#define VSID_REAL_IR 0x7fe00000
+#define VSID_SPLIT_MASK 0x7fe00000
+#define VSID_REAL 0x7fc00000
+#define VSID_BAT 0x7fb00000
+#define VSID_PR 0x80000000
+
+#endif
+
+extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
-extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end);
+extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
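
With the #ifdef split above, a Book3S_32 host gets VSID marker values that fit its 32-bit ulong, while Book3S_64 keeps the wide constants. The markers tag shadow VSIDs by translation context so real-mode, split-real-mode and BAT mappings stay distinct in the shadow MMU. Below is a minimal userspace sketch of that tagging, assuming the real MSR_DR/MSR_IR bit positions; tag_vsid() itself is hypothetical, and the corresponding switch appears in the esid_to_vsid hunks further down.

#include <stdio.h>

#define MSR_DR (1 << 4)	/* data relocation */
#define MSR_IR (1 << 5)	/* instruction relocation */

/* 32-bit variants from the #else branch above */
#define VSID_REAL_DR 0x7ff00000UL
#define VSID_REAL_IR 0x7fe00000UL
#define VSID_REAL    0x7fc00000UL

static unsigned long tag_vsid(unsigned long msr, unsigned long esid)
{
	switch (msr & (MSR_DR | MSR_IR)) {
	case 0:		/* full real mode */
		return VSID_REAL | esid;
	case MSR_IR:	/* split real mode, instructions translated */
		return VSID_REAL_IR | esid;
	case MSR_DR:	/* split real mode, data translated */
		return VSID_REAL_DR | esid;
	default:	/* both on: regular segment lookup instead */
		return esid;
	}
}

int main(void)
{
	printf("real-mode vsid: 0x%lx\n", tag_vsid(0, 0x42));
	return 0;
}
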
@@ -124,9 +124,9 @@ struct kvm_arch {
};
struct kvmppc_pte {
- u64 eaddr;
+ ulong eaddr;
u64 vpage;
- u64 raddr;
+ ulong raddr;
bool may_read : 1;
bool may_write : 1;
bool may_execute : 1;
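
Note the asymmetry: eaddr and raddr shrink to the host's native ulong, but vpage stays u64. The virtual page number packs the VSID above a 16-bit page index, which overflows 32 bits even with the 32-bit VSIDs defined above. A small standalone check of that arithmetic, with made-up values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	unsigned long vsid = 0x7fc00042UL;	/* a tagged 32-bit VSID */
	unsigned long eaddr = 0x12345678UL;

	/* same shift/mask as the ea_to_vp hunk later in this patch */
	uint64_t vpage = (((uint64_t)eaddr >> 12) & 0xffff) |
			 ((uint64_t)vsid << 16);

	assert(vpage > 0xffffffffULL);	/* needs more than 32 bits */
	return 0;
}
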
@@ -145,7 +145,7 @@ struct kvmppc_mmu {
int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data);
void (*reset_msr)(struct kvm_vcpu *vcpu);
void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
- int (*esid_to_vsid)(struct kvm_vcpu *vcpu, u64 esid, u64 *vsid);
+ int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, ulong *vsid);
u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
bool (*is_dcbz32)(struct kvm_vcpu *vcpu);
};
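
The esid_to_vsid callback narrows to ulong on both sides. This struct is a per-flavor ops table: the Book3S_32 and Book3S_64 MMU emulations each install their own implementation, and callers never care which backend is active. A mock, compilable illustration of the pattern with the patched signature; apart from the types, every name here is invented:

#include <stdio.h>

struct mmu_ops {
	int (*esid_to_vsid)(unsigned long esid, unsigned long *vsid);
};

static int esid_to_vsid_32(unsigned long esid, unsigned long *vsid)
{
	*vsid = 0x7fc00000UL | esid;	/* mock: tag as real mode */
	return 0;
}

int main(void)
{
	struct mmu_ops mmu = { .esid_to_vsid = esid_to_vsid_32 };
	unsigned long vsid;

	mmu.esid_to_vsid(0x42, &vsid);
	printf("vsid: 0x%lx\n", vsid);
	return 0;
}
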
@@ -812,12 +812,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
* so we can't use the NX bit inside the guest. Let's cross our fingers
* that no guest that needs the dcbz hack does NX.
*/
- kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFULL);
+ kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
r = RESUME_GUEST;
} else {
vcpu->arch.msr |= to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFULL);
+ kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
r = RESUME_GUEST;
}
break;
@@ -843,7 +843,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->arch.dear = dar;
to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
- kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFULL);
+ kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFUL);
r = RESUME_GUEST;
}
break;
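
The ULL→UL switch matches the mask to kvmppc_mmu_pte_flush()'s new ulong parameter: ~0xFFFUL is all-ones above the 4K page offset at whatever width ulong has on the host. The effect of the call is to drop every shadow PTE in the page containing the faulting address. A hedged sketch of the page-match test that mask implies; same_page() is an illustrative name, not the kernel helper:

#include <assert.h>
#include <stdbool.h>

/* with ea_mask == ~0xFFFUL this compares 4K page frames */
static bool same_page(unsigned long pte_ea, unsigned long guest_ea,
		      unsigned long ea_mask)
{
	return (pte_ea & ea_mask) == (guest_ea & ea_mask);
}

int main(void)
{
	assert(same_page(0x1000, 0x1ffc, ~0xFFFUL));	/* same page */
	assert(!same_page(0x1000, 0x2000, ~0xFFFUL));	/* next page */
	return 0;
}
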
@@ -60,8 +60,8 @@ static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *pte, bool data);
-static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
- u64 *vsid);
+static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
+ ulong *vsid);
static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t eaddr)
{
@@ -71,14 +71,14 @@ static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t e
static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
bool data)
{
- u64 vsid;
+ ulong vsid;
struct kvmppc_pte pte;
if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data))
return pte.vpage;
kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
- return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16);
+ return (((u64)eaddr >> 12) & 0xffff) | ((u64)vsid << 16);
}
static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
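
The (u64) cast on vsid in ea_to_vp is load-bearing: vsid is now ulong, and on a 32-bit host `vsid << 16` evaluates at 32-bit width, throwing away the top 16 bits of the VSID before the result widens into the u64 vpage. A runnable demonstration, emulating ILP32 arithmetic with uint32_t so it behaves the same anywhere:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vsid = 0x7fc00042;	/* what ulong holds on a 32-bit host */

	uint64_t wrong = (uint32_t)(vsid << 16);  /* unpatched pattern */
	uint64_t right = (uint64_t)vsid << 16;	  /* with the cast */

	printf("wrong: 0x%llx\n", (unsigned long long)wrong); /* 0x420000 */
	printf("right: 0x%llx\n", (unsigned long long)right); /* 0x7fc000420000 */
	return 0;
}
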
@@ -148,11 +148,11 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
bat->bepi_mask);
}
if ((eaddr & bat->bepi_mask) == bat->bepi) {
- u64 vsid;
+ ulong vsid;
kvmppc_mmu_book3s_32_esid_to_vsid(vcpu,
eaddr >> SID_SHIFT, &vsid);
- vsid <<= 16;
- pte->vpage = (((u64)eaddr >> 12) & 0xffff) | vsid;
+ pte->vpage = (((u64)eaddr >> 12) & 0xffff) |
+ ((u64)vsid << 16);
pte->raddr = bat->brpn | (eaddr & ~bat->bepi_mask);
pte->may_read = bat->pp;
@@ -183,7 +183,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_sr *sre;
hva_t ptegp;
u32 pteg[16];
- u64 ptem = 0;
+ u32 ptem = 0;
int i;
int found = 0;
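
ptem can drop to u32 because it is the compare value for PTE word 0 of the 32-bit hashed page table, and that word is itself only 32 bits. For reference, the architected layout the compare word follows; this is a sketch per the 32-bit PowerPC segmented-MMU spec, not this file's exact helper:

#include <stdint.h>
#include <stdio.h>

/* PTE word 0 on the 32-bit HTAB: V | VSID(24) | H | API(6) */
static uint32_t pte0(uint32_t vsid, uint32_t eaddr, int secondary)
{
	return (1u << 31)			/* V: entry valid */
	     | ((vsid & 0xffffff) << 7)		/* 24-bit VSID */
	     | ((secondary ? 1u : 0u) << 6)	/* H: hash select */
	     | ((eaddr >> 22) & 0x3f);		/* API: abbreviated page index */
}

int main(void)
{
	printf("pte0: 0x%08x\n", pte0(0x123456, 0x12345678, 0));
	return 0;
}
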
@@ -327,8 +327,8 @@ static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool lar
kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000);
}
-static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
- u64 *vsid)
+static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
+ ulong *vsid)
{
/* In case we only have one of MSR_IR or MSR_DR set, let's put
that in the real-mode context (and hope RM doesn't access
@@ -77,11 +77,9 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
kvm_release_pfn_clean(pte->pfn);
}
-void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 _guest_ea, u64 _ea_mask)
+void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
int i;
- u32 guest_ea = _guest_ea;
- u32 ea_mask = _ea_mask;
dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%x & 0x%x\n",
vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
@@ -127,7 +125,7 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
}
}
-void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end)
+void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
int i;
@@ -179,20 +177,15 @@ static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
* a hash, so we don't waste cycles on looping */
-static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
+static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, ulong gvsid)
{
- return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
- ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
- ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
- ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
- ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
- ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
+ return (u16)(((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}
-static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
+static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, ulong gvsid)
{
struct kvmppc_sid_map *map;
u16 sid_map_mask;
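
Trimming kvmppc_sid_hash() to three XOR terms follows from the narrower input: per the comment above, the map has 512 entries, so SID_MAP_BITS is presumably 9, and three 9-bit chunks already fold the low 27 bits of a 32-bit gvsid, where the u64 version needed eight chunks. The same hash as an equivalent loop, under that SID_MAP_* assumption:

#include <assert.h>
#include <stdint.h>

#define SID_MAP_BITS 9			/* assumed: 512-entry map */
#define SID_MAP_MASK ((1 << SID_MAP_BITS) - 1)

static uint16_t sid_hash32(unsigned long gvsid)
{
	uint16_t h = 0;
	int i;

	for (i = 2; i >= 0; i--)	/* chunks 2, 1, 0 as in the patch */
		h ^= (gvsid >> (SID_MAP_BITS * i)) & SID_MAP_MASK;
	return h;
}

int main(void)
{
	unsigned long g = 0x7fc00042UL;
	uint16_t ref = ((g >> 18) & SID_MAP_MASK) ^	/* unrolled form */
		       ((g >> 9) & SID_MAP_MASK) ^
		       ((g >> 0) & SID_MAP_MASK);

	assert(sid_hash32(g) == ref);
	return 0;
}
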
@@ -251,7 +244,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
pfn_t hpaddr;
u64 va;
- u64 vsid;
+ ulong vsid;
struct kvmppc_sid_map *map;
volatile u32 *pteg;
u32 eaddr = orig_pte->eaddr;
@@ -265,7 +258,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
/* Get host physical address for gpa */
hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
if (kvm_is_error_hva(hpaddr)) {
- printk(KERN_INFO "Couldn't get guest page for gfn %llx!\n",
+ printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
orig_pte->eaddr);
return -EINVAL;
}
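
The printk fixup pairs the format with the new type: orig_pte->eaddr is ulong now, and on a Book3S_32 host %llx would consume 64 bits of varargs for a 32-bit argument, which is undefined behavior. The same rule in miniature:

#include <stdio.h>

int main(void)
{
	unsigned long ea = 0x12345000UL;

	printf("%lx\n", ea);			    /* matches ulong */
	printf("%llx\n", (unsigned long long)ea);   /* widen explicitly */
	return 0;
}
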
@@ -361,7 +354,7 @@ next_pteg:
return 0;
}
-static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
+static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, ulong gvsid)
{
struct kvmppc_sid_map *map;
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
@@ -406,7 +399,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
u32 esid = eaddr >> SID_SHIFT;
- u64 gvsid;
+ ulong gvsid;
u32 sr;
struct kvmppc_sid_map *map;
struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
@@ -439,8 +439,8 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
}
-static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
- u64 *vsid)
+static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
+ ulong *vsid)
{
switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
case 0:
@@ -62,7 +62,7 @@ static void invalidate_pte(struct hpte_cache *pte)
kvm_release_pfn_clean(pte->pfn);
}
-void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 guest_ea, u64 ea_mask)
+void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
int i;
@@ -110,7 +110,7 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
}
}
-void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end)
+void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
int i;
@@ -175,7 +175,7 @@ static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
}
-static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
+static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, ulong gvsid)
{
struct kvmppc_sid_map *map;
u16 sid_map_mask;
@@ -206,7 +206,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
pfn_t hpaddr;
ulong hash, hpteg, va;
- u64 vsid;
+ ulong vsid;
int ret;
int rflags = 0x192;
int vflags = 0;
@@ -216,7 +216,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
/* Get host physical address for gpa */
hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
if (kvm_is_error_hva(hpaddr)) {
- printk(KERN_INFO "Couldn't get guest page for gfn %llx!\n", orig_pte->eaddr);
+ printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
return -EINVAL;
}
hpaddr <<= PAGE_SHIFT;
@@ -290,7 +290,7 @@ map_again:
return 0;
}
-static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
+static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, ulong gvsid)
{
struct kvmppc_sid_map *map;
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
@@ -370,7 +370,7 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
u64 esid = eaddr >> SID_SHIFT;
u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
u64 slb_vsid = SLB_VSID_USER;
- u64 gvsid;
+ ulong gvsid;
int slb_index;
struct kvmppc_sid_map *map;
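
On the Book3S_64 host side these last hunks are width-neutral, since ulong is already 64 bits there; the conversion only changes representation where ulong narrows to 32 bits. The whole patch rests on one premise, which holds on Linux targets (ILP32/LP64):

#include <assert.h>

int main(void)
{
	/* 4 bytes on a Book3S_32 host build, 8 on Book3S_64 */
	assert(sizeof(unsigned long) == sizeof(void *));
	return 0;
}
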