@@ -146,7 +146,8 @@ void iommu_free_domid(domid_t domid, uns
int __must_check iommu_free_pgtables(struct domain *d);
struct domain_iommu;
-struct page_info *__must_check iommu_alloc_pgtable(struct domain_iommu *hd);
+struct page_info *__must_check iommu_alloc_pgtable(struct domain_iommu *hd,
+ uint64_t contig_mask);
void iommu_queue_free_pgtable(struct domain_iommu *hd, struct page_info *pg);
#endif /* !__ARCH_X86_IOMMU_H__ */
@@ -446,11 +446,13 @@ union amd_iommu_x2apic_control {
#define IOMMU_PAGE_TABLE_U32_PER_ENTRY (IOMMU_PAGE_TABLE_ENTRY_SIZE / 4)
#define IOMMU_PAGE_TABLE_ALIGNMENT 4096
+#define IOMMU_PTE_CONTIG_MASK 0x1e /* The ign0 field below. */
+
union amd_iommu_pte {
uint64_t raw;
struct {
bool pr:1;
- unsigned int ign0:4;
+ unsigned int ign0:4; /* Covered by IOMMU_PTE_CONTIG_MASK. */
bool a:1;
bool d:1;
unsigned int ign1:2;
@@ -21,6 +21,8 @@
#include "iommu.h"
+#include <asm/pt-contig-markers.h>
+
/* Given pfn and page table level, return pde index */
static unsigned int pfn_to_pde_idx(unsigned long pfn, unsigned int level)
{
@@ -113,9 +115,23 @@ static void set_iommu_ptes_present(unsig
return;
}
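+    /* The MFN must be aligned to the page size being mapped. */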
+ ASSERT(!(next_mfn & (page_sz - 1)));
+
while ( nr_ptes-- )
{
- set_iommu_pde_present(pde, next_mfn, 0, iw, ir);
+ ASSERT(!pde->next_level);
+ ASSERT(!pde->u);
+
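+        /*
+         * The table was seeded with contiguity markers at allocation time
+         * (see iommu_alloc_pgtable()): entry 0 holds CONTIG_LEVEL_SHIFT,
+         * while every other entry i holds find_first_set_bit(i).
+         */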
+ if ( pde > table )
+ ASSERT(pde->ign0 == find_first_set_bit(pde - table));
+ else
+ ASSERT(pde->ign0 == CONTIG_LEVEL_SHIFT);
+
+ pde->iw = iw;
+ pde->ir = ir;
+ pde->fc = true; /* See set_iommu_pde_present(). */
+ pde->mfn = next_mfn;
+ pde->pr = true;
++pde;
next_mfn += page_sz;
@@ -295,7 +311,7 @@ static int iommu_pde_from_dfn(struct dom
mfn = next_table_mfn;
/* allocate lower level page table */
- table = iommu_alloc_pgtable(hd);
+ table = iommu_alloc_pgtable(hd, IOMMU_PTE_CONTIG_MASK);
if ( table == NULL )
{
AMD_IOMMU_ERROR("cannot allocate I/O page table\n");
@@ -325,7 +341,7 @@ static int iommu_pde_from_dfn(struct dom
if ( next_table_mfn == 0 )
{
- table = iommu_alloc_pgtable(hd);
+ table = iommu_alloc_pgtable(hd, IOMMU_PTE_CONTIG_MASK);
if ( table == NULL )
{
AMD_IOMMU_ERROR("cannot allocate I/O page table\n");
@@ -726,7 +742,7 @@ static int fill_qpt(union amd_iommu_pte
* page table pages, and the resulting allocations are always
* zeroed.
*/
- pgs[level] = iommu_alloc_pgtable(hd);
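+        /* Pass a zero mask: quarantine tables are zeroed, not marker-seeded. */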
+ pgs[level] = iommu_alloc_pgtable(hd, 0);
if ( !pgs[level] )
{
rc = -ENOMEM;
@@ -784,7 +800,7 @@ int cf_check amd_iommu_quarantine_init(s
return 0;
}
- pdev->arch.amd.root_table = iommu_alloc_pgtable(hd);
+ pdev->arch.amd.root_table = iommu_alloc_pgtable(hd, 0);
if ( !pdev->arch.amd.root_table )
return -ENOMEM;
@@ -342,7 +342,7 @@ int amd_iommu_alloc_root(struct domain *
if ( unlikely(!hd->arch.amd.root_table) && d != dom_io )
{
- hd->arch.amd.root_table = iommu_alloc_pgtable(hd);
+ hd->arch.amd.root_table = iommu_alloc_pgtable(hd, 0);
if ( !hd->arch.amd.root_table )
return -ENOMEM;
}
@@ -334,7 +334,7 @@ static uint64_t addr_to_dma_page_maddr(s
goto out;
pte_maddr = level;
- if ( !(pg = iommu_alloc_pgtable(hd)) )
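+    /* No contiguity markers: the top level table is simply zeroed. */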
+ if ( !(pg = iommu_alloc_pgtable(hd, 0)) )
goto out;
hd->arch.vtd.pgd_maddr = page_to_maddr(pg);
@@ -376,7 +376,7 @@ static uint64_t addr_to_dma_page_maddr(s
}
pte_maddr = level - 1;
- pg = iommu_alloc_pgtable(hd);
+ pg = iommu_alloc_pgtable(hd, DMA_PTE_CONTIG_MASK);
if ( !pg )
break;
@@ -388,12 +388,13 @@ static uint64_t addr_to_dma_page_maddr(s
struct dma_pte *split = map_vtd_domain_page(pte_maddr);
unsigned long inc = 1UL << level_to_offset_bits(level - 1);
- split[0].val = pte->val;
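+            /*
+             * The new page was pre-filled with contiguity markers, so OR in
+             * the split values with DMA_PTE_CONTIG_MASK stripped, keeping
+             * the markers already in place intact.
+             */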
+ split[0].val |= pte->val & ~DMA_PTE_CONTIG_MASK;
if ( inc == PAGE_SIZE )
split[0].val &= ~DMA_PTE_SP;
for ( offset = 1; offset < PTE_NUM; ++offset )
- split[offset].val = split[offset - 1].val + inc;
+ split[offset].val |=
+ (split[offset - 1].val & ~DMA_PTE_CONTIG_MASK) + inc;
iommu_sync_cache(split, PAGE_SIZE);
unmap_vtd_domain_page(split);
@@ -2176,7 +2177,7 @@ static int __must_check cf_check intel_i
if ( iommu_snoop )
dma_set_pte_snp(new);
- if ( old.val == new.val )
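+    /* Ignore the contiguity marker bits when checking for a no-op update. */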
+ if ( !((old.val ^ new.val) & ~DMA_PTE_CONTIG_MASK) )
{
spin_unlock(&hd->arch.mapping_lock);
unmap_vtd_domain_page(page);
@@ -3064,7 +3065,7 @@ static int fill_qpt(struct dma_pte *this
* page table pages, and the resulting allocations are always
* zeroed.
*/
- pgs[level] = iommu_alloc_pgtable(hd);
+ pgs[level] = iommu_alloc_pgtable(hd, 0);
if ( !pgs[level] )
{
rc = -ENOMEM;
@@ -3121,7 +3122,7 @@ static int cf_check intel_iommu_quaranti
if ( !drhd )
return -ENODEV;
- pg = iommu_alloc_pgtable(hd);
+ pg = iommu_alloc_pgtable(hd, 0);
if ( !pg )
return -ENOMEM;
@@ -253,7 +253,10 @@ struct context_entry {
* 2-6: reserved
* 7: super page
* 8-11: available
- * 12-63: Host physcial address
+ * 12-51: Host physical address
+ * 52-61: available (52-55 used for DMA_PTE_CONTIG_MASK)
+ * 62: reserved
+ * 63: available
*/
struct dma_pte {
u64 val;
@@ -263,6 +266,7 @@ struct dma_pte {
#define DMA_PTE_PROT (DMA_PTE_READ | DMA_PTE_WRITE)
#define DMA_PTE_SP (1 << 7)
#define DMA_PTE_SNP (1 << 11)
+#define DMA_PTE_CONTIG_MASK (0xfull << PADDR_BITS)
#define dma_clear_pte(p) do {(p).val = 0;} while(0)
#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while(0)
#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while(0)
@@ -276,7 +280,7 @@ struct dma_pte {
#define dma_pte_write(p) (dma_pte_prot(p) & DMA_PTE_WRITE)
#define dma_pte_addr(p) ((p).val & PADDR_MASK & PAGE_MASK_4K)
#define dma_set_pte_addr(p, addr) do {\
- (p).val |= ((addr) & PAGE_MASK_4K); } while (0)
+ (p).val |= ((addr) & PADDR_MASK & PAGE_MASK_4K); } while (0)
#define dma_pte_present(p) (((p).val & DMA_PTE_PROT) != 0)
#define dma_pte_superpage(p) (((p).val & DMA_PTE_SP) != 0)
@@ -26,6 +26,7 @@
#include <asm/hvm/io.h>
#include <asm/io_apic.h>
#include <asm/mem_paging.h>
+#include <asm/pt-contig-markers.h>
#include <asm/setup.h>
const struct iommu_init_ops *__initdata iommu_init_ops;
@@ -538,11 +539,12 @@ int iommu_free_pgtables(struct domain *d
return 0;
}
-struct page_info *iommu_alloc_pgtable(struct domain_iommu *hd)
+struct page_info *iommu_alloc_pgtable(struct domain_iommu *hd,
+ uint64_t contig_mask)
{
unsigned int memflags = 0;
struct page_info *pg;
- void *p;
+ uint64_t *p;
#ifdef CONFIG_NUMA
if ( hd->node != NUMA_NO_NODE )
@@ -554,7 +556,29 @@ struct page_info *iommu_alloc_pgtable(st
return NULL;
p = __map_domain_page(pg);
- clear_page(p);
+
+ if ( contig_mask )
+ {
+ /* See pt-contig-markers.h for a description of the marker scheme. */
+ unsigned int i, shift = find_first_set_bit(contig_mask);
+
+        ASSERT((CONTIG_LEVEL_SHIFT & (contig_mask >> shift)) ==
+               CONTIG_LEVEL_SHIFT);
+
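+        /*
+         * Seed the markers: entry 0 gets CONTIG_LEVEL_SHIFT (the entire
+         * table is one contiguous, empty region), while any other entry i
+         * gets find_first_set_bit(i), i.e. the number of trailing zero bits
+         * in its index. Assuming CONTIG_LEVEL_SHIFT is 9 (512-entry
+         * tables), the first eight markers are 9 0 1 0 2 0 1 0.
+         */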
+ p[0] = (CONTIG_LEVEL_SHIFT + 0ull) << shift;
+ p[1] = 0;
+ p[2] = 1ull << shift;
+ p[3] = 0;
+
+ for ( i = 4; i < PAGE_SIZE / 8; i += 4 )
+ {
+ p[i + 0] = (find_first_set_bit(i) + 0ull) << shift;
+ p[i + 1] = 0;
+ p[i + 2] = 1ull << shift;
+ p[i + 3] = 0;
+ }
+ }
+ else
+ clear_page(p);
iommu_sync_cache(p, PAGE_SIZE);