From patchwork Wed Feb 19 06:07:37 2014
From: Jiang Liu <jiang.liu@linux.intel.com>
To: Joerg Roedel, David Woodhouse, Yinghai Lu, Bjorn Helgaas, Dan Williams,
	Vinod Koul, "Rafael J. Wysocki"
Cc: Jiang Liu, Ashok Raj, Yijing Wang, Tony Luck,
	iommu@lists.linux-foundation.org, linux-pci@vger.kernel.org,
	linux-kernel@vger.kernel.org, dmaengine@vger.kernel.org
Subject: [Patch Part2 V2 17/17] iommu/vt-d: update IOMMU state when memory hotplug happens
Date: Wed, 19 Feb 2014 14:07:37 +0800
Message-Id: <1392790057-32434-18-git-send-email-jiang.liu@linux.intel.com>
In-Reply-To: <1392790057-32434-1-git-send-email-jiang.liu@linux.intel.com>
References: <1392790057-32434-1-git-send-email-jiang.liu@linux.intel.com>

If a static identity domain has been created, the IOMMU driver needs to
update the si_domain page table when a memory hotplug event happens.
Otherwise DMA operations issued by PCI devices can't reach the hot-added
memory regions.
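For reference, the change follows the kernel's generic memory hotplug
notifier pattern. A minimal, illustrative sketch of that pattern (not code
from this patch; the example_mem_notifier/example_mem_nb names are
placeholders) looks roughly like this:

	#include <linux/memory.h>
	#include <linux/notifier.h>

	static int example_mem_notifier(struct notifier_block *nb,
					unsigned long val, void *v)
	{
		struct memory_notify *mhp = v;	/* hot-plugged PFN range */

		switch (val) {
		case MEM_GOING_ONLINE:
			/* map mhp->start_pfn .. mhp->start_pfn + mhp->nr_pages - 1
			 * before the memory comes online, so DMA can reach it */
			break;
		case MEM_OFFLINE:
		case MEM_CANCEL_ONLINE:
			/* tear the same range down again and flush the IOTLB */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block example_mem_nb = {
		.notifier_call = example_mem_notifier,
	};

	/* during driver init: register_memory_notifier(&example_mem_nb); */

The patch below applies this pattern to the Intel IOMMU static identity
(si_domain) mapping.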
Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
---
 drivers/iommu/intel-iommu.c | 71 ++++++++++++++++++++++++++++++++++++++++++-
 drivers/iommu/iova.c        | 64 ++++++++++++++++++++++++++++++++++----
 include/linux/iova.h        |  2 ++
 3 files changed, 130 insertions(+), 7 deletions(-)

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index dd576c0..484d669 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -33,6 +33,7 @@
 #include <linux/dmar.h>
 #include <linux/dma-mapping.h>
 #include <linux/mempool.h>
+#include <linux/memory.h>
 #include <linux/timer.h>
 #include <linux/iova.h>
 #include <linux/iommu.h>
@@ -3683,6 +3684,73 @@ static struct notifier_block device_nb = {
 	.notifier_call = device_notifier,
 };
 
+static int intel_iommu_memory_notifier(struct notifier_block *nb,
+				       unsigned long val, void *v)
+{
+	struct memory_notify *mhp = v;
+	unsigned long long start, end;
+	unsigned long start_vpfn, last_vpfn;
+
+	switch (val) {
+	case MEM_GOING_ONLINE:
+		start = mhp->start_pfn << PAGE_SHIFT;
+		end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
+		if (iommu_domain_identity_map(si_domain, start, end)) {
+			pr_warn("dmar: failed to build identity map for [%llx-%llx]\n",
+				start, end);
+			return NOTIFY_BAD;
+		}
+		break;
+
+	case MEM_OFFLINE:
+	case MEM_CANCEL_ONLINE:
+		start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
+		last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
+		while (start_vpfn <= last_vpfn) {
+			struct iova *iova;
+			struct dmar_drhd_unit *drhd;
+			struct intel_iommu *iommu;
+
+			iova = find_iova(&si_domain->iovad, start_vpfn);
+			if (iova == NULL) {
+				pr_debug("dmar: failed get IOVA for PFN %lx\n",
+					 start_vpfn);
+				break;
+			}
+
+			iova = split_and_remove_iova(&si_domain->iovad, iova,
+						     start_vpfn, last_vpfn);
+			if (iova == NULL) {
+				pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n",
+					start_vpfn, last_vpfn);
+				return NOTIFY_BAD;
+			}
+
+			rcu_read_lock();
+			for_each_active_iommu(iommu, drhd)
+				iommu_flush_iotlb_psi(iommu, si_domain->id,
+					iova->pfn_lo,
+					iova->pfn_hi - iova->pfn_lo + 1, 0);
+			rcu_read_unlock();
+			dma_pte_clear_range(si_domain, iova->pfn_lo,
+					    iova->pfn_hi);
+			dma_pte_free_pagetable(si_domain, iova->pfn_lo,
+					       iova->pfn_hi);
+
+			start_vpfn = iova->pfn_hi + 1;
+			free_iova_mem(iova);
+		}
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block intel_iommu_memory_nb = {
+	.notifier_call = intel_iommu_memory_notifier,
+	.priority = 0
+};
+
 int __init intel_iommu_init(void)
 {
 	int ret = -ENODEV;
@@ -3755,8 +3823,9 @@ int __init intel_iommu_init(void)
 	init_iommu_pm_ops();
 
 	bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
-
 	bus_register_notifier(&pci_bus_type, &device_nb);
+	if (si_domain && !hw_pass_through)
+		register_memory_notifier(&intel_iommu_memory_nb);
 
 	intel_iommu_enabled = 1;
 
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 67da6cff..f6b17e6 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -342,19 +342,30 @@ __is_range_overlap(struct rb_node *node,
 	return 0;
 }
 
+static inline struct iova *
+alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
+{
+	struct iova *iova;
+
+	iova = alloc_iova_mem();
+	if (iova) {
+		iova->pfn_lo = pfn_lo;
+		iova->pfn_hi = pfn_hi;
+	}
+
+	return iova;
+}
+
 static struct iova *
 __insert_new_range(struct iova_domain *iovad,
 	unsigned long pfn_lo, unsigned long pfn_hi)
 {
 	struct iova *iova;
 
-	iova = alloc_iova_mem();
-	if (!iova)
-		return iova;
+	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
+	if (iova)
+		iova_insert_rbtree(&iovad->rbroot, iova);
 
-	iova->pfn_hi = pfn_hi;
-	iova->pfn_lo = pfn_lo;
-	iova_insert_rbtree(&iovad->rbroot, iova);
 	return iova;
 }
 
@@ -433,3 +444,44 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 	}
 	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
+
+struct iova *
+split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
+	unsigned long pfn_lo, unsigned long pfn_hi)
+{
+	unsigned long flags;
+	struct iova *prev = NULL, *next = NULL;
+
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+	if (iova->pfn_lo < pfn_lo) {
+		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
+		if (prev == NULL)
+			goto error;
+	}
+	if (iova->pfn_hi > pfn_hi) {
+		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
+		if (next == NULL)
+			goto error;
+	}
+
+	__cached_rbnode_delete_update(iovad, iova);
+	rb_erase(&iova->node, &iovad->rbroot);
+
+	if (prev) {
+		iova_insert_rbtree(&iovad->rbroot, prev);
+		iova->pfn_lo = pfn_lo;
+	}
+	if (next) {
+		iova_insert_rbtree(&iovad->rbroot, next);
+		iova->pfn_hi = pfn_hi;
+	}
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+
+	return iova;
+
+error:
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+	if (prev)
+		free_iova_mem(prev);
+	return NULL;
+}
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 76a0759..3277f47 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -47,5 +47,7 @@ void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
 void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit);
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
 void put_iova_domain(struct iova_domain *iovad);
+struct iova *split_and_remove_iova(struct iova_domain *iovad,
+	struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
 
 #endif