Diffstat (limited to 'drivers/iommu/intel-iommu.c')
-rw-r--r--   drivers/iommu/intel-iommu.c   599
1 file changed, 459 insertions, 140 deletions
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 28cb713d728c..162b3236e72c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1,15 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright © 2006-2014 Intel Corporation.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
* Authors: David Woodhouse <dwmw2@infradead.org>,
* Ashok Raj <ashok.raj@intel.com>,
* Shaohua Li <shaohua.li@intel.com>,
@@ -1391,7 +1383,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
/* pdev will be returned if device is not a vf */
pf_pdev = pci_physfn(pdev);
- info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
+ info->pfsid = pci_dev_id(pf_pdev);
}
#ifdef CONFIG_INTEL_IOMMU_SVM
@@ -2341,32 +2333,33 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
}
static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- struct scatterlist *sg, unsigned long phys_pfn,
- unsigned long nr_pages, int prot)
-{
- int ret;
- struct intel_iommu *iommu;
-
- /* Do the real mapping first */
- ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
- if (ret)
- return ret;
-
- /* Notify about the new mapping */
- if (domain_type_is_vm(domain)) {
- /* VM typed domains can have more than one IOMMUs */
- int iommu_id;
- for_each_domain_iommu(iommu_id, domain) {
- iommu = g_iommus[iommu_id];
- __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
- }
- } else {
- /* General domains only have one IOMMU */
- iommu = domain_get_iommu(domain);
- __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
-	}
-
-	return 0;
+		struct scatterlist *sg, unsigned long phys_pfn,
+		unsigned long nr_pages, int prot)
+{
+	int ret;
+	struct intel_iommu *iommu;
+
+	/* Do the real mapping first */
+ ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot);
+ if (ret)
+ return ret;
+
+ /* Notify about the new mapping */
+ if (domain_type_is_vm(domain)) {
+ /* VM typed domains can have more than one IOMMUs */
+ int iommu_id;
+
+ for_each_domain_iommu(iommu_id, domain) {
+ iommu = g_iommus[iommu_id];
+ __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+ }
+ } else {
+ /* General domains only have one IOMMU */
+ iommu = domain_get_iommu(domain);
+ __mapping_notify_one(iommu, domain, iov_pfn, nr_pages);
+ }
+
+ return 0;
}
static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
@@ -2485,6 +2478,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
info->domain = domain;
info->iommu = iommu;
info->pasid_table = NULL;
+ info->auxd_enabled = 0;
+ INIT_LIST_HEAD(&info->auxiliary_domains);
if (dev && dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(info->dev);
@@ -3031,7 +3026,8 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
{
struct pci_dev *pdev = NULL;
struct dmar_drhd_unit *drhd;
- struct intel_iommu *iommu;
+ /* To avoid a -Wunused-but-set-variable warning. */
+ struct intel_iommu *iommu __maybe_unused;
struct device *dev;
int i;
int ret = 0;
@@ -3412,9 +3408,12 @@ static int __init init_dmars(void)
iommu_identity_mapping |= IDENTMAP_ALL;
#ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
- iommu_identity_mapping |= IDENTMAP_GFX;
+ dmar_map_gfx = 0;
#endif
+ if (!dmar_map_gfx)
+ iommu_identity_mapping |= IDENTMAP_GFX;
+
check_tylersburg_isoch();
if (iommu_identity_mapping) {
@@ -3496,7 +3495,13 @@ domains_done:
#ifdef CONFIG_INTEL_IOMMU_SVM
if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
+ /*
+ * Call dmar_alloc_hwirq() with dmar_global_lock held,
+ * could cause possible lock race condition.
+ */
+ up_write(&dmar_global_lock);
ret = intel_svm_enable_prq(iommu);
+ down_write(&dmar_global_lock);
if (ret)
goto free_iommu;
}
@@ -3606,45 +3611,40 @@ out:
}
/* Check if the dev needs to go through non-identity map and unmap process.*/
-static int iommu_no_mapping(struct device *dev)
+static bool iommu_need_mapping(struct device *dev)
{
int found;
if (iommu_dummy(dev))
- return 1;
+ return false;
if (!iommu_identity_mapping)
- return 0;
+ return true;
found = identity_mapping(dev);
if (found) {
if (iommu_should_identity_map(dev, 0))
- return 1;
- else {
- /*
- * 32 bit DMA is removed from si_domain and fall back
- * to non-identity mapping.
- */
- dmar_remove_one_dev_info(dev);
- dev_info(dev, "32bit DMA uses non-identity mapping\n");
- return 0;
- }
+ return false;
+
+ /*
+ * 32 bit DMA is removed from si_domain and fall back to
+ * non-identity mapping.
+ */
+ dmar_remove_one_dev_info(dev);
+ dev_info(dev, "32bit DMA uses non-identity mapping\n");
} else {
/*
* In case of a detached 64 bit DMA device from vm, the device
* is put into si_domain for identity mapping.
*/
- if (iommu_should_identity_map(dev, 0)) {
- int ret;
- ret = domain_add_dev_info(si_domain, dev);
- if (!ret) {
- dev_info(dev, "64bit DMA uses identity mapping\n");
- return 1;
- }
+ if (iommu_should_identity_map(dev, 0) &&
+ !domain_add_dev_info(si_domain, dev)) {
+ dev_info(dev, "64bit DMA uses identity mapping\n");
+ return false;
}
}
- return 0;
+ return true;
}
static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
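The helper above replaces iommu_no_mapping() with inverted polarity: it returns true when the device must be translated through an IOMMU domain, and false when it is identity-mapped (or has no usable IOMMU), in which case the reworked callbacks below fall back to the generic dma-direct helpers instead of open-coding a physical-address passthrough. A minimal sketch of that recurring dispatch pattern, using only functions visible in this patch (illustrative, not additional upstream code):

static dma_addr_t sketch_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir,
				  unsigned long attrs)
{
	/* Translated path: allocate an IOVA and program the page tables. */
	if (iommu_need_mapping(dev))
		return __intel_map_single(dev, page_to_phys(page) + offset,
					  size, dir, *dev->dma_mask);

	/* Identity-mapped path: defer to the common dma-direct code. */
	return dma_direct_map_page(dev, page, offset, size, dir, attrs);
}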
@@ -3660,9 +3660,6 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
BUG_ON(dir == DMA_NONE);
- if (iommu_no_mapping(dev))
- return paddr;
-
domain = get_valid_domain_for_dev(dev);
if (!domain)
return DMA_MAPPING_ERROR;
@@ -3711,15 +3708,20 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
unsigned long attrs)
{
- return __intel_map_single(dev, page_to_phys(page) + offset, size,
- dir, *dev->dma_mask);
+ if (iommu_need_mapping(dev))
+ return __intel_map_single(dev, page_to_phys(page) + offset,
+ size, dir, *dev->dma_mask);
+ return dma_direct_map_page(dev, page, offset, size, dir, attrs);
}
static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask);
+ if (iommu_need_mapping(dev))
+ return __intel_map_single(dev, phys_addr, size, dir,
+ *dev->dma_mask);
+ return dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
}
static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
@@ -3730,9 +3732,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
unsigned long iova_pfn;
struct intel_iommu *iommu;
struct page *freelist;
-
- if (iommu_no_mapping(dev))
- return;
+ struct pci_dev *pdev = NULL;
domain = find_domain(dev);
BUG_ON(!domain);
@@ -3745,11 +3745,14 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
start_pfn = mm_to_dma_pfn(iova_pfn);
last_pfn = start_pfn + nrpages - 1;
+ if (dev_is_pci(dev))
+ pdev = to_pci_dev(dev);
+
dev_dbg(dev, "Device unmapping: pfn %lx-%lx\n", start_pfn, last_pfn);
freelist = domain_unmap(domain, start_pfn, last_pfn);
- if (intel_iommu_strict) {
+ if (intel_iommu_strict || (pdev && pdev->untrusted)) {
iommu_flush_iotlb_psi(iommu, domain, start_pfn,
nrpages, !freelist, 0);
/* free iova */
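The untrusted check above means devices the platform marks as untrusted (typically anything behind an externally accessible port such as Thunderbolt) always take the synchronous IOTLB flush, even when intel_iommu=strict is not set, so a stale translation cannot be replayed between the unmap and a deferred flush. The policy can be restated as a small predicate (illustrative only, not part of the patch):

/* Illustrative restatement of the flush policy used above. */
static bool need_sync_iotlb_flush(struct pci_dev *pdev)
{
	/* Strict mode, or any PCI device the platform marks untrusted. */
	return intel_iommu_strict || (pdev && pdev->untrusted);
}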
@@ -3769,7 +3772,17 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- intel_unmap(dev, dev_addr, size);
+ if (iommu_need_mapping(dev))
+ intel_unmap(dev, dev_addr, size);
+ else
+ dma_direct_unmap_page(dev, dev_addr, size, dir, attrs);
+}
+
+static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+ if (iommu_need_mapping(dev))
+ intel_unmap(dev, dev_addr, size);
}
static void *intel_alloc_coherent(struct device *dev, size_t size,
@@ -3779,28 +3792,17 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
struct page *page = NULL;
int order;
+ if (!iommu_need_mapping(dev))
+ return dma_direct_alloc(dev, size, dma_handle, flags, attrs);
+
size = PAGE_ALIGN(size);
order = get_order(size);
- if (!iommu_no_mapping(dev))
- flags &= ~(GFP_DMA | GFP_DMA32);
- else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
- if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
- flags |= GFP_DMA;
- else
- flags |= GFP_DMA32;
- }
-
if (gfpflags_allow_blocking(flags)) {
unsigned int count = size >> PAGE_SHIFT;
page = dma_alloc_from_contiguous(dev, count, order,
flags & __GFP_NOWARN);
- if (page && iommu_no_mapping(dev) &&
- page_to_phys(page) + size > dev->coherent_dma_mask) {
- dma_release_from_contiguous(dev, page, count);
- page = NULL;
- }
}
if (!page)
@@ -3826,6 +3828,9 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
int order;
struct page *page = virt_to_page(vaddr);
+ if (!iommu_need_mapping(dev))
+ return dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+
size = PAGE_ALIGN(size);
order = get_order(size);
@@ -3843,6 +3848,9 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
struct scatterlist *sg;
int i;
+ if (!iommu_need_mapping(dev))
+ return dma_direct_unmap_sg(dev, sglist, nelems, dir, attrs);
+
for_each_sg(sglist, sg, nelems, i) {
nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
}
@@ -3850,20 +3858,6 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
}
-static int intel_nontranslate_map_sg(struct device *hddev,
- struct scatterlist *sglist, int nelems, int dir)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sglist, sg, nelems, i) {
- BUG_ON(!sg_page(sg));
- sg->dma_address = sg_phys(sg);
- sg->dma_length = sg->length;
- }
- return nelems;
-}
-
static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
enum dma_data_direction dir, unsigned long attrs)
{
@@ -3878,8 +3872,8 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
struct intel_iommu *iommu;
BUG_ON(dir == DMA_NONE);
- if (iommu_no_mapping(dev))
- return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
+ if (!iommu_need_mapping(dev))
+ return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
domain = get_valid_domain_for_dev(dev);
if (!domain)
@@ -3929,7 +3923,7 @@ static const struct dma_map_ops intel_dma_ops = {
.map_page = intel_map_page,
.unmap_page = intel_unmap_page,
.map_resource = intel_map_resource,
- .unmap_resource = intel_unmap_page,
+ .unmap_resource = intel_unmap_resource,
.dma_supported = dma_direct_supported,
};
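The .unmap_resource fix matters because intel_unmap_page() now falls back to dma_direct_unmap_page() for devices that bypass the IOMMU; a resource mapping on the direct path was never mapped as a page, so tearing it down must be a no-op, which is exactly what the new intel_unmap_resource() does. These two entries are reached through the dma_map_resource()/dma_unmap_resource() API; a hedged usage sketch follows (the peer device and BAR choice are illustrative only, not from this patch):

/* Illustrative only: map a peer device's MMIO BAR for peer-to-peer DMA. */
static int sketch_map_peer_bar(struct device *dev, struct pci_dev *peer,
			       dma_addr_t *iova)
{
	*iova = dma_map_resource(dev, pci_resource_start(peer, 0), SZ_4K,
				 DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, *iova))
		return -ENOMEM;

	/* ... program the DMA engine with *iova, then later: ... */

	dma_unmap_resource(dev, *iova, SZ_4K, DMA_BIDIRECTIONAL, 0);
	return 0;
}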
@@ -4055,9 +4049,7 @@ static void __init init_no_remapping_devices(void)
/* This IOMMU has *only* gfx devices. Either bypass it or
set the gfx_mapped flag, as appropriate */
- if (dmar_map_gfx) {
- intel_iommu_gfx_mapped = 1;
- } else {
+ if (!dmar_map_gfx) {
drhd->ignored = 1;
for_each_active_dev_scope(drhd->devices,
drhd->devices_cnt, i, dev)
@@ -4086,7 +4078,7 @@ static int init_iommu_hw(void)
iommu_disable_protect_mem_regions(iommu);
continue;
}
-
+
iommu_flush_write_buffer(iommu);
iommu_set_root_entry(iommu);
@@ -4896,6 +4888,9 @@ int __init intel_iommu_init(void)
goto out_free_reserved_range;
}
+ if (dmar_map_gfx)
+ intel_iommu_gfx_mapped = 1;
+
init_no_remapping_devices();
ret = init_dmars();
@@ -5065,35 +5060,139 @@ static void intel_iommu_domain_free(struct iommu_domain *domain)
domain_exit(to_dmar_domain(domain));
}
-static int intel_iommu_attach_device(struct iommu_domain *domain,
- struct device *dev)
+/*
+ * Check whether a @domain could be attached to the @dev through the
+ * aux-domain attach/detach APIs.
+ */
+static inline bool
+is_aux_domain(struct device *dev, struct iommu_domain *domain)
{
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- struct intel_iommu *iommu;
- int addr_width;
- u8 bus, devfn;
+ struct device_domain_info *info = dev->archdata.iommu;
- if (device_is_rmrr_locked(dev)) {
- dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
- return -EPERM;
- }
+ return info && info->auxd_enabled &&
+ domain->type == IOMMU_DOMAIN_UNMANAGED;
+}
- /* normally dev is not mapped */
- if (unlikely(domain_context_mapped(dev))) {
- struct dmar_domain *old_domain;
+static void auxiliary_link_device(struct dmar_domain *domain,
+ struct device *dev)
+{
+ struct device_domain_info *info = dev->archdata.iommu;
- old_domain = find_domain(dev);
- if (old_domain) {
- rcu_read_lock();
- dmar_remove_one_dev_info(dev);
- rcu_read_unlock();
+ assert_spin_locked(&device_domain_lock);
+ if (WARN_ON(!info))
+ return;
- if (!domain_type_is_vm_or_si(old_domain) &&
- list_empty(&old_domain->devices))
- domain_exit(old_domain);
+ domain->auxd_refcnt++;
+ list_add(&domain->auxd, &info->auxiliary_domains);
+}
+
+static void auxiliary_unlink_device(struct dmar_domain *domain,
+ struct device *dev)
+{
+ struct device_domain_info *info = dev->archdata.iommu;
+
+ assert_spin_locked(&device_domain_lock);
+ if (WARN_ON(!info))
+ return;
+
+ list_del(&domain->auxd);
+ domain->auxd_refcnt--;
+
+ if (!domain->auxd_refcnt && domain->default_pasid > 0)
+ intel_pasid_free_id(domain->default_pasid);
+}
+
+static int aux_domain_add_dev(struct dmar_domain *domain,
+ struct device *dev)
+{
+ int ret;
+ u8 bus, devfn;
+ unsigned long flags;
+ struct intel_iommu *iommu;
+
+ iommu = device_to_iommu(dev, &bus, &devfn);
+ if (!iommu)
+ return -ENODEV;
+
+ if (domain->default_pasid <= 0) {
+ int pasid;
+
+ pasid = intel_pasid_alloc_id(domain, PASID_MIN,
+ pci_max_pasids(to_pci_dev(dev)),
+ GFP_KERNEL);
+ if (pasid <= 0) {
+ pr_err("Can't allocate default pasid\n");
+ return -ENODEV;
}
+ domain->default_pasid = pasid;
}
+ spin_lock_irqsave(&device_domain_lock, flags);
+ /*
+ * iommu->lock must be held to attach domain to iommu and setup the
+ * pasid entry for second level translation.
+ */
+ spin_lock(&iommu->lock);
+ ret = domain_attach_iommu(domain, iommu);
+ if (ret)
+ goto attach_failed;
+
+ /* Setup the PASID entry for mediated devices: */
+ ret = intel_pasid_setup_second_level(iommu, domain, dev,
+ domain->default_pasid);
+ if (ret)
+ goto table_failed;
+ spin_unlock(&iommu->lock);
+
+ auxiliary_link_device(domain, dev);
+
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ return 0;
+
+table_failed:
+ domain_detach_iommu(domain, iommu);
+attach_failed:
+ spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+ if (!domain->auxd_refcnt && domain->default_pasid > 0)
+ intel_pasid_free_id(domain->default_pasid);
+
+ return ret;
+}
+
+static void aux_domain_remove_dev(struct dmar_domain *domain,
+ struct device *dev)
+{
+ struct device_domain_info *info;
+ struct intel_iommu *iommu;
+ unsigned long flags;
+
+ if (!is_aux_domain(dev, &domain->domain))
+ return;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ info = dev->archdata.iommu;
+ iommu = info->iommu;
+
+ auxiliary_unlink_device(domain, dev);
+
+ spin_lock(&iommu->lock);
+ intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid);
+ domain_detach_iommu(domain, iommu);
+ spin_unlock(&iommu->lock);
+
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+}
+
+static int prepare_domain_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ struct intel_iommu *iommu;
+ int addr_width;
+ u8 bus, devfn;
+
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
return -ENODEV;
@@ -5126,7 +5225,58 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
dmar_domain->agaw--;
}
- return domain_add_dev_info(dmar_domain, dev);
+ return 0;
+}
+
+static int intel_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ int ret;
+
+ if (device_is_rmrr_locked(dev)) {
+ dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
+ return -EPERM;
+ }
+
+ if (is_aux_domain(dev, domain))
+ return -EPERM;
+
+ /* normally dev is not mapped */
+ if (unlikely(domain_context_mapped(dev))) {
+ struct dmar_domain *old_domain;
+
+ old_domain = find_domain(dev);
+ if (old_domain) {
+ rcu_read_lock();
+ dmar_remove_one_dev_info(dev);
+ rcu_read_unlock();
+
+ if (!domain_type_is_vm_or_si(old_domain) &&
+ list_empty(&old_domain->devices))
+ domain_exit(old_domain);
+ }
+ }
+
+ ret = prepare_domain_attach_device(domain, dev);
+ if (ret)
+ return ret;
+
+ return domain_add_dev_info(to_dmar_domain(domain), dev);
+}
+
+static int intel_iommu_aux_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ int ret;
+
+ if (!is_aux_domain(dev, domain))
+ return -EPERM;
+
+ ret = prepare_domain_attach_device(domain, dev);
+ if (ret)
+ return ret;
+
+ return aux_domain_add_dev(to_dmar_domain(domain), dev);
}
static void intel_iommu_detach_device(struct iommu_domain *domain,
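The aux-domain entry points above are reached through the generic wrappers added in the same series (iommu_dev_enable_feature(), iommu_aux_attach_device(), iommu_aux_get_pasid()). A hedged sketch of how a mediated-device parent driver might use them; the helper name and error handling below are illustrative, not code from this patch:

static int sketch_attach_aux_domain(struct device *dev,
				    struct iommu_domain **out_domain,
				    int *out_pasid)
{
	struct iommu_domain *domain;
	int pasid, ret;

	/* Enables PASID and the per-device aux-domain bookkeeping. */
	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_AUX);
	if (ret)
		return ret;

	domain = iommu_domain_alloc(dev->bus);
	if (!domain) {
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_AUX);
		return -ENOMEM;
	}

	/* Ends up in intel_iommu_aux_attach_device() above. */
	ret = iommu_aux_attach_device(domain, dev);
	if (ret)
		goto out_free;

	/* DMA from the mediated device is tagged with this default PASID. */
	pasid = iommu_aux_get_pasid(domain, dev);
	if (pasid < 0) {
		ret = pasid;
		goto out_detach;
	}

	*out_domain = domain;
	*out_pasid = pasid;
	return 0;

out_detach:
	iommu_aux_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_AUX);
	return ret;
}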
@@ -5135,6 +5285,12 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
dmar_remove_one_dev_info(dev);
}
+static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ aux_domain_remove_dev(to_dmar_domain(domain), dev);
+}
+
static int intel_iommu_map(struct iommu_domain *domain,
unsigned long iova, phys_addr_t hpa,
size_t size, int iommu_prot)
@@ -5223,6 +5379,42 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
return phys;
}
+static inline bool scalable_mode_support(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ bool ret = true;
+
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ if (!sm_supported(iommu)) {
+ ret = false;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static inline bool iommu_pasid_support(void)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ bool ret = true;
+
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ if (!pasid_supported(iommu)) {
+ ret = false;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
static bool intel_iommu_capable(enum iommu_cap cap)
{
if (cap == IOMMU_CAP_CACHE_COHERENCY)
@@ -5307,8 +5499,7 @@ static void intel_iommu_put_resv_regions(struct device *dev,
}
}
-#ifdef CONFIG_INTEL_IOMMU_SVM
-int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
+int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
{
struct device_domain_info *info;
struct context_entry *context;
@@ -5317,7 +5508,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
u64 ctx_lo;
int ret;
- domain = get_valid_domain_for_dev(sdev->dev);
+ domain = get_valid_domain_for_dev(dev);
if (!domain)
return -EINVAL;
@@ -5325,7 +5516,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
spin_lock(&iommu->lock);
ret = -EINVAL;
- info = sdev->dev->archdata.iommu;
+ info = dev->archdata.iommu;
if (!info || !info->pasid_supported)
goto out;
@@ -5335,14 +5526,13 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
ctx_lo = context[0].lo;
- sdev->did = FLPT_DEFAULT_DID;
- sdev->sid = PCI_DEVID(info->bus, info->devfn);
-
if (!(ctx_lo & CONTEXT_PASIDE)) {
ctx_lo |= CONTEXT_PASIDE;
context[0].lo = ctx_lo;
wmb();
- iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
+ iommu->flush.flush_context(iommu,
+ domain->iommu_did[iommu->seq_id],
+ PCI_DEVID(info->bus, info->devfn),
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
}
@@ -5351,12 +5541,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
if (!info->pasid_enabled)
iommu_enable_dev_iotlb(info);
- if (info->ats_enabled) {
- sdev->dev_iotlb = 1;
- sdev->qdep = info->ats_qdep;
- if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
- sdev->qdep = 0;
- }
ret = 0;
out:
@@ -5366,6 +5550,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
return ret;
}
+#ifdef CONFIG_INTEL_IOMMU_SVM
struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
{
struct intel_iommu *iommu;
@@ -5387,12 +5572,142 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
}
#endif /* CONFIG_INTEL_IOMMU_SVM */
+static int intel_iommu_enable_auxd(struct device *dev)
+{
+ struct device_domain_info *info;
+ struct intel_iommu *iommu;
+ unsigned long flags;
+ u8 bus, devfn;
+ int ret;
+
+ iommu = device_to_iommu(dev, &bus, &devfn);
+ if (!iommu || dmar_disabled)
+ return -EINVAL;
+
+ if (!sm_supported(iommu) || !pasid_supported(iommu))
+ return -EINVAL;
+
+ ret = intel_iommu_enable_pasid(iommu, dev);
+ if (ret)
+ return -ENODEV;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ info = dev->archdata.iommu;
+ info->auxd_enabled = 1;
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ return 0;
+}
+
+static int intel_iommu_disable_auxd(struct device *dev)
+{
+ struct device_domain_info *info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ info = dev->archdata.iommu;
+ if (!WARN_ON(!info))
+ info->auxd_enabled = 0;
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ return 0;
+}
+
+/*
+ * A PCI express designated vendor specific extended capability is defined
+ * in the section 3.7 of Intel scalable I/O virtualization technical spec
+ * for system software and tools to detect endpoint devices supporting the
+ * Intel scalable IO virtualization without host driver dependency.
+ *
+ * Returns the address of the matching extended capability structure within
+ * the device's PCI configuration space or 0 if the device does not support
+ * it.
+ */
+static int siov_find_pci_dvsec(struct pci_dev *pdev)
+{
+ int pos;
+ u16 vendor, id;
+
+ pos = pci_find_next_ext_capability(pdev, 0, 0x23);
+ while (pos) {
+ pci_read_config_word(pdev, pos + 4, &vendor);
+ pci_read_config_word(pdev, pos + 8, &id);
+ if (vendor == PCI_VENDOR_ID_INTEL && id == 5)
+ return pos;
+
+ pos = pci_find_next_ext_capability(pdev, pos, 0x23);
+ }
+
+ return 0;
+}
+
+static bool
+intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat)
+{
+ if (feat == IOMMU_DEV_FEAT_AUX) {
+ int ret;
+
+ if (!dev_is_pci(dev) || dmar_disabled ||
+ !scalable_mode_support() || !iommu_pasid_support())
+ return false;
+
+ ret = pci_pasid_features(to_pci_dev(dev));
+ if (ret < 0)
+ return false;
+
+ return !!siov_find_pci_dvsec(to_pci_dev(dev));
+ }
+
+ return false;
+}
+
+static int
+intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
+{
+ if (feat == IOMMU_DEV_FEAT_AUX)
+ return intel_iommu_enable_auxd(dev);
+
+ return -ENODEV;
+}
+
+static int
+intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
+{
+ if (feat == IOMMU_DEV_FEAT_AUX)
+ return intel_iommu_disable_auxd(dev);
+
+ return -ENODEV;
+}
+
+static bool
+intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
+{
+ struct device_domain_info *info = dev->archdata.iommu;
+
+ if (feat == IOMMU_DEV_FEAT_AUX)
+ return scalable_mode_support() && info && info->auxd_enabled;
+
+ return false;
+}
+
+static int
+intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
+{
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+
+ return dmar_domain->default_pasid > 0 ?
+ dmar_domain->default_pasid : -EINVAL;
+}
+
const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
.domain_alloc = intel_iommu_domain_alloc,
.domain_free = intel_iommu_domain_free,
.attach_dev = intel_iommu_attach_device,
.detach_dev = intel_iommu_detach_device,
+ .aux_attach_dev = intel_iommu_aux_attach_device,
+ .aux_detach_dev = intel_iommu_aux_detach_device,
+ .aux_get_pasid = intel_iommu_aux_get_pasid,
.map = intel_iommu_map,
.unmap = intel_iommu_unmap,
.iova_to_phys = intel_iommu_iova_to_phys,
@@ -5401,6 +5716,10 @@ const struct iommu_ops intel_iommu_ops = {
.get_resv_regions = intel_iommu_get_resv_regions,
.put_resv_regions = intel_iommu_put_resv_regions,
.device_group = pci_device_group,
+ .dev_has_feat = intel_iommu_dev_has_feat,
+ .dev_feat_enabled = intel_iommu_dev_feat_enabled,
+ .dev_enable_feat = intel_iommu_dev_enable_feat,
+ .dev_disable_feat = intel_iommu_dev_disable_feat,
.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
};
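For reference on the DVSEC match in siov_find_pci_dvsec() further up: it walks extended capability ID 0x23 (Designated Vendor-Specific Extended Capability) and accepts entries with vendor PCI_VENDOR_ID_INTEL and DVSEC ID 5, which the code treats as the Scalable IOV capability. The config-space layout those pos + 4 / pos + 8 reads rely on, shown as an illustrative struct (not one defined by the patch):

/* Per-entry DVSEC layout; offsets are relative to the capability position. */
struct dvsec_layout_sketch {
	u32 ext_cap_hdr;	/* +0x0: cap ID 0x23, version, next-cap pointer  */
	u16 dvsec_vendor;	/* +0x4: vendor ID, PCI_VENDOR_ID_INTEL for SIOV */
	u16 dvsec_rev_len;	/* +0x6: revision (bits 3:0), length (bits 15:4) */
	u16 dvsec_id;		/* +0x8: DVSEC ID, 5 == Intel Scalable IOV       */
};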