Diffstat (limited to 'drivers/iommu/intel-svm.c')
-rw-r--r--  drivers/iommu/intel-svm.c | 241
1 file changed, 98 insertions(+), 143 deletions(-)
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 887150907526..3a4b09ae8561 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -29,21 +29,10 @@
#include "intel-pasid.h"
-#define PASID_ENTRY_P BIT_ULL(0)
-#define PASID_ENTRY_FLPM_5LP BIT_ULL(9)
-#define PASID_ENTRY_SRE BIT_ULL(11)
-
static irqreturn_t prq_event_thread(int irq, void *d);
-struct pasid_state_entry {
- u64 val;
-};
-
int intel_svm_init(struct intel_iommu *iommu)
{
- struct page *pages;
- int order;
-
if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
!cap_fl1gp_support(iommu->cap))
return -EINVAL;
@@ -52,41 +41,6 @@ int intel_svm_init(struct intel_iommu *iommu)
!cap_5lp_support(iommu->cap))
return -EINVAL;
- /* Start at 2 because it's defined as 2^(1+PSS) */
- iommu->pasid_max = 2 << ecap_pss(iommu->ecap);
-
- /* Eventually I'm promised we will get a multi-level PASID table
- * and it won't have to be physically contiguous. Until then,
- * limit the size because 8MiB contiguous allocations can be hard
- * to come by. The limit of 0x20000, which is 1MiB for each of
- * the PASID and PASID-state tables, is somewhat arbitrary. */
- if (iommu->pasid_max > 0x20000)
- iommu->pasid_max = 0x20000;
-
- order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
- if (ecap_dis(iommu->ecap)) {
- /* Just making it explicit... */
- BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry));
- pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
- if (pages)
- iommu->pasid_state_table = page_address(pages);
- else
- pr_warn("IOMMU: %s: Failed to allocate PASID state table\n",
- iommu->name);
- }
-
- return 0;
-}
-
-int intel_svm_exit(struct intel_iommu *iommu)
-{
- int order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
-
- if (iommu->pasid_state_table) {
- free_pages((unsigned long)iommu->pasid_state_table, order);
- iommu->pasid_state_table = NULL;
- }
-
return 0;
}
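
For a concrete feel of the sizing logic deleted above, here is the
arithmetic as a sketch (illustrative values; it assumes the 8-byte
pasid_entry of this era, which the BUILD_BUG_ON in the deleted hunk
makes explicit):

	u32 pss = 19;			/* ecap_pss(iommu->ecap), max for a 20-bit PASID space */
	u32 pasid_max = 2 << pss;	/* 2^(1+PSS) = 1048576 entries */
	size_t bytes = pasid_max * 8;	/* 8MiB of physically contiguous memory */

That 8MiB figure is the "hard to come by" allocation the old comment
complains about; clamping pasid_max to 0x20000 brings each table down
to 1MiB.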
@@ -163,27 +117,40 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
* because that's the only option the hardware gives us. Despite
* the fact that they are actually only accessible through one. */
if (gl)
- desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) |
- QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) | QI_EIOTLB_TYPE;
+ desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
+ QI_EIOTLB_DID(sdev->did) |
+ QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) |
+ QI_EIOTLB_TYPE;
else
- desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) |
- QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | QI_EIOTLB_TYPE;
- desc.high = 0;
+ desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
+ QI_EIOTLB_DID(sdev->did) |
+ QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
+ QI_EIOTLB_TYPE;
+ desc.qw1 = 0;
} else {
int mask = ilog2(__roundup_pow_of_two(pages));
- desc.low = QI_EIOTLB_PASID(svm->pasid) | QI_EIOTLB_DID(sdev->did) |
- QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) | QI_EIOTLB_TYPE;
- desc.high = QI_EIOTLB_ADDR(address) | QI_EIOTLB_GL(gl) |
- QI_EIOTLB_IH(ih) | QI_EIOTLB_AM(mask);
+ desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
+ QI_EIOTLB_DID(sdev->did) |
+ QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
+ QI_EIOTLB_TYPE;
+ desc.qw1 = QI_EIOTLB_ADDR(address) |
+ QI_EIOTLB_GL(gl) |
+ QI_EIOTLB_IH(ih) |
+ QI_EIOTLB_AM(mask);
}
+ desc.qw2 = 0;
+ desc.qw3 = 0;
qi_submit_sync(&desc, svm->iommu);
if (sdev->dev_iotlb) {
- desc.low = QI_DEV_EIOTLB_PASID(svm->pasid) | QI_DEV_EIOTLB_SID(sdev->sid) |
- QI_DEV_EIOTLB_QDEP(sdev->qdep) | QI_DEIOTLB_TYPE;
+ desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
+ QI_DEV_EIOTLB_SID(sdev->sid) |
+ QI_DEV_EIOTLB_QDEP(sdev->qdep) |
+ QI_DEIOTLB_TYPE;
if (pages == -1) {
- desc.high = QI_DEV_EIOTLB_ADDR(-1ULL >> 1) | QI_DEV_EIOTLB_SIZE;
+ desc.qw1 = QI_DEV_EIOTLB_ADDR(-1ULL >> 1) |
+ QI_DEV_EIOTLB_SIZE;
} else if (pages > 1) {
/* The least significant zero bit indicates the size. So,
* for example, an "address" value of 0x12345f000 will
@@ -191,10 +158,13 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT);
unsigned long mask = __rounddown_pow_of_two(address ^ last);
- desc.high = QI_DEV_EIOTLB_ADDR((address & ~mask) | (mask - 1)) | QI_DEV_EIOTLB_SIZE;
+ desc.qw1 = QI_DEV_EIOTLB_ADDR((address & ~mask) |
+ (mask - 1)) | QI_DEV_EIOTLB_SIZE;
} else {
- desc.high = QI_DEV_EIOTLB_ADDR(address);
+ desc.qw1 = QI_DEV_EIOTLB_ADDR(address);
}
+ desc.qw2 = 0;
+ desc.qw3 = 0;
qi_submit_sync(&desc, svm->iommu);
}
}
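
The mask arithmetic in this hunk packs an arbitrary page range into
the single address/size encoding that the truncated comment above
describes; a worked example with illustrative values (VTD_PAGE_SHIFT
is 12):

	unsigned long address = 0x123440000;	/* flush 32 pages = 128KiB */
	unsigned long pages = 32;
	unsigned long last = address + ((pages - 1) << VTD_PAGE_SHIFT);
						/* = 0x12345f000 */
	unsigned long mask = __rounddown_pow_of_two(address ^ last);
						/* = 0x10000 */
	unsigned long enc = (address & ~mask) | (mask - 1);
						/* = 0x12344ffff */

The least significant zero bit of 0x12344ffff is bit 16, so the
device flushes the naturally aligned 2^17-byte (128KiB) region
0x123440000-0x12345ffff, exactly covering the request.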
@@ -204,25 +174,12 @@ static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
{
struct intel_svm_dev *sdev;
- /* Try deferred invalidate if available */
- if (svm->iommu->pasid_state_table &&
- !cmpxchg64(&svm->iommu->pasid_state_table[svm->pasid].val, 0, 1ULL << 63))
- return;
-
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list)
intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl);
rcu_read_unlock();
}
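
The deleted lines above were the deferred-invalidate fast path: a
lock-free test-and-set on the shared PASID-state entry. Stripped to
its shape (state_entry is a hypothetical pointer to the per-PASID
slot that the old pasid_state_table provided):

	/* cmpxchg64() returns the old value: 0 means the deferred flag
	 * was installed and the synchronous flush can be skipped;
	 * anything else means the entry is live, so flush now. */
	if (!cmpxchg64(&state_entry->val, 0, 1ULL << 63))
		return;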
-static void intel_change_pte(struct mmu_notifier *mn, struct mm_struct *mm,
- unsigned long address, pte_t pte)
-{
- struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
-
- intel_flush_svm_range(svm, address, 1, 1, 0);
-}
-
/* Pages have been freed at this point */
static void intel_invalidate_range(struct mmu_notifier *mn,
struct mm_struct *mm,
@@ -234,17 +191,6 @@ static void intel_invalidate_range(struct mmu_notifier *mn,
(end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0);
}
-
-static void intel_flush_pasid_dev(struct intel_svm *svm, struct intel_svm_dev *sdev, int pasid)
-{
- struct qi_desc desc;
-
- desc.high = 0;
- desc.low = QI_PC_TYPE | QI_PC_DID(sdev->did) | QI_PC_PASID_SEL | QI_PC_PASID(pasid);
-
- qi_submit_sync(&desc, svm->iommu);
-}
-
static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
@@ -264,8 +210,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
*/
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list) {
- intel_pasid_clear_entry(sdev->dev, svm->pasid);
- intel_flush_pasid_dev(svm, sdev, svm->pasid);
+ intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
}
rcu_read_unlock();
@@ -274,7 +219,6 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
static const struct mmu_notifier_ops intel_mmuops = {
.release = intel_mm_release,
- .change_pte = intel_change_pte,
.invalidate_range = intel_invalidate_range,
};
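
For context, these ops are wired up per address space when a process
is bound; roughly, as the bind path later in this file does it:

	svm->notifier.ops = &intel_mmuops;
	ret = mmu_notifier_register(&svm->notifier, mm);
	if (ret)
		goto out;	/* free the PASID and the svm/sdev state */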
@@ -284,15 +228,13 @@ static LIST_HEAD(global_svm_list);
int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
{
struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
- struct pasid_entry *entry;
struct intel_svm_dev *sdev;
struct intel_svm *svm = NULL;
struct mm_struct *mm = NULL;
- u64 pasid_entry_val;
int pasid_max;
int ret;
- if (!iommu)
+ if (!iommu || dmar_disabled)
return -EINVAL;
if (dev_is_pci(dev)) {
@@ -397,24 +339,22 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
kfree(sdev);
goto out;
}
- pasid_entry_val = (u64)__pa(mm->pgd) | PASID_ENTRY_P;
- } else
- pasid_entry_val = (u64)__pa(init_mm.pgd) |
- PASID_ENTRY_P | PASID_ENTRY_SRE;
- if (cpu_feature_enabled(X86_FEATURE_LA57))
- pasid_entry_val |= PASID_ENTRY_FLPM_5LP;
-
- entry = intel_pasid_get_entry(dev, svm->pasid);
- entry->val = pasid_entry_val;
-
- wmb();
+ }
- /*
- * Flush PASID cache when a PASID table entry becomes
- * present.
- */
- if (cap_caching_mode(iommu->cap))
- intel_flush_pasid_dev(svm, sdev, svm->pasid);
+ spin_lock(&iommu->lock);
+ ret = intel_pasid_setup_first_level(iommu, dev,
+ mm ? mm->pgd : init_mm.pgd,
+ svm->pasid, FLPT_DEFAULT_DID,
+ mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
+ spin_unlock(&iommu->lock);
+ if (ret) {
+ if (mm)
+ mmu_notifier_unregister(&svm->notifier, mm);
+ intel_pasid_free_id(svm->pasid);
+ kfree(svm);
+ kfree(sdev);
+ goto out;
+ }
list_add_tail(&svm->list, &global_svm_list);
}
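
From a driver's point of view, the exported entry points wrap all of
the above; a minimal usage sketch (error handling elided; flags of 0
bind the current process's mm):

	int pasid, ret;

	ret = intel_svm_bind_mm(dev, &pasid, 0, NULL);
	if (!ret) {
		/* program 'pasid' into the device and issue DMA
		 * against user virtual addresses ... */
		intel_svm_unbind_mm(dev, pasid);
	}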
@@ -460,10 +400,9 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
* to use. We have a *shared* PASID table, because it's
* large and has to be physically contiguous. So it's
* hard to be as defensive as we might like. */
- intel_flush_pasid_dev(svm, sdev, svm->pasid);
+ intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
kfree_rcu(sdev, rcu);
- intel_pasid_clear_entry(dev, svm->pasid);
if (list_empty(&svm->devs)) {
intel_pasid_free_id(svm->pasid);
@@ -522,20 +461,31 @@ EXPORT_SYMBOL_GPL(intel_svm_is_pasid_valid);
/* Page request queue descriptor */
struct page_req_dsc {
- u64 srr:1;
- u64 bof:1;
- u64 pasid_present:1;
- u64 lpig:1;
- u64 pasid:20;
- u64 bus:8;
- u64 private:23;
- u64 prg_index:9;
- u64 rd_req:1;
- u64 wr_req:1;
- u64 exe_req:1;
- u64 priv_req:1;
- u64 devfn:8;
- u64 addr:52;
+ union {
+ struct {
+ u64 type:8;
+ u64 pasid_present:1;
+ u64 priv_data_present:1;
+ u64 rsvd:6;
+ u64 rid:16;
+ u64 pasid:20;
+ u64 exe_req:1;
+ u64 pm_req:1;
+ u64 rsvd2:10;
+ };
+ u64 qw_0;
+ };
+ union {
+ struct {
+ u64 rd_req:1;
+ u64 wr_req:1;
+ u64 lpig:1;
+ u64 prg_index:9;
+ u64 addr:52;
+ };
+ u64 qw_1;
+ };
+ u64 priv_data[2];
};
#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10)
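
Note that the new layout folds the old bus/devfn pair into the
16-bit rid field, which is why the lookup further down can compare
sdev->sid against req->rid directly; the equivalence, as in
PCI_DEVID():

	u16 rid = req->rid;		/* requester ID from qw_0 */
	u8 bus = rid >> 8;		/* old req->bus */
	u8 devfn = rid & 0xff;		/* old req->devfn */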
@@ -648,7 +598,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
/* Accounting for major/minor faults? */
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list) {
- if (sdev->sid == PCI_DEVID(req->bus, req->devfn))
+ if (sdev->sid == req->rid)
break;
}
/* Other devices can go away, but the drivers are not permitted
@@ -661,34 +611,39 @@ static irqreturn_t prq_event_thread(int irq, void *d)
if (sdev && sdev->ops && sdev->ops->fault_cb) {
int rwxp = (req->rd_req << 3) | (req->wr_req << 2) |
- (req->exe_req << 1) | (req->priv_req);
- sdev->ops->fault_cb(sdev->dev, req->pasid, req->addr, req->private, rwxp, result);
+ (req->exe_req << 1) | (req->pm_req);
+ sdev->ops->fault_cb(sdev->dev, req->pasid, req->addr,
+ req->priv_data, rwxp, result);
}
/* We get here in the error case where the PASID lookup failed,
and these can be NULL. Do not use them below this point! */
sdev = NULL;
svm = NULL;
no_pasid:
- if (req->lpig) {
- /* Page Group Response */
- resp.low = QI_PGRP_PASID(req->pasid) |
- QI_PGRP_DID((req->bus << 8) | req->devfn) |
+ if (req->lpig || req->priv_data_present) {
+ /*
+ * Per VT-d spec. v3.0 ch7.7, system software must
+ * respond with page group response if private data
+ * is present (PDP) or last page in group (LPIG) bit
+ * is set. This is an additional VT-d feature beyond
+ * PCI ATS spec.
+ */
+ resp.qw0 = QI_PGRP_PASID(req->pasid) |
+ QI_PGRP_DID(req->rid) |
QI_PGRP_PASID_P(req->pasid_present) |
+				QI_PGRP_PDP(req->priv_data_present) |
+ QI_PGRP_RESP_CODE(result) |
QI_PGRP_RESP_TYPE;
- resp.high = QI_PGRP_IDX(req->prg_index) |
- QI_PGRP_PRIV(req->private) | QI_PGRP_RESP_CODE(result);
-
- qi_submit_sync(&resp, iommu);
- } else if (req->srr) {
- /* Page Stream Response */
- resp.low = QI_PSTRM_IDX(req->prg_index) |
- QI_PSTRM_PRIV(req->private) | QI_PSTRM_BUS(req->bus) |
- QI_PSTRM_PASID(req->pasid) | QI_PSTRM_RESP_TYPE;
- resp.high = QI_PSTRM_ADDR(address) | QI_PSTRM_DEVFN(req->devfn) |
- QI_PSTRM_RESP_CODE(result);
-
- qi_submit_sync(&resp, iommu);
+			resp.qw1 = QI_PGRP_IDX(req->prg_index) |
+				QI_PGRP_LPIG(req->lpig);
+			resp.qw2 = 0;
+			resp.qw3 = 0;
+
+			if (req->priv_data_present)
+				memcpy(&resp.qw2, req->priv_data,
+				       sizeof(req->priv_data));
+			qi_submit_sync(&resp, iommu);
 		}
head = (head + sizeof(*req)) & PRQ_RING_MASK;
}
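
A quick arithmetic check on the head advance above, with PRQ_ORDER
assumed to be 0 (a 4KiB queue of 32-byte descriptors):

	/* PRQ_RING_MASK = (0x1000 << 0) - 0x10 = 0xff0 */
	unsigned int head = 0xfe0;		/* last slot in the queue */
	head = (head + 0x20) & 0xff0;		/* wraps to 0x000 */

Since head always stays a multiple of 0x20, masking with 0xff0
behaves the same as masking with 0xfe0 and wraps cleanly at the
queue boundary.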