Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c  614
1 file changed, 498 insertions(+), 116 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 40d446ba0b85..5eaa6893daaa 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -135,11 +135,12 @@ static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
int enable_ppgtt)
{
- bool has_aliasing_ppgtt;
bool has_full_ppgtt;
bool has_full_48bit_ppgtt;
- has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt;
+ if (!dev_priv->info.has_aliasing_ppgtt)
+ return 0;
+
has_full_ppgtt = dev_priv->info.has_full_ppgtt;
has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;
@@ -149,9 +150,6 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
}
- if (!has_aliasing_ppgtt)
- return 0;
-
/*
* We don't allow disabling PPGTT for gen9+ as it's a requirement for
* execlists, the sole mechanism available to submit work.
@@ -180,7 +178,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
return 0;
}
- if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists) {
+ if (INTEL_GEN(dev_priv) >= 8 && i915_modparams.enable_execlists) {
if (has_full_48bit_ppgtt)
return 3;
@@ -188,7 +186,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
return 2;
}
- return has_aliasing_ppgtt ? 1 : 0;
+ return 1;
}
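(For orientation — a minimal sketch, not part of this patch: the sanitized value acts as a PPGTT mode selector. Assuming the i915_modparams.enable_ppgtt module parameter of this era, a caller would interpret it roughly as follows.)

	switch (intel_sanitize_enable_ppgtt(dev_priv, i915_modparams.enable_ppgtt)) {
	case 0: /* no PPGTT: aliasing support absent, GGTT only */
		break;
	case 1: /* aliasing PPGTT */
		break;
	case 2: /* full (3-level) PPGTT */
		break;
	case 3: /* full 48bit (4-level) PPGTT */
		break;
	}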
static int ppgtt_bind_vma(struct i915_vma *vma,
@@ -205,8 +203,6 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
return ret;
}
- vma->pages = vma->obj->mm.pages;
-
/* Currently applicable only to VLV */
pte_flags = 0;
if (vma->obj->gt_ro)
@@ -222,6 +218,30 @@ static void ppgtt_unbind_vma(struct i915_vma *vma)
vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}
+static int ppgtt_set_pages(struct i915_vma *vma)
+{
+ GEM_BUG_ON(vma->pages);
+
+ vma->pages = vma->obj->mm.pages;
+
+ vma->page_sizes = vma->obj->mm.page_sizes;
+
+ return 0;
+}
+
+static void clear_pages(struct i915_vma *vma)
+{
+ GEM_BUG_ON(!vma->pages);
+
+ if (vma->pages != vma->obj->mm.pages) {
+ sg_free_table(vma->pages);
+ kfree(vma->pages);
+ }
+ vma->pages = NULL;
+
+ memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
+}
+
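(Sketch, not part of this patch: the new set_pages/clear_pages hooks are expected to bracket the bind/unbind lifetime of a VMA, roughly as below; the exact call sites live in i915_vma.c and are only assumed here.)

	int err;

	err = vma->vm->set_pages(vma);		/* fills vma->pages and vma->page_sizes */
	if (err)
		return err;

	err = vma->vm->bind_vma(vma, cache_level, flags);
	if (err) {
		vma->vm->clear_pages(vma);	/* undo page assignment on failure */
		return err;
	}

	/* ... later, on unbind ... */
	vma->vm->unbind_vma(vma);
	vma->vm->clear_pages(vma);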
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
@@ -230,13 +250,13 @@ static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
switch (level) {
case I915_CACHE_NONE:
- pte |= PPAT_UNCACHED_INDEX;
+ pte |= PPAT_UNCACHED;
break;
case I915_CACHE_WT:
- pte |= PPAT_DISPLAY_ELLC_INDEX;
+ pte |= PPAT_DISPLAY_ELLC;
break;
default:
- pte |= PPAT_CACHED_INDEX;
+ pte |= PPAT_CACHED;
break;
}
@@ -249,9 +269,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
pde |= addr;
if (level != I915_CACHE_NONE)
- pde |= PPAT_CACHED_PDE_INDEX;
+ pde |= PPAT_CACHED_PDE;
else
- pde |= PPAT_UNCACHED_INDEX;
+ pde |= PPAT_UNCACHED;
return pde;
}
@@ -481,10 +501,8 @@ static void fill_page_dma(struct i915_address_space *vm,
const u64 val)
{
u64 * const vaddr = kmap_atomic(p->page);
- int i;
- for (i = 0; i < 512; i++)
- vaddr[i] = val;
+ memset64(vaddr, val, PAGE_SIZE / sizeof(val));
kunmap_atomic(vaddr);
}
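(Worked check, assuming 4K base pages: memset64() writes PAGE_SIZE / sizeof(val) = 4096 / 8 = 512 qwords, exactly the bound of the removed loop.)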
@@ -499,22 +517,63 @@ static void fill_page_dma_32(struct i915_address_space *vm,
static int
setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
- struct page *page;
+ struct page *page = NULL;
dma_addr_t addr;
+ int order;
- page = alloc_page(gfp | __GFP_ZERO);
- if (unlikely(!page))
- return -ENOMEM;
+ /*
+ * In order to utilize 64K pages for an object with a size < 2M, we will
+ * need to support a 64K scratch page, given that every 16th entry for a
+ * page-table operating in 64K mode must point to a properly aligned 64K
+ * region, including any PTEs which happen to point to scratch.
+ *
+ * This is only relevant for the 48b PPGTT where we support
+ * huge-gtt-pages, see also i915_vma_insert().
+ *
+ * TODO: we should really consider write-protecting the scratch-page and
+ * sharing between ppgtt
+ */
+ if (i915_vm_is_48bit(vm) &&
+ HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
+ order = get_order(I915_GTT_PAGE_SIZE_64K);
+ page = alloc_pages(gfp | __GFP_ZERO | __GFP_NOWARN, order);
+ if (page) {
+ addr = dma_map_page(vm->dma, page, 0,
+ I915_GTT_PAGE_SIZE_64K,
+ PCI_DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(vm->dma, addr))) {
+ __free_pages(page, order);
+ page = NULL;
+ }
- addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(vm->dma, addr))) {
- __free_page(page);
- return -ENOMEM;
+ if (!IS_ALIGNED(addr, I915_GTT_PAGE_SIZE_64K)) {
+ dma_unmap_page(vm->dma, addr,
+ I915_GTT_PAGE_SIZE_64K,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_pages(page, order);
+ page = NULL;
+ }
+ }
+ }
+
+ if (!page) {
+ order = 0;
+ page = alloc_page(gfp | __GFP_ZERO);
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(vm->dma, addr))) {
+ __free_page(page);
+ return -ENOMEM;
+ }
}
vm->scratch_page.page = page;
vm->scratch_page.daddr = addr;
+ vm->scratch_page.order = order;
+
return 0;
}
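(Worked check, assuming 4K base pages: on the 64K path get_order(I915_GTT_PAGE_SIZE_64K) = 4, i.e. 16 contiguous pages, and cleanup_scratch_page() below unmaps BIT(4) << PAGE_SHIFT = 65536 bytes; the 4K fallback stores order 0, so the same expression yields 4096 bytes.)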
@@ -522,8 +581,9 @@ static void cleanup_scratch_page(struct i915_address_space *vm)
{
struct i915_page_dma *p = &vm->scratch_page;
- dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- __free_page(p->page);
+ dma_unmap_page(vm->dma, p->daddr, BIT(p->order) << PAGE_SHIFT,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_pages(p->page, p->order);
}
static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
@@ -991,6 +1051,105 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
cache_level);
+
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+}
+
+static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
+ struct i915_page_directory_pointer **pdps,
+ struct sgt_dma *iter,
+ enum i915_cache_level cache_level)
+{
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+ u64 start = vma->node.start;
+ dma_addr_t rem = iter->sg->length;
+
+ do {
+ struct gen8_insert_pte idx = gen8_insert_pte(start);
+ struct i915_page_directory_pointer *pdp = pdps[idx.pml4e];
+ struct i915_page_directory *pd = pdp->page_directory[idx.pdpe];
+ unsigned int page_size;
+ bool maybe_64K = false;
+ gen8_pte_t encode = pte_encode;
+ gen8_pte_t *vaddr;
+ u16 index, max;
+
+ if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
+ IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
+ rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) {
+ index = idx.pde;
+ max = I915_PDES;
+ page_size = I915_GTT_PAGE_SIZE_2M;
+
+ encode |= GEN8_PDE_PS_2M;
+
+ vaddr = kmap_atomic_px(pd);
+ } else {
+ struct i915_page_table *pt = pd->page_table[idx.pde];
+
+ index = idx.pte;
+ max = GEN8_PTES;
+ page_size = I915_GTT_PAGE_SIZE;
+
+ if (!index &&
+ vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
+ IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
+ (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
+ rem >= (max - index) << PAGE_SHIFT))
+ maybe_64K = true;
+
+ vaddr = kmap_atomic_px(pt);
+ }
+
+ do {
+ GEM_BUG_ON(iter->sg->length < page_size);
+ vaddr[index++] = encode | iter->dma;
+
+ start += page_size;
+ iter->dma += page_size;
+ rem -= page_size;
+ if (iter->dma >= iter->max) {
+ iter->sg = __sg_next(iter->sg);
+ if (!iter->sg)
+ break;
+
+ rem = iter->sg->length;
+ iter->dma = sg_dma_address(iter->sg);
+ iter->max = iter->dma + rem;
+
+ if (maybe_64K && index < max &&
+ !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
+ (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
+ rem >= (max - index) << PAGE_SHIFT)))
+ maybe_64K = false;
+
+ if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
+ break;
+ }
+ } while (rem >= page_size && index < max);
+
+ kunmap_atomic(vaddr);
+
+ /*
+ * Is it safe to mark the 2M block as 64K? -- Either we have
+ * filled whole page-table with 64K entries, or filled part of
+ * it and have reached the end of the sg table and we have
+ * enough padding.
+ */
+ if (maybe_64K &&
+ (index == max ||
+ (i915_vm_has_scratch_64K(vma->vm) &&
+ !iter->sg && IS_ALIGNED(vma->node.start +
+ vma->node.size,
+ I915_GTT_PAGE_SIZE_2M)))) {
+ vaddr = kmap_atomic_px(pd);
+ vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
+ kunmap_atomic(vaddr);
+ page_size = I915_GTT_PAGE_SIZE_64K;
+ }
+
+ vma->page_sizes.gtt |= page_size;
+ } while (iter->sg);
}
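(Sketch of the page-size selection above, illustrative only:)

	/*
	 *  - sg chunk >= 2M, 2M-aligned, starting on a PDE boundary (idx.pte == 0):
	 *    written as a single PDE with GEN8_PDE_PS_2M; page_size = 2M.
	 *  - 64K-aligned chunk that fills the rest of the page table, or ends the
	 *    sg list with enough scratch padding on a 64K-capable VM: kept as 4K
	 *    PTEs, but the PDE gets GEN8_PDE_IPS_64K and 64K is reported in
	 *    vma->page_sizes.gtt.
	 *  - anything else: ordinary 4K PTEs.
	 */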
static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
@@ -1005,11 +1164,18 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
.max = iter.dma + iter.sg->length,
};
struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
- struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
- while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
- &idx, cache_level))
- GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
+ if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
+ gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level);
+ } else {
+ struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
+
+ while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
+ &iter, &idx, cache_level))
+ GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
+
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+ }
}
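(Note, an assumption about the huge-pages series: vma->page_sizes.sg is a mask of the page sizes present in the backing sg list, so any bit above I915_GTT_PAGE_SIZE (4K) makes the comparison above route insertion through the huge-entry path.)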
static void gen8_free_page_tables(struct i915_address_space *vm,
@@ -1168,19 +1334,22 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
unsigned int pde;
gen8_for_each_pde(pt, pd, start, length, pde) {
+ int count = gen8_pte_count(start, length);
+
if (pt == vm->scratch_pt) {
pt = alloc_pt(vm);
if (IS_ERR(pt))
goto unwind;
- gen8_initialize_pt(vm, pt);
+ if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
+ gen8_initialize_pt(vm, pt);
gen8_ppgtt_set_pde(vm, pd, pt, pde);
pd->used_pdes++;
GEM_BUG_ON(pd->used_pdes > I915_PDES);
}
- pt->used_ptes += gen8_pte_count(start, length);
+ pt->used_ptes += count;
}
return 0;
@@ -1451,6 +1620,8 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.cleanup = gen8_ppgtt_cleanup;
ppgtt->base.unbind_vma = ppgtt_unbind_vma;
ppgtt->base.bind_vma = ppgtt_bind_vma;
+ ppgtt->base.set_pages = ppgtt_set_pages;
+ ppgtt->base.clear_pages = clear_pages;
ppgtt->debug_dump = gen8_dump_ppgtt;
return 0;
@@ -1725,6 +1896,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
}
} while (1);
kunmap_atomic(vaddr);
+
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}
static int gen6_alloc_va_range(struct i915_address_space *vm,
@@ -1893,6 +2066,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.unbind_vma = ppgtt_unbind_vma;
ppgtt->base.bind_vma = ppgtt_bind_vma;
+ ppgtt->base.set_pages = ppgtt_set_pages;
+ ppgtt->base.clear_pages = clear_pages;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
ppgtt->debug_dump = gen6_dump_ppgtt;
@@ -1960,6 +2135,23 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
else if (IS_GEN9_LP(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
+
+ /*
+ * To support 64K PTEs we need to first enable the use of the
+ * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
+ * mmio, otherwise the page-walker will simply ignore the IPS bit. This
+ * shouldn't be needed after GEN10.
+ *
+ * 64K pages were first introduced from BDW+, although technically they
+ * only *work* from gen9+. For pre-BDW we instead have the option for
+ * 32K pages, but we don't currently have any support for it in our
+ * driver.
+ */
+ if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) &&
+ INTEL_GEN(dev_priv) <= 10)
+ I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
+ I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
+ GAMW_ECO_ENABLE_64K_IPS_FIELD);
}
int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
@@ -1969,7 +2161,7 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
/* In the case of execlists, PPGTT is enabled by the context descriptor
* and the PDPs are contained within the context itself. We don't
* need to do anything here. */
- if (i915.enable_execlists)
+ if (i915_modparams.enable_execlists)
return 0;
if (!USES_PPGTT(dev_priv))
@@ -2404,12 +2596,6 @@ static int ggtt_bind_vma(struct i915_vma *vma,
struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags;
- if (unlikely(!vma->pages)) {
- int ret = i915_get_ggtt_vma_pages(vma);
- if (ret)
- return ret;
- }
-
/* Currently applicable only to VLV */
pte_flags = 0;
if (obj->gt_ro)
@@ -2419,6 +2605,8 @@ static int ggtt_bind_vma(struct i915_vma *vma,
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
intel_runtime_pm_put(i915);
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+
/*
* Without aliasing PPGTT there's no difference between
* GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
@@ -2446,12 +2634,6 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
u32 pte_flags;
int ret;
- if (unlikely(!vma->pages)) {
- ret = i915_get_ggtt_vma_pages(vma);
- if (ret)
- return ret;
- }
-
/* Currently applicable only to VLV */
pte_flags = 0;
if (vma->obj->gt_ro)
@@ -2466,7 +2648,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
vma->node.start,
vma->size);
if (ret)
- goto err_pages;
+ return ret;
}
appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
@@ -2480,17 +2662,6 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
}
return 0;
-
-err_pages:
- if (!(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND))) {
- if (vma->pages != vma->obj->mm.pages) {
- GEM_BUG_ON(!vma->pages);
- sg_free_table(vma->pages);
- kfree(vma->pages);
- }
- vma->pages = NULL;
- }
- return ret;
}
static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
@@ -2528,6 +2699,21 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}
+static int ggtt_set_pages(struct i915_vma *vma)
+{
+ int ret;
+
+ GEM_BUG_ON(vma->pages);
+
+ ret = i915_get_ggtt_vma_pages(vma);
+ if (ret)
+ return ret;
+
+ vma->page_sizes = vma->obj->mm.page_sizes;
+
+ return 0;
+}
+
static void i915_gtt_color_adjust(const struct drm_mm_node *node,
unsigned long color,
u64 *start,
@@ -2816,41 +3002,209 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return 0;
}
-static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv)
+static struct intel_ppat_entry *
+__alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value)
+{
+ struct intel_ppat_entry *entry = &ppat->entries[index];
+
+ GEM_BUG_ON(index >= ppat->max_entries);
+ GEM_BUG_ON(test_bit(index, ppat->used));
+
+ entry->ppat = ppat;
+ entry->value = value;
+ kref_init(&entry->ref);
+ set_bit(index, ppat->used);
+ set_bit(index, ppat->dirty);
+
+ return entry;
+}
+
+static void __free_ppat_entry(struct intel_ppat_entry *entry)
+{
+ struct intel_ppat *ppat = entry->ppat;
+ unsigned int index = entry - ppat->entries;
+
+ GEM_BUG_ON(index >= ppat->max_entries);
+ GEM_BUG_ON(!test_bit(index, ppat->used));
+
+ entry->value = ppat->clear_value;
+ clear_bit(index, ppat->used);
+ set_bit(index, ppat->dirty);
+}
+
+/**
+ * intel_ppat_get - get a usable PPAT entry
+ * @i915: i915 device instance
+ * @value: the PPAT value required by the caller
+ *
+ * The function tries to search if there is an existing PPAT entry which
+ * matches with the required value. If perfectly matched, the existing PPAT
+ * entry will be used. If only partially matched, it will try to check if
+ * there is any available PPAT index. If yes, it will allocate a new PPAT
+ * index for the required entry and update the HW. If not, the partially
+ * matched entry will be used.
+ */
+const struct intel_ppat_entry *
+intel_ppat_get(struct drm_i915_private *i915, u8 value)
+{
+ struct intel_ppat *ppat = &i915->ppat;
+ struct intel_ppat_entry *entry;
+ unsigned int scanned, best_score;
+ int i;
+
+ GEM_BUG_ON(!ppat->max_entries);
+
+ scanned = best_score = 0;
+ for_each_set_bit(i, ppat->used, ppat->max_entries) {
+ unsigned int score;
+
+ score = ppat->match(ppat->entries[i].value, value);
+ if (score > best_score) {
+ entry = &ppat->entries[i];
+ if (score == INTEL_PPAT_PERFECT_MATCH) {
+ kref_get(&entry->ref);
+ return entry;
+ }
+ best_score = score;
+ }
+ scanned++;
+ }
+
+ if (scanned == ppat->max_entries) {
+ if (!best_score)
+ return ERR_PTR(-ENOSPC);
+
+ kref_get(&entry->ref);
+ return entry;
+ }
+
+ i = find_first_zero_bit(ppat->used, ppat->max_entries);
+ entry = __alloc_ppat_entry(ppat, i, value);
+ ppat->update_hw(i915);
+ return entry;
+}
+
+static void release_ppat(struct kref *kref)
+{
+ struct intel_ppat_entry *entry =
+ container_of(kref, struct intel_ppat_entry, ref);
+ struct drm_i915_private *i915 = entry->ppat->i915;
+
+ __free_ppat_entry(entry);
+ entry->ppat->update_hw(i915);
+}
+
+/**
+ * intel_ppat_put - put back the PPAT entry got from intel_ppat_get()
+ * @entry: an intel PPAT entry
+ *
+ * Put back the PPAT entry got from intel_ppat_get(). If the PPAT index of the
+ * entry is dynamically allocated, its reference count will be decreased. Once
+ * the reference count becomes into zero, the PPAT index becomes free again.
+ */
+void intel_ppat_put(const struct intel_ppat_entry *entry)
+{
+ struct intel_ppat *ppat = entry->ppat;
+ unsigned int index = entry - ppat->entries;
+
+ GEM_BUG_ON(!ppat->max_entries);
+
+ kref_put(&ppat->entries[index].ref, release_ppat);
+}
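(Usage sketch, not part of this patch; error handling abbreviated and the PTE-encoding step left as a comment.)

	const struct intel_ppat_entry *entry;
	unsigned int pat_index;

	entry = intel_ppat_get(i915, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC);
	if (IS_ERR(entry))
		return PTR_ERR(entry);

	/* The PAT index programmed into PTEs is the entry's slot in the table. */
	pat_index = entry - i915->ppat.entries;

	/* ... encode pat_index into the relevant PTEs ... */

	intel_ppat_put(entry);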
+
+static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv)
{
+ struct intel_ppat *ppat = &dev_priv->ppat;
+ int i;
+
+ for_each_set_bit(i, ppat->dirty, ppat->max_entries) {
+ I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value);
+ clear_bit(i, ppat->dirty);
+ }
+}
+
+static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv)
+{
+ struct intel_ppat *ppat = &dev_priv->ppat;
+ u64 pat = 0;
+ int i;
+
+ for (i = 0; i < ppat->max_entries; i++)
+ pat |= GEN8_PPAT(i, ppat->entries[i].value);
+
+ bitmap_clear(ppat->dirty, 0, ppat->max_entries);
+
+ I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
+ I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
+}
+
+static unsigned int bdw_private_pat_match(u8 src, u8 dst)
+{
+ unsigned int score = 0;
+ enum {
+ AGE_MATCH = BIT(0),
+ TC_MATCH = BIT(1),
+ CA_MATCH = BIT(2),
+ };
+
+ /* Cache attribute has to be matched. */
+ if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst))
+ return 0;
+
+ score |= CA_MATCH;
+
+ if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst))
+ score |= TC_MATCH;
+
+ if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst))
+ score |= AGE_MATCH;
+
+ if (score == (AGE_MATCH | TC_MATCH | CA_MATCH))
+ return INTEL_PPAT_PERFECT_MATCH;
+
+ return score;
+}
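(Worked examples of the scoring, for reference:)

	/*
	 *  CA differs                -> 0 (entry is never reused)
	 *  only CA matches           -> CA_MATCH
	 *  CA and TC match           -> CA_MATCH | TC_MATCH
	 *  CA, TC and AGE all match  -> INTEL_PPAT_PERFECT_MATCH
	 *
	 * intel_ppat_get() falls back to the highest-scoring existing entry
	 * when every index is already in use.
	 */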
+
+static unsigned int chv_private_pat_match(u8 src, u8 dst)
+{
+ return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ?
+ INTEL_PPAT_PERFECT_MATCH : 0;
+}
+
+static void cnl_setup_private_ppat(struct intel_ppat *ppat)
+{
+ ppat->max_entries = 8;
+ ppat->update_hw = cnl_private_pat_update_hw;
+ ppat->match = bdw_private_pat_match;
+ ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
+
/* XXX: spec is unclear if this is still needed for CNL+ */
- if (!USES_PPGTT(dev_priv)) {
- I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_UC);
+ if (!USES_PPGTT(ppat->i915)) {
+ __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
return;
}
- I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC);
- I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
- I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
- I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC);
- I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
- I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
- I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
- I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+ __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
+ __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
+ __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
+ __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);
+ __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
+ __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
+ __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
+ __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
* bits. When using advanced contexts each context stores its own PAT, but
* writing this data shouldn't be harmful even in those cases. */
-static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
+static void bdw_setup_private_ppat(struct intel_ppat *ppat)
{
- u64 pat;
+ ppat->max_entries = 8;
+ ppat->update_hw = bdw_private_pat_update_hw;
+ ppat->match = bdw_private_pat_match;
+ ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
- pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
- GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
- GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
- GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
- GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
- GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
- GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
- GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
-
- if (!USES_PPGTT(dev_priv))
+ if (!USES_PPGTT(ppat->i915)) {
/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
* so RTL will always use the value corresponding to
* pat_sel = 000".
@@ -2864,17 +3218,26 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
* So we can still hold onto all our assumptions wrt cpu
* clflushing on LLC machines.
*/
- pat = GEN8_PPAT(0, GEN8_PPAT_UC);
+ __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
+ return;
+ }
- /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
- * write would work. */
- I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
- I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
+ __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); /* for normal objects, no eLLC */
+ __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); /* for something pointing to ptes? */
+ __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); /* for scanout with eLLC */
+ __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); /* Uncached objects, mostly for scanout */
+ __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
+ __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
+ __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
+ __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}
-static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
+static void chv_setup_private_ppat(struct intel_ppat *ppat)
{
- u64 pat;
+ ppat->max_entries = 8;
+ ppat->update_hw = bdw_private_pat_update_hw;
+ ppat->match = chv_private_pat_match;
+ ppat->clear_value = CHV_PPAT_SNOOP;
/*
* Map WB on BDW to snooped on CHV.
@@ -2894,17 +3257,15 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
* Which means we must set the snoop bit in PAT entry 0
* in order to keep the global status page working.
*/
- pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
- GEN8_PPAT(1, 0) |
- GEN8_PPAT(2, 0) |
- GEN8_PPAT(3, 0) |
- GEN8_PPAT(4, CHV_PPAT_SNOOP) |
- GEN8_PPAT(5, CHV_PPAT_SNOOP) |
- GEN8_PPAT(6, CHV_PPAT_SNOOP) |
- GEN8_PPAT(7, CHV_PPAT_SNOOP);
- I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
- I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
+ __alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP);
+ __alloc_ppat_entry(ppat, 1, 0);
+ __alloc_ppat_entry(ppat, 2, 0);
+ __alloc_ppat_entry(ppat, 3, 0);
+ __alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP);
+ __alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP);
+ __alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP);
+ __alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP);
}
static void gen6_gmch_remove(struct i915_address_space *vm)
@@ -2915,6 +3276,31 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
cleanup_scratch_page(vm);
}
+static void setup_private_pat(struct drm_i915_private *dev_priv)
+{
+ struct intel_ppat *ppat = &dev_priv->ppat;
+ int i;
+
+ ppat->i915 = dev_priv;
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ cnl_setup_private_ppat(ppat);
+ else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
+ chv_setup_private_ppat(ppat);
+ else
+ bdw_setup_private_ppat(ppat);
+
+ GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES);
+
+ for_each_clear_bit(i, ppat->used, ppat->max_entries) {
+ ppat->entries[i].value = ppat->clear_value;
+ ppat->entries[i].ppat = ppat;
+ set_bit(i, ppat->dirty);
+ }
+
+ ppat->update_hw(dev_priv);
+}
+
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *dev_priv = ggtt->base.i915;
@@ -2947,17 +3333,11 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
}
ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
-
- if (INTEL_GEN(dev_priv) >= 10)
- cnl_setup_private_ppat(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
- chv_setup_private_ppat(dev_priv);
- else
- bdw_setup_private_ppat(dev_priv);
-
ggtt->base.cleanup = gen6_gmch_remove;
ggtt->base.bind_vma = ggtt_bind_vma;
ggtt->base.unbind_vma = ggtt_unbind_vma;
+ ggtt->base.set_pages = ggtt_set_pages;
+ ggtt->base.clear_pages = clear_pages;
ggtt->base.insert_page = gen8_ggtt_insert_page;
ggtt->base.clear_range = nop_clear_range;
if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
@@ -2975,6 +3355,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->invalidate = gen6_ggtt_invalidate;
+ setup_private_pat(dev_priv);
+
return ggtt_probe_common(ggtt, size);
}
@@ -3014,6 +3396,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ggtt->base.insert_entries = gen6_ggtt_insert_entries;
ggtt->base.bind_vma = ggtt_bind_vma;
ggtt->base.unbind_vma = ggtt_unbind_vma;
+ ggtt->base.set_pages = ggtt_set_pages;
+ ggtt->base.clear_pages = clear_pages;
ggtt->base.cleanup = gen6_gmch_remove;
ggtt->invalidate = gen6_ggtt_invalidate;
@@ -3059,6 +3443,8 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
ggtt->base.clear_range = i915_ggtt_clear_range;
ggtt->base.bind_vma = ggtt_bind_vma;
ggtt->base.unbind_vma = ggtt_unbind_vma;
+ ggtt->base.set_pages = ggtt_set_pages;
+ ggtt->base.clear_pages = clear_pages;
ggtt->base.cleanup = i915_gmch_remove;
ggtt->invalidate = gmch_ggtt_invalidate;
@@ -3095,7 +3481,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
* currently don't have any bits spare to pass in this upper
* restriction!
*/
- if (HAS_GUC(dev_priv) && i915.enable_guc_loading) {
+ if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading) {
ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
}
@@ -3208,8 +3594,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
/* clflush objects bound into the GGTT and rebind them. */
- list_for_each_entry_safe(obj, on,
- &dev_priv->mm.bound_list, global_link) {
+ list_for_each_entry_safe(obj, on, &dev_priv->mm.bound_list, mm.link) {
bool ggtt_bound = false;
struct i915_vma *vma;
@@ -3232,13 +3617,10 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
ggtt->base.closed = false;
if (INTEL_GEN(dev_priv) >= 8) {
- if (INTEL_GEN(dev_priv) >= 10)
- cnl_setup_private_ppat(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
- chv_setup_private_ppat(dev_priv);
- else
- bdw_setup_private_ppat(dev_priv);
+ struct intel_ppat *ppat = &dev_priv->ppat;
+ bitmap_set(ppat->dirty, 0, ppat->max_entries);
+ dev_priv->ppat.update_hw(dev_priv);
return;
}