Diffstat (limited to 'drivers/gpu/drm/xe/xe_gt_pagefault.c')
 drivers/gpu/drm/xe/xe_gt_pagefault.c | 29 +++++++++++++++++++++---------
 1 file changed, 21 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 2606cd396df5..c5ad9a0a89c2 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -14,10 +14,12 @@
 #include "abi/guc_actions_abi.h"
 #include "xe_bo.h"
 #include "xe_gt.h"
+#include "xe_gt_stats.h"
 #include "xe_gt_tlb_invalidation.h"
 #include "xe_guc.h"
 #include "xe_guc_ct.h"
 #include "xe_migrate.h"
+#include "xe_svm.h"
 #include "xe_trace_bo.h"
 #include "xe_vm.h"
 
@@ -124,18 +126,22 @@ static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
 	return 0;
 }
 
-static int handle_vma_pagefault(struct xe_tile *tile, struct pagefault *pf,
-				struct xe_vma *vma)
+static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma,
+				bool atomic)
 {
 	struct xe_vm *vm = xe_vma_vm(vma);
+	struct xe_tile *tile = gt_to_tile(gt);
 	struct drm_exec exec;
 	struct dma_fence *fence;
 	ktime_t end = 0;
 	int err;
-	bool atomic;
+
+	lockdep_assert_held_write(&vm->lock);
+
+	xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT, 1);
+	xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_KB, xe_vma_size(vma) / 1024);
 
 	trace_xe_vma_pagefault(vma);
-	atomic = access_is_atomic(pf->access_type);
 
 	/* Check if VMA is valid */
 	if (vma_is_valid(tile, vma) && !atomic)
@@ -202,10 +208,10 @@ static struct xe_vm *asid_to_vm(struct xe_device *xe, u32 asid)
 static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
 {
 	struct xe_device *xe = gt_to_xe(gt);
-	struct xe_tile *tile = gt_to_tile(gt);
 	struct xe_vm *vm;
 	struct xe_vma *vma = NULL;
 	int err;
+	bool atomic;
 
 	/* SW isn't expected to handle TRTT faults */
 	if (pf->trva_fault)
@@ -231,7 +237,13 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
 		goto unlock_vm;
 	}
 
-	err = handle_vma_pagefault(tile, pf, vma);
+	atomic = access_is_atomic(pf->access_type);
+
+	if (xe_vma_is_cpu_addr_mirror(vma))
+		err = xe_svm_handle_pagefault(vm, vma, gt_to_tile(gt),
+					      pf->page_addr, atomic);
+	else
+		err = handle_vma_pagefault(gt, vma, atomic);
 
 unlock_vm:
 	if (!err)
@@ -263,12 +275,13 @@ static void print_pagefault(struct xe_device *xe, struct pagefault *pf)
 		 "\tFaultType: %d\n"
 		 "\tAccessType: %d\n"
 		 "\tFaultLevel: %d\n"
-		 "\tEngineClass: %d\n"
+		 "\tEngineClass: %d %s\n"
 		 "\tEngineInstance: %d\n",
 		 pf->asid, pf->vfid, pf->pdata, upper_32_bits(pf->page_addr),
 		 lower_32_bits(pf->page_addr),
 		 pf->fault_type, pf->access_type, pf->fault_level,
-		 pf->engine_class, pf->engine_instance);
+		 pf->engine_class, xe_hw_engine_class_to_str(pf->engine_class),
+		 pf->engine_instance);
 }
 
 #define PF_MSG_LEN_DW	4