Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu.h')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h  102
1 file changed, 33 insertions(+), 69 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index bd37df5dd6d0..bcc5d40a8d5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -73,6 +73,7 @@
#include "amdgpu_gmc.h"
#include "amdgpu_gfx.h"
#include "amdgpu_sdma.h"
+#include "amdgpu_nbio.h"
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
#include "amdgpu_csa.h"
@@ -106,6 +107,8 @@ struct amdgpu_mgpu_info
uint32_t num_apu;
};
+#define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH 256
+
/*
* Modules parameters.
*/
@@ -122,6 +125,7 @@ extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
+extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
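
With amdgpu_lockup_timeout now declared as a fixed-size string of AMDGPU_MAX_TIMEOUT_PARAM_LENGTH bytes rather than a single integer, the module parameter can carry several comma-separated per-engine values. The snippet below is only an illustrative, self-contained sketch of parsing such a list; it is not the driver's actual parser, and the engine count and values are assumptions.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Illustrative only: split a comma-separated timeout string such as
     * "10000,20000,5000,5000" (milliseconds) into per-engine values.
     * The engine count of four is an assumption for this example. */
    int main(void)
    {
            char param[] = "10000,20000,5000,5000";
            long timeout[4] = { 0 };
            int n = 0;

            for (char *tok = strtok(param, ","); tok && n < 4;
                 tok = strtok(NULL, ","), n++)
                    timeout[n] = strtol(tok, NULL, 10);

            for (int i = 0; i < n; i++)
                    printf("engine %d: %ld ms\n", i, timeout[i]);
            return 0;
    }
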
@@ -135,6 +139,7 @@ extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
+extern int amdgpu_exp_hw_support;
extern int amdgpu_dc;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
@@ -146,11 +151,7 @@ extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern uint amdgpu_pp_feature_mask;
-extern int amdgpu_ngg;
-extern int amdgpu_prim_buf_per_se;
-extern int amdgpu_pos_buf_per_se;
-extern int amdgpu_cntl_sb_buf_per_se;
-extern int amdgpu_param_buf_per_se;
+extern uint amdgpu_force_long_training;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
@@ -167,6 +168,12 @@ extern int amdgpu_mcbp;
extern int amdgpu_discovery;
extern int amdgpu_mes;
extern int amdgpu_noretry;
+extern int amdgpu_force_asic_type;
+#ifdef CONFIG_HSA_AMD
+extern int sched_policy;
+#else
+static const int sched_policy = KFD_SCHED_POLICY_HWS;
+#endif
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
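
The sched_policy declaration above follows a common kernel pattern: when CONFIG_HSA_AMD is disabled, the symbol is replaced by a static const default so callers need no #ifdef guards and the compiler can fold the unused branch away. A generic, self-contained sketch of the pattern (all names here are illustrative, not taken from the patch):

    #include <stdio.h>

    #ifdef CONFIG_EXAMPLE_FEATURE
    extern int example_policy;            /* normally set via a module parameter */
    #else
    static const int example_policy = 1;  /* fixed default; dead branch folds away */
    #endif

    int main(void)
    {
            if (example_policy)
                    printf("policy-enabled path\n");
            else
                    printf("fallback path\n");
            return 0;
    }
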
@@ -283,6 +290,9 @@ struct amdgpu_ip_block_version {
const struct amd_ip_funcs *funcs;
};
+#define HW_REV(_Major, _Minor, _Rev) \
+ ((((uint32_t) (_Major)) << 16) | ((uint32_t) (_Minor) << 8) | ((uint32_t) (_Rev)))
+
struct amdgpu_ip_block {
struct amdgpu_ip_block_status status;
const struct amdgpu_ip_block_version *version;
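
HW_REV() packs a major/minor/revision triple into one 32-bit value (major in bits 16 and up, minor in bits 8-15, revision in bits 0-7), so revision checks reduce to a plain integer comparison. A stand-alone illustration with made-up version numbers:

    #include <stdint.h>
    #include <stdio.h>

    #define HW_REV(_Major, _Minor, _Rev) \
            ((((uint32_t)(_Major)) << 16) | ((uint32_t)(_Minor) << 8) | ((uint32_t)(_Rev)))

    int main(void)
    {
            uint32_t rev = HW_REV(11, 0, 2);        /* packs to 0x000B0002 */

            if (rev >= HW_REV(11, 0, 0))
                    printf("rev 0x%08X is at least 11.0.0\n", rev);
            return 0;
    }
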
@@ -425,7 +435,6 @@ struct amdgpu_fpriv {
};
int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
-int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev);
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib);
@@ -477,7 +486,6 @@ struct amdgpu_cs_parser {
uint64_t bytes_moved_vis_threshold;
uint64_t bytes_moved;
uint64_t bytes_moved_vis;
- struct amdgpu_bo_list_entry *evictable;
/* user fence */
struct amdgpu_bo_list_entry uf_entry;
@@ -624,6 +632,11 @@ struct amdgpu_fw_vram_usage {
u64 size;
struct amdgpu_bo *reserved_bo;
void *va;
+
+ /* Offset at the top of VRAM, used as a c2p write buffer.
+ */
+ u64 mem_train_fb_loc;
+ bool mem_train_support;
};
/*
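
mem_train_fb_loc records where the c2p write buffer sits relative to the end of VRAM. Placing a buffer "at the top of VRAM" simply means reserving it at the high end of the aperture, as the small sketch below illustrates; the sizes and alignment are assumptions for the example, not values from the driver.

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

    int main(void)
    {
            uint64_t vram_size  = 8ULL << 30;   /* 8 GiB, hypothetical */
            uint64_t reserve_sz = 2ULL << 20;   /* 2 MiB buffer, hypothetical */
            uint64_t fb_loc     = ALIGN_DOWN(vram_size - reserve_sz, 1 << 20);

            printf("reserved buffer at VRAM offset 0x%llx\n",
                   (unsigned long long)fb_loc);
            return 0;
    }
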
@@ -644,71 +657,14 @@ typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t);
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
-
-/*
- * amdgpu nbio functions
- *
- */
-struct nbio_hdp_flush_reg {
- u32 ref_and_mask_cp0;
- u32 ref_and_mask_cp1;
- u32 ref_and_mask_cp2;
- u32 ref_and_mask_cp3;
- u32 ref_and_mask_cp4;
- u32 ref_and_mask_cp5;
- u32 ref_and_mask_cp6;
- u32 ref_and_mask_cp7;
- u32 ref_and_mask_cp8;
- u32 ref_and_mask_cp9;
- u32 ref_and_mask_sdma0;
- u32 ref_and_mask_sdma1;
- u32 ref_and_mask_sdma2;
- u32 ref_and_mask_sdma3;
- u32 ref_and_mask_sdma4;
- u32 ref_and_mask_sdma5;
- u32 ref_and_mask_sdma6;
- u32 ref_and_mask_sdma7;
-};
-
struct amdgpu_mmio_remap {
u32 reg_offset;
resource_size_t bus_addr;
};
-struct amdgpu_nbio_funcs {
- const struct nbio_hdp_flush_reg *hdp_flush_reg;
- u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
- u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
- u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
- u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
- u32 (*get_rev_id)(struct amdgpu_device *adev);
- void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
- void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
- u32 (*get_memsize)(struct amdgpu_device *adev);
- void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
- bool use_doorbell, int doorbell_index, int doorbell_size);
- void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell,
- int doorbell_index, int instance);
- void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
- bool enable);
- void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
- bool enable);
- void (*ih_doorbell_range)(struct amdgpu_device *adev,
- bool use_doorbell, int doorbell_index);
- void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
- bool enable);
- void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
- bool enable);
- void (*get_clockgating_state)(struct amdgpu_device *adev,
- u32 *flags);
- void (*ih_control)(struct amdgpu_device *adev);
- void (*init_registers)(struct amdgpu_device *adev);
- void (*detect_hw_virt)(struct amdgpu_device *adev);
- void (*remap_hdp_registers)(struct amdgpu_device *adev);
-};
-
struct amdgpu_df_funcs {
void (*sw_init)(struct amdgpu_device *adev);
+ void (*sw_fini)(struct amdgpu_device *adev);
void (*enable_broadcast_mode)(struct amdgpu_device *adev,
bool enable);
u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
@@ -813,6 +769,7 @@ struct amdgpu_device {
uint8_t *bios;
uint32_t bios_size;
struct amdgpu_bo *stolen_vga_memory;
+ struct amdgpu_bo *discovery_memory;
uint32_t bios_scratch_reg_offset;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@@ -921,6 +878,12 @@ struct amdgpu_device {
u32 cg_flags;
u32 pg_flags;
+ /* nbio */
+ struct amdgpu_nbio nbio;
+
+ /* mmhub */
+ struct amdgpu_mmhub mmhub;
+
/* gfx */
struct amdgpu_gfx gfx;
@@ -974,9 +937,7 @@ struct amdgpu_device {
/* soc15 register offset based on ip, instance and segment */
uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
- const struct amdgpu_nbio_funcs *nbio_funcs;
const struct amdgpu_df_funcs *df_funcs;
- const struct amdgpu_mmhub_funcs *mmhub_funcs;
/* delayed work_func for deferring clockgating during resume */
struct delayed_work delayed_init_work;
@@ -1009,8 +970,6 @@ struct amdgpu_device {
int asic_reset_res;
struct work_struct xgmi_reset_work;
- bool in_baco_reset;
-
long gfx_timeout;
long sdma_timeout;
long video_timeout;
@@ -1018,6 +977,9 @@ struct amdgpu_device {
uint64_t unique_id;
uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];
+
+ /* device pstate */
+ int pstate;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1032,6 +994,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
+void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
+ uint32_t *buf, size_t size, bool write);
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
uint32_t acc_flags);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
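
amdgpu_device_vram_access() gives callers a single entry point for dword-based VRAM reads and writes, selected by the final write flag. A hypothetical call site might look like the sketch below; the offset, buffer size, and the helper name example_dump_vram are assumptions for illustration, not part of the patch.

    /* Hypothetical call site; assumes a dword-aligned offset and size. */
    static void example_dump_vram(struct amdgpu_device *adev)
    {
            uint32_t buf[64];

            /* read 256 bytes starting at VRAM offset 0x1000 */
            amdgpu_device_vram_access(adev, 0x1000, buf, sizeof(buf), false);

            /* ... inspect or modify buf, then write it back ... */
            amdgpu_device_vram_access(adev, 0x1000, buf, sizeof(buf), true);
    }
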