author     deraadt <deraadt@openbsd.org>  2018-04-20 16:09:34 +0000
committer  deraadt <deraadt@openbsd.org>  2018-04-20 16:09:34 +0000
commit     f3eef2b6b031013a208dbde8fe0d553e7413bf02 (patch)
tree       8e4c73e37f94d58b9a922161427155feca03cbae
parent     sync (diff)
download   wireguard-openbsd-f3eef2b6b031013a208dbde8fe0d553e7413bf02.tar.xz
           wireguard-openbsd-f3eef2b6b031013a208dbde8fe0d553e7413bf02.zip
oops, snapshot tests not ready yet
-rw-r--r--  gnu/llvm/lib/Target/X86/CMakeLists.txt  28
-rw-r--r--  gnu/llvm/lib/Target/X86/X86.h  44
-rw-r--r--  gnu/llvm/lib/Target/X86/X86TargetMachine.cpp  277
-rw-r--r--  sys/arch/amd64/amd64/efifb.c  21
-rw-r--r--  sys/arch/amd64/amd64/mainbus.c  17
-rw-r--r--  sys/arch/amd64/include/efifbvar.h  3
-rw-r--r--  sys/dev/pci/drm/drm_cache.c  10
-rw-r--r--  sys/dev/pci/drm/drm_cache.h  13
-rw-r--r--  sys/dev/pci/drm/drm_linux.c  16
-rw-r--r--  sys/dev/pci/drm/drm_linux.h  432
-rw-r--r--  sys/dev/pci/drm/drm_linux_rbtree.h  42
-rw-r--r--  sys/dev/pci/drm/drm_pciids.h  131
-rw-r--r--  sys/dev/pci/drm/files.drm  62
-rw-r--r--  sys/dev/pci/drm/i915/i915_drv.c  11
-rw-r--r--  sys/dev/pci/drm/i915/intel_i2c.c  2
-rw-r--r--  sys/dev/pci/drm/i915/intel_sprite.c  2
-rw-r--r--  sys/dev/pci/drm/radeon/ObjectID.h  41
-rw-r--r--  sys/dev/pci/drm/radeon/atom-bits.h  1
-rw-r--r--  sys/dev/pci/drm/radeon/atom-names.h  1
-rw-r--r--  sys/dev/pci/drm/radeon/atom-types.h  1
-rw-r--r--  sys/dev/pci/drm/radeon/atom.c  33
-rw-r--r--  sys/dev/pci/drm/radeon/atom.h  3
-rw-r--r--  sys/dev/pci/drm/radeon/atombios.h  1176
-rw-r--r--  sys/dev/pci/drm/radeon/atombios_crtc.c  465
-rw-r--r--  sys/dev/pci/drm/radeon/atombios_dp.c  46
-rw-r--r--  sys/dev/pci/drm/radeon/atombios_encoders.c  506
-rw-r--r--  sys/dev/pci/drm/radeon/atombios_i2c.c  39
-rw-r--r--  sys/dev/pci/drm/radeon/avivod.h  1
-rw-r--r--  sys/dev/pci/drm/radeon/cayman_blit_shaders.c  61
-rw-r--r--  sys/dev/pci/drm/radeon/cayman_blit_shaders.h  1
-rw-r--r--  sys/dev/pci/drm/radeon/evergreen.c  3389
-rw-r--r--  sys/dev/pci/drm/radeon/evergreen_blit_shaders.c  61
-rw-r--r--  sys/dev/pci/drm/radeon/evergreen_blit_shaders.h  1
-rw-r--r--  sys/dev/pci/drm/radeon/evergreen_cs.c  1462
-rw-r--r--  sys/dev/pci/drm/radeon/evergreen_hdmi.c  546
-rw-r--r--  sys/dev/pci/drm/radeon/evergreen_reg.h  79
-rw-r--r--  sys/dev/pci/drm/radeon/evergreend.h  592
-rw-r--r--  sys/dev/pci/drm/radeon/mkregtable.c  40
-rw-r--r--  sys/dev/pci/drm/radeon/ni.c  1704
-rw-r--r--  sys/dev/pci/drm/radeon/ni_reg.h  45
-rw-r--r--  sys/dev/pci/drm/radeon/nid.h  663
-rw-r--r--  sys/dev/pci/drm/radeon/r100.c  581
-rw-r--r--  sys/dev/pci/drm/radeon/r100_track.h  5
-rw-r--r--  sys/dev/pci/drm/radeon/r100d.h  12
-rw-r--r--  sys/dev/pci/drm/radeon/r200.c  72
-rw-r--r--  sys/dev/pci/drm/radeon/r300.c  150
-rw-r--r--  sys/dev/pci/drm/radeon/r300_reg.h  1
-rw-r--r--  sys/dev/pci/drm/radeon/r300d.h  12
-rw-r--r--  sys/dev/pci/drm/radeon/r420.c  17
-rw-r--r--  sys/dev/pci/drm/radeon/r420d.h  1
-rw-r--r--  sys/dev/pci/drm/radeon/r500_reg.h  3
-rw-r--r--  sys/dev/pci/drm/radeon/r520.c  4
-rw-r--r--  sys/dev/pci/drm/radeon/r520d.h  1
-rw-r--r--  sys/dev/pci/drm/radeon/r600.c  2006
-rw-r--r--  sys/dev/pci/drm/radeon/r600_blit_shaders.c  6
-rw-r--r--  sys/dev/pci/drm/radeon/r600_blit_shaders.h  2
-rw-r--r--  sys/dev/pci/drm/radeon/r600_cs.c  498
-rw-r--r--  sys/dev/pci/drm/radeon/r600_hdmi.c  653
-rw-r--r--  sys/dev/pci/drm/radeon/r600_reg.h  7
-rw-r--r--  sys/dev/pci/drm/radeon/r600d.h  490
-rw-r--r--  sys/dev/pci/drm/radeon/radeon.h  1593
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_acpi.c  151
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_acpi.h  1
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_agp.c  7
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_asic.c  1643
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_asic.h  624
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_atombios.c  1441
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_benchmark.c  49
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_bios.c  218
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_clocks.c  1
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_combios.c  57
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_connectors.c  834
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_cs.c  848
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_cursor.c  321
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_device.c  804
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_devlist.h  102
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_display.c  952
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_encoders.c  89
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_family.h  9
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_fb.c  191
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_fence.c  771
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_gart.c  1014
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_gem.c  368
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_i2c.c  73
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_irq_kms.c  95
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_kms.c  1058
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_legacy_crtc.c  32
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_legacy_encoders.c  12
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_legacy_tv.c  2
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_mode.h  324
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_object.c  442
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_object.h  51
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_pm.c  1233
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_prime.c  197
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_reg.h  19
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_ring.c  515
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_sa.c  39
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_semaphore.c  66
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_test.c  226
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_trace.h  126
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_trace_points.c  2
-rw-r--r--  sys/dev/pci/drm/radeon/radeon_ttm.c  604
-rw-r--r--  sys/dev/pci/drm/radeon/reg_srcs/cayman  2
-rw-r--r--  sys/dev/pci/drm/radeon/reg_srcs/evergreen  2
-rw-r--r--  sys/dev/pci/drm/radeon/rs100d.h  1
-rw-r--r--  sys/dev/pci/drm/radeon/rs400.c  42
-rw-r--r--  sys/dev/pci/drm/radeon/rs400d.h  1
-rw-r--r--  sys/dev/pci/drm/radeon/rs600.c  166
-rw-r--r--  sys/dev/pci/drm/radeon/rs600d.h  1
-rw-r--r--  sys/dev/pci/drm/radeon/rs690.c  346
-rw-r--r--  sys/dev/pci/drm/radeon/rs690d.h  4
-rw-r--r--  sys/dev/pci/drm/radeon/rv200d.h  1
-rw-r--r--  sys/dev/pci/drm/radeon/rv250d.h  1
-rw-r--r--  sys/dev/pci/drm/radeon/rv350d.h  1
-rw-r--r--  sys/dev/pci/drm/radeon/rv515.c  248
-rw-r--r--  sys/dev/pci/drm/radeon/rv515d.h  12
-rw-r--r--  sys/dev/pci/drm/radeon/rv770.c  1036
-rw-r--r--  sys/dev/pci/drm/radeon/rv770d.h  353
-rw-r--r--  sys/dev/pci/drm/radeon/si.c  5128
-rw-r--r--  sys/dev/pci/drm/radeon/si_blit_shaders.c  5
-rw-r--r--  sys/dev/pci/drm/radeon/si_blit_shaders.h  1
-rw-r--r--  sys/dev/pci/drm/radeon/si_reg.h  1
-rw-r--r--  sys/dev/pci/drm/radeon/sid.h  927
-rw-r--r--  sys/dev/pci/drm/radeon_drm.h  47
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_agp_backend.c  2
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_bo.c  686
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_bo_api.h  120
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_bo_driver.h  245
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_bo_manager.c  17
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_bo_util.c  126
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_bo_vm.c  526
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_execbuf_util.c  197
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_execbuf_util.h  41
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_memory.c  42
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_memory.h  6
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_module.h  1
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_page_alloc.c  235
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_page_alloc.h  18
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_placement.h  4
-rw-r--r--  sys/dev/pci/drm/ttm/ttm_tt.c  49
-rw-r--r--  sys/dev/rasops/rasops.c  101
-rw-r--r--  sys/dev/rasops/rasops.h  3
142 files changed, 12980 insertions, 31558 deletions
diff --git a/gnu/llvm/lib/Target/X86/CMakeLists.txt b/gnu/llvm/lib/Target/X86/CMakeLists.txt
index 2c52e128ddd..55949155da9 100644
--- a/gnu/llvm/lib/Target/X86/CMakeLists.txt
+++ b/gnu/llvm/lib/Target/X86/CMakeLists.txt
@@ -10,46 +10,22 @@ tablegen(LLVM X86GenDAGISel.inc -gen-dag-isel)
tablegen(LLVM X86GenFastISel.inc -gen-fast-isel)
tablegen(LLVM X86GenCallingConv.inc -gen-callingconv)
tablegen(LLVM X86GenSubtargetInfo.inc -gen-subtarget)
-tablegen(LLVM X86GenEVEX2VEXTables.inc -gen-x86-EVEX2VEX-tables)
-tablegen(LLVM X86GenRegisterBank.inc -gen-register-bank)
-tablegen(LLVM X86GenGlobalISel.inc -gen-global-isel)
-
-if (X86_GEN_FOLD_TABLES)
- tablegen(LLVM X86GenFoldTables.inc -gen-x86-fold-tables)
-endif()
-
add_public_tablegen_target(X86CommonTableGen)
set(sources
X86AsmPrinter.cpp
X86CallFrameOptimization.cpp
- X86CallLowering.cpp
- X86CmovConversion.cpp
- X86DomainReassignment.cpp
X86ExpandPseudo.cpp
X86FastISel.cpp
- X86FixupBWInsts.cpp
- X86FixupGadgets.cpp
- X86FixupLEAs.cpp
- X86FixupSetCC.cpp
X86FloatingPoint.cpp
X86FrameLowering.cpp
- X86InstructionSelector.cpp
X86ISelDAGToDAG.cpp
X86ISelLowering.cpp
- X86InterleavedAccess.cpp
- X86InstrFMA3Info.cpp
X86InstrInfo.cpp
- X86EvexToVex.cpp
- X86LegalizerInfo.cpp
X86MCInstLower.cpp
X86MachineFunctionInfo.cpp
- X86MacroFusion.cpp
- X86OptimizeLEAs.cpp
X86PadShortFunction.cpp
- X86RegisterBankInfo.cpp
X86RegisterInfo.cpp
- X86RetpolineThunks.cpp
X86SelectionDAGInfo.cpp
X86ShuffleDecodeConstantPool.cpp
X86Subtarget.cpp
@@ -57,9 +33,9 @@ set(sources
X86TargetObjectFile.cpp
X86TargetTransformInfo.cpp
X86VZeroUpper.cpp
- X86WinAllocaExpander.cpp
+ X86FixupLEAs.cpp
X86WinEHState.cpp
- X86CallingConv.cpp
+ X86OptimizeLEAs.cpp
)
add_llvm_target(X86CodeGen ${sources})
diff --git a/gnu/llvm/lib/Target/X86/X86.h b/gnu/llvm/lib/Target/X86/X86.h
index db7930a265a..01e65b89f48 100644
--- a/gnu/llvm/lib/Target/X86/X86.h
+++ b/gnu/llvm/lib/Target/X86/X86.h
@@ -21,11 +21,6 @@ namespace llvm {
class FunctionPass;
class ImmutablePass;
-class InstructionSelector;
-class ModulePass;
-class PassRegistry;
-class X86RegisterBankInfo;
-class X86Subtarget;
class X86TargetMachine;
/// This pass converts a legalized DAG into a X86-specific DAG, ready for
@@ -63,12 +58,6 @@ FunctionPass *createX86FixupLEAs();
/// recalculations.
FunctionPass *createX86OptimizeLEAs();
-/// Return a pass that transforms setcc + movzx pairs into xor + setcc.
-FunctionPass *createX86FixupSetCC();
-
-/// Return a pass that expands WinAlloca pseudo-instructions.
-FunctionPass *createX86WinAllocaExpander();
-
/// Return a pass that optimizes the code-size of x86 call sequences. This is
/// done by replacing esp-relative movs with pushes.
FunctionPass *createX86CallFrameOptimization();
@@ -83,39 +72,6 @@ FunctionPass *createX86WinEHStatePass();
/// must run after prologue/epilogue insertion and before lowering
/// the MachineInstr to MC.
FunctionPass *createX86ExpandPseudoPass();
-
-/// This pass converts X86 cmov instructions into branch when profitable.
-FunctionPass *createX86CmovConverterPass();
-
-/// Return a Machine IR pass that selectively replaces
-/// certain byte and word instructions by equivalent 32 bit instructions,
-/// in order to eliminate partial register usage, false dependences on
-/// the upper portions of registers, and to save code size.
-FunctionPass *createX86FixupBWInsts();
-
-/// Return a Machine IR pass that reassigns instruction chains from one domain
-/// to another, when profitable.
-FunctionPass *createX86DomainReassignmentPass();
-
-void initializeFixupBWInstPassPass(PassRegistry &);
-
-/// Return a Machine Function pass that attempts to replace
-/// ROP friendly instructions with alternatives.
-FunctionPass *createX86FixupGadgetsPass();
-
-/// This pass replaces EVEX encoded of AVX-512 instructiosn by VEX
-/// encoding when possible in order to reduce code size.
-FunctionPass *createX86EvexToVexInsts();
-
-/// This pass creates the thunks for the retpoline feature.
-FunctionPass *createX86RetpolineThunksPass();
-
-InstructionSelector *createX86InstructionSelector(const X86TargetMachine &TM,
- X86Subtarget &,
- X86RegisterBankInfo &);
-
-void initializeEvexToVexInstPassPass(PassRegistry &);
-
} // End llvm namespace
#endif
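
After this revert, X86.h declares only factory functions: each pass class stays private to its .cpp file and is reachable solely through a create*() entry point that the pass pipeline calls. A minimal standalone C++ sketch of that factory idiom follows; the types here are stand-ins for illustration, not LLVM's real FunctionPass hierarchy.

    #include <cstdio>
    #include <memory>
    #include <vector>

    // Stand-in for LLVM's FunctionPass: the pipeline only sees this interface.
    struct FunctionPass {
        virtual ~FunctionPass() = default;
        virtual bool runOnFunction(const char *name) = 0;
    };

    // The concrete pass class is private to its translation unit; the header
    // exposes only the factory -- the shape of createX86OptimizeLEAs() above.
    namespace {
    struct OptimizeLEAsPass : FunctionPass {
        bool runOnFunction(const char *name) override {
            std::printf("optimizing LEAs in %s\n", name);
            return true; // report that the function was modified
        }
    };
    } // anonymous namespace

    FunctionPass *createX86OptimizeLEAs() { return new OptimizeLEAsPass(); }

    int main() {
        std::vector<std::unique_ptr<FunctionPass>> pipeline;
        pipeline.emplace_back(createX86OptimizeLEAs());
        for (auto &p : pipeline)
            p->runOnFunction("f");
    }
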
diff --git a/gnu/llvm/lib/Target/X86/X86TargetMachine.cpp b/gnu/llvm/lib/Target/X86/X86TargetMachine.cpp
index 1d40c347efa..0e7e4c0c84a 100644
--- a/gnu/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/gnu/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -12,42 +12,16 @@
//===----------------------------------------------------------------------===//
#include "X86TargetMachine.h"
-#include "MCTargetDesc/X86MCTargetDesc.h"
#include "X86.h"
-#include "X86CallLowering.h"
-#include "X86LegalizerInfo.h"
-#include "X86MacroFusion.h"
-#include "X86Subtarget.h"
#include "X86TargetObjectFile.h"
#include "X86TargetTransformInfo.h"
-#include "llvm/ADT/Optional.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
-#include "llvm/Analysis/TargetTransformInfo.h"
-#include "llvm/CodeGen/ExecutionDepsFix.h"
-#include "llvm/CodeGen/GlobalISel/CallLowering.h"
-#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
-#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
-#include "llvm/CodeGen/GlobalISel/Legalizer.h"
-#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
-#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/TargetLoweringObjectFile.h"
-#include "llvm/CodeGen/TargetPassConfig.h"
-#include "llvm/IR/Attributes.h"
-#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
-#include "llvm/Pass.h"
-#include "llvm/Support/CodeGen.h"
+#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetOptions.h"
-#include <memory>
-#include <string>
-
using namespace llvm;
static cl::opt<bool> EnableMachineCombinerPass("x86-machine-combiner",
@@ -55,54 +29,33 @@ static cl::opt<bool> EnableMachineCombinerPass("x86-machine-combiner",
cl::init(true), cl::Hidden);
namespace llvm {
-
void initializeWinEHStatePassPass(PassRegistry &);
-void initializeFixupLEAPassPass(PassRegistry &);
-void initializeX86CallFrameOptimizationPass(PassRegistry &);
-void initializeX86CmovConverterPassPass(PassRegistry &);
-void initializeX86ExecutionDepsFixPass(PassRegistry &);
-void initializeX86DomainReassignmentPass(PassRegistry &);
-
-} // end namespace llvm
+}
extern "C" void LLVMInitializeX86Target() {
// Register the target.
- RegisterTargetMachine<X86TargetMachine> X(getTheX86_32Target());
- RegisterTargetMachine<X86TargetMachine> Y(getTheX86_64Target());
+ RegisterTargetMachine<X86TargetMachine> X(TheX86_32Target);
+ RegisterTargetMachine<X86TargetMachine> Y(TheX86_64Target);
PassRegistry &PR = *PassRegistry::getPassRegistry();
- initializeGlobalISel(PR);
initializeWinEHStatePassPass(PR);
- initializeFixupBWInstPassPass(PR);
- initializeEvexToVexInstPassPass(PR);
- initializeFixupLEAPassPass(PR);
- initializeX86CallFrameOptimizationPass(PR);
- initializeX86CmovConverterPassPass(PR);
- initializeX86ExecutionDepsFixPass(PR);
- initializeX86DomainReassignmentPass(PR);
}
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
if (TT.isOSBinFormatMachO()) {
if (TT.getArch() == Triple::x86_64)
- return llvm::make_unique<X86_64MachoTargetObjectFile>();
- return llvm::make_unique<TargetLoweringObjectFileMachO>();
+ return make_unique<X86_64MachoTargetObjectFile>();
+ return make_unique<TargetLoweringObjectFileMachO>();
}
- if (TT.isOSFreeBSD())
- return llvm::make_unique<X86FreeBSDTargetObjectFile>();
- if (TT.isOSLinux() || TT.isOSNaCl() || TT.isOSIAMCU())
- return llvm::make_unique<X86LinuxNaClTargetObjectFile>();
- if (TT.isOSSolaris())
- return llvm::make_unique<X86SolarisTargetObjectFile>();
- if (TT.isOSFuchsia())
- return llvm::make_unique<X86FuchsiaTargetObjectFile>();
+ if (TT.isOSLinux() || TT.isOSNaCl())
+ return make_unique<X86LinuxNaClTargetObjectFile>();
if (TT.isOSBinFormatELF())
- return llvm::make_unique<X86ELFTargetObjectFile>();
+ return make_unique<X86ELFTargetObjectFile>();
if (TT.isKnownWindowsMSVCEnvironment() || TT.isWindowsCoreCLREnvironment())
- return llvm::make_unique<X86WindowsTargetObjectFile>();
+ return make_unique<X86WindowsTargetObjectFile>();
if (TT.isOSBinFormatCOFF())
- return llvm::make_unique<TargetLoweringObjectFileCOFF>();
+ return make_unique<TargetLoweringObjectFileCOFF>();
llvm_unreachable("unknown subtarget type");
}
@@ -120,22 +73,17 @@ static std::string computeDataLayout(const Triple &TT) {
// Some ABIs align 64 bit integers and doubles to 64 bits, others to 32.
if (TT.isArch64Bit() || TT.isOSWindows() || TT.isOSNaCl())
Ret += "-i64:64";
- else if (TT.isOSIAMCU())
- Ret += "-i64:32-f64:32";
else
Ret += "-f64:32:64";
// Some ABIs align long double to 128 bits, others to 32.
- if (TT.isOSNaCl() || TT.isOSIAMCU())
+ if (TT.isOSNaCl())
; // No f80
else if (TT.isArch64Bit() || TT.isOSDarwin())
Ret += "-f80:128";
else
Ret += "-f80:32";
- if (TT.isOSIAMCU())
- Ret += "-f128:32";
-
// The registers can hold 8, 16, 32 or, in x86-64, 64 bits.
if (TT.isArch64Bit())
Ret += "-n8:16:32:64";
@@ -143,7 +91,7 @@ static std::string computeDataLayout(const Triple &TT) {
Ret += "-n8:16:32";
// The stack is aligned to 32 bits on some ABIs and 128 bits on others.
- if ((!TT.isArch64Bit() && TT.isOSWindows()) || TT.isOSIAMCU())
+ if (!TT.isArch64Bit() && TT.isOSWindows())
Ret += "-a:0:32-S32";
else
Ret += "-S128";
@@ -151,97 +99,50 @@ static std::string computeDataLayout(const Triple &TT) {
return Ret;
}
-static Reloc::Model getEffectiveRelocModel(const Triple &TT,
- Optional<Reloc::Model> RM) {
- bool is64Bit = TT.getArch() == Triple::x86_64;
- if (!RM.hasValue()) {
- // Darwin defaults to PIC in 64 bit mode and dynamic-no-pic in 32 bit mode.
- // Win64 requires rip-rel addressing, thus we force it to PIC. Otherwise we
- // use static relocation model by default.
- if (TT.isOSDarwin()) {
- if (is64Bit)
- return Reloc::PIC_;
- return Reloc::DynamicNoPIC;
- }
- if (TT.isOSWindows() && is64Bit)
- return Reloc::PIC_;
- return Reloc::Static;
- }
-
- // ELF and X86-64 don't have a distinct DynamicNoPIC model. DynamicNoPIC
- // is defined as a model for code which may be used in static or dynamic
- // executables but not necessarily a shared library. On X86-32 we just
- // compile in -static mode, in x86-64 we use PIC.
- if (*RM == Reloc::DynamicNoPIC) {
- if (is64Bit)
- return Reloc::PIC_;
- if (!TT.isOSDarwin())
- return Reloc::Static;
- }
-
- // If we are on Darwin, disallow static relocation model in X86-64 mode, since
- // the Mach-O file format doesn't support it.
- if (*RM == Reloc::Static && TT.isOSDarwin() && is64Bit)
- return Reloc::PIC_;
-
- return *RM;
-}
-
-static CodeModel::Model getEffectiveCodeModel(Optional<CodeModel::Model> CM,
- bool JIT, bool Is64Bit) {
- if (CM)
- return *CM;
- if (JIT)
- return Is64Bit ? CodeModel::Large : CodeModel::Small;
- return CodeModel::Small;
-}
-
-/// Create an X86 target.
+/// X86TargetMachine ctor - Create an X86 target.
///
X86TargetMachine::X86TargetMachine(const Target &T, const Triple &TT,
StringRef CPU, StringRef FS,
const TargetOptions &Options,
- Optional<Reloc::Model> RM,
- Optional<CodeModel::Model> CM,
- CodeGenOpt::Level OL, bool JIT)
- : LLVMTargetMachine(
- T, computeDataLayout(TT), TT, CPU, FS, Options,
- getEffectiveRelocModel(TT, RM),
- getEffectiveCodeModel(CM, JIT, TT.getArch() == Triple::x86_64), OL),
- TLOF(createTLOF(getTargetTriple())) {
+ Reloc::Model RM, CodeModel::Model CM,
+ CodeGenOpt::Level OL)
+ : LLVMTargetMachine(T, computeDataLayout(TT), TT, CPU, FS, Options, RM, CM,
+ OL),
+ TLOF(createTLOF(getTargetTriple())),
+ Subtarget(TT, CPU, FS, *this, Options.StackAlignmentOverride) {
// Windows stack unwinder gets confused when execution flow "falls through"
// after a call to 'noreturn' function.
// To prevent that, we emit a trap for 'unreachable' IR instructions.
// (which on X86, happens to be the 'ud2' instruction)
- // On PS4, the "return address" of a 'noreturn' call must still be within
- // the calling function, and TrapUnreachable is an easy way to get that.
- // The check here for 64-bit windows is a bit icky, but as we're unlikely
- // to ever want to mix 32 and 64-bit windows code in a single module
- // this should be fine.
- if ((TT.isOSWindows() && TT.getArch() == Triple::x86_64) || TT.isPS4())
+ if (Subtarget.isTargetWin64())
this->Options.TrapUnreachable = true;
+ // By default (and when -ffast-math is on), enable estimate codegen for
+ // everything except scalar division. By default, use 1 refinement step for
+ // all operations. Defaults may be overridden by using command-line options.
+ // Scalar division estimates are disabled because they break too much
+ // real-world code. These defaults match GCC behavior.
+ this->Options.Reciprocals.setDefaults("sqrtf", true, 1);
+ this->Options.Reciprocals.setDefaults("divf", false, 1);
+ this->Options.Reciprocals.setDefaults("vec-sqrtf", true, 1);
+ this->Options.Reciprocals.setDefaults("vec-divf", true, 1);
+
initAsmInfo();
}
-X86TargetMachine::~X86TargetMachine() = default;
+X86TargetMachine::~X86TargetMachine() {}
const X86Subtarget *
X86TargetMachine::getSubtargetImpl(const Function &F) const {
Attribute CPUAttr = F.getFnAttribute("target-cpu");
Attribute FSAttr = F.getFnAttribute("target-features");
- StringRef CPU = !CPUAttr.hasAttribute(Attribute::None)
- ? CPUAttr.getValueAsString()
- : (StringRef)TargetCPU;
- StringRef FS = !FSAttr.hasAttribute(Attribute::None)
- ? FSAttr.getValueAsString()
- : (StringRef)TargetFS;
-
- SmallString<512> Key;
- Key.reserve(CPU.size() + FS.size());
- Key += CPU;
- Key += FS;
+ std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
+ ? CPUAttr.getValueAsString().str()
+ : TargetCPU;
+ std::string FS = !FSAttr.hasAttribute(Attribute::None)
+ ? FSAttr.getValueAsString().str()
+ : TargetFS;
// FIXME: This is related to the code below to reset the target options,
// we need to know whether or not the soft float flag is set on the
@@ -249,15 +150,14 @@ X86TargetMachine::getSubtargetImpl(const Function &F) const {
// it as a key for the subtarget since that can be the only difference
// between two functions.
bool SoftFloat =
+ F.hasFnAttribute("use-soft-float") &&
F.getFnAttribute("use-soft-float").getValueAsString() == "true";
// If the soft float attribute is set on the function turn on the soft float
// subtarget feature.
if (SoftFloat)
- Key += FS.empty() ? "+soft-float" : ",+soft-float";
-
- FS = Key.substr(CPU.size());
+ FS += FS.empty() ? "+soft-float" : ",+soft-float";
- auto &I = SubtargetMap[Key];
+ auto &I = SubtargetMap[CPU + FS];
if (!I) {
// This needs to be done before we create a new subtarget since any
// creation will depend on the TM and the code generation flags on the
@@ -281,81 +181,47 @@ UseVZeroUpper("x86-use-vzeroupper", cl::Hidden,
// X86 TTI query.
//===----------------------------------------------------------------------===//
-TargetTransformInfo
-X86TargetMachine::getTargetTransformInfo(const Function &F) {
- return TargetTransformInfo(X86TTIImpl(this, F));
+TargetIRAnalysis X86TargetMachine::getTargetIRAnalysis() {
+ return TargetIRAnalysis([this](const Function &F) {
+ return TargetTransformInfo(X86TTIImpl(this, F));
+ });
}
+
//===----------------------------------------------------------------------===//
// Pass Pipeline Configuration
//===----------------------------------------------------------------------===//
namespace {
-
/// X86 Code Generator Pass Configuration Options.
class X86PassConfig : public TargetPassConfig {
public:
- X86PassConfig(X86TargetMachine &TM, PassManagerBase &PM)
+ X86PassConfig(X86TargetMachine *TM, PassManagerBase &PM)
: TargetPassConfig(TM, PM) {}
X86TargetMachine &getX86TargetMachine() const {
return getTM<X86TargetMachine>();
}
- ScheduleDAGInstrs *
- createMachineScheduler(MachineSchedContext *C) const override {
- ScheduleDAGMILive *DAG = createGenericSchedLive(C);
- DAG->addMutation(createX86MacroFusionDAGMutation());
- return DAG;
- }
-
void addIRPasses() override;
bool addInstSelector() override;
- bool addIRTranslator() override;
- bool addLegalizeMachineIR() override;
- bool addRegBankSelect() override;
- bool addGlobalInstructionSelect() override;
bool addILPOpts() override;
bool addPreISel() override;
- void addMachineSSAOptimization() override;
void addPreRegAlloc() override;
void addPostRegAlloc() override;
void addPreEmitPass() override;
- void addPreEmitPass2() override;
void addPreSched2() override;
};
-
-class X86ExecutionDepsFix : public ExecutionDepsFix {
-public:
- static char ID;
- X86ExecutionDepsFix() : ExecutionDepsFix(ID, X86::VR128XRegClass) {}
- StringRef getPassName() const override {
- return "X86 Execution Dependency Fix";
- }
-};
-char X86ExecutionDepsFix::ID;
-
-} // end anonymous namespace
-
-INITIALIZE_PASS(X86ExecutionDepsFix, "x86-execution-deps-fix",
- "X86 Execution Dependency Fix", false, false)
+} // namespace
TargetPassConfig *X86TargetMachine::createPassConfig(PassManagerBase &PM) {
- return new X86PassConfig(*this, PM);
+ return new X86PassConfig(this, PM);
}
void X86PassConfig::addIRPasses() {
- addPass(createAtomicExpandPass());
+ addPass(createAtomicExpandPass(&getX86TargetMachine()));
TargetPassConfig::addIRPasses();
-
- if (TM->getOptLevel() != CodeGenOpt::None)
- addPass(createInterleavedAccessPass());
-
- // Add passes that handle indirect branch removal and insertion of a retpoline
- // thunk. These will be a no-op unless a function subtarget has the retpoline
- // feature enabled.
- addPass(createIndirectBrExpandPass());
}
bool X86PassConfig::addInstSelector() {
@@ -368,26 +234,7 @@ bool X86PassConfig::addInstSelector() {
addPass(createCleanupLocalDynamicTLSPass());
addPass(createX86GlobalBaseRegPass());
- return false;
-}
-
-bool X86PassConfig::addIRTranslator() {
- addPass(new IRTranslator());
- return false;
-}
-bool X86PassConfig::addLegalizeMachineIR() {
- addPass(new Legalizer());
- return false;
-}
-
-bool X86PassConfig::addRegBankSelect() {
- addPass(new RegBankSelect());
- return false;
-}
-
-bool X86PassConfig::addGlobalInstructionSelect() {
- addPass(new InstructionSelect());
return false;
}
@@ -395,7 +242,6 @@ bool X86PassConfig::addILPOpts() {
addPass(&EarlyIfConverterID);
if (EnableMachineCombinerPass)
addPass(&MachineCombinerID);
- addPass(createX86CmovConverterPass());
return true;
}
@@ -408,18 +254,10 @@ bool X86PassConfig::addPreISel() {
}
void X86PassConfig::addPreRegAlloc() {
- if (getOptLevel() != CodeGenOpt::None) {
- addPass(&LiveRangeShrinkID);
- addPass(createX86FixupSetCC());
+ if (getOptLevel() != CodeGenOpt::None)
addPass(createX86OptimizeLEAs());
- addPass(createX86CallFrameOptimization());
- }
- addPass(createX86WinAllocaExpander());
-}
-void X86PassConfig::addMachineSSAOptimization() {
- addPass(createX86DomainReassignmentPass());
- TargetPassConfig::addMachineSSAOptimization();
+ addPass(createX86CallFrameOptimization());
}
void X86PassConfig::addPostRegAlloc() {
@@ -430,20 +268,13 @@ void X86PassConfig::addPreSched2() { addPass(createX86ExpandPseudoPass()); }
void X86PassConfig::addPreEmitPass() {
if (getOptLevel() != CodeGenOpt::None)
- addPass(new X86ExecutionDepsFix());
+ addPass(createExecutionDependencyFixPass(&X86::VR128RegClass));
if (UseVZeroUpper)
addPass(createX86IssueVZeroUpperPass());
if (getOptLevel() != CodeGenOpt::None) {
- addPass(createX86FixupBWInsts());
addPass(createX86PadShortFunctions());
addPass(createX86FixupLEAs());
- addPass(createX86EvexToVexInsts());
}
- addPass(createX86FixupGadgetsPass());
-}
-
-void X86PassConfig::addPreEmitPass2() {
- addPass(createX86RetpolineThunksPass());
}
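
The reverted getSubtargetImpl() caches one subtarget per distinct (CPU, feature-string) pair: per-function "target-cpu"/"target-features" attributes override the target machine's defaults, ",+soft-float" is appended when the function carries "use-soft-float", and the concatenated string serves as the map key. A standalone sketch of that key construction, simplified so that empty strings stand in for absent attributes:

    #include <iostream>
    #include <map>
    #include <string>

    // Mirrors the reverted key logic: function attributes win over the
    // target machine defaults, and soft-float is folded into the feature
    // string so two functions differing only in that attribute get
    // distinct subtargets.
    static std::string
    subtargetKey(const std::string &fnCPU, const std::string &fnFS,
                 bool softFloat,
                 const std::string &tmCPU, const std::string &tmFS)
    {
        std::string CPU = !fnCPU.empty() ? fnCPU : tmCPU;
        std::string FS = !fnFS.empty() ? fnFS : tmFS;
        if (softFloat)
            FS += FS.empty() ? "+soft-float" : ",+soft-float";
        return CPU + FS; // the SubtargetMap key in the code above
    }

    int main() {
        std::map<std::string, int> SubtargetMap; // stands in for the cache
        std::cout << subtargetKey("", "", false, "x86-64", "+sse2") << "\n";
        std::cout << subtargetKey("core2", "", true, "x86-64", "+sse2") << "\n";
    }
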
diff --git a/sys/arch/amd64/amd64/efifb.c b/sys/arch/amd64/amd64/efifb.c
index 5cf95af7e62..8cbb1f501d9 100644
--- a/sys/arch/amd64/amd64/efifb.c
+++ b/sys/arch/amd64/amd64/efifb.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: efifb.c,v 1.13 2018/04/20 16:06:04 deraadt Exp $ */
+/* $OpenBSD: efifb.c,v 1.14 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright (c) 2015 YASUOKA Masahiko <yasuoka@yasuoka.net>
@@ -101,7 +101,6 @@ int efifb_show_screen(void *, void *, int, void (*cb) (void *, int, int),
void *);
int efifb_list_font(void *, struct wsdisplay_font *);
int efifb_load_font(void *, void *, struct wsdisplay_font *);
-void efifb_scrollback(void *, void *, int lines);
void efifb_efiinfo_init(struct efifb *);
void efifb_cnattach_common(void);
@@ -134,8 +133,7 @@ struct wsdisplay_accessops efifb_accessops = {
.free_screen = efifb_free_screen,
.show_screen = efifb_show_screen,
.load_font = efifb_load_font,
- .list_font = efifb_list_font,
- .scrollback = efifb_scrollback,
+ .list_font = efifb_list_font
};
struct cfdriver efifb_cd = {
@@ -401,15 +399,6 @@ efifb_list_font(void *v, struct wsdisplay_font *font)
return (rasops_list_font(ri, font));
}
-void
-efifb_scrollback(void *v, void *cookie, int lines)
-{
- struct efifb_softc *sc = v;
- struct rasops_info *ri = &sc->sc_fb->rinfo;
-
- rasops_scrollback(ri, cookie, lines);
-}
-
int
efifb_cnattach(void)
{
@@ -495,12 +484,6 @@ efifb_cndetach(void)
efifb_console.detached = 1;
}
-void
-efifb_cnreattach(void)
-{
- efifb_console.detached = 0;
-}
-
int
efifb_cb_cnattach(void)
{
diff --git a/sys/arch/amd64/amd64/mainbus.c b/sys/arch/amd64/amd64/mainbus.c
index d26cf4c24de..67b0021910f 100644
--- a/sys/arch/amd64/amd64/mainbus.c
+++ b/sys/arch/amd64/amd64/mainbus.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: mainbus.c,v 1.41 2018/04/20 16:06:04 deraadt Exp $ */
+/* $OpenBSD: mainbus.c,v 1.42 2018/04/20 16:09:36 deraadt Exp $ */
/* $NetBSD: mainbus.c,v 1.1 2003/04/26 18:39:29 fvdl Exp $ */
/*
@@ -261,21 +261,6 @@ mainbus_attach(struct device *parent, struct device *self, void *aux)
#endif
}
-void
-mainbus_efifb_reattach(void)
-{
-#if NEFIFB > 0
- union mainbus_attach_args mba;
- struct device *self = device_mainbus();
-
- if (bios_efiinfo != NULL || efifb_cb_found()) {
- efifb_cnreattach();
- mba.mba_eaa.eaa_name = "efifb";
- config_found(self, &mba, mainbus_print);
- }
-#endif
-}
-
int
mainbus_print(void *aux, const char *pnp)
{
diff --git a/sys/arch/amd64/include/efifbvar.h b/sys/arch/amd64/include/efifbvar.h
index 26a5fe774e3..bf0509e646b 100644
--- a/sys/arch/amd64/include/efifbvar.h
+++ b/sys/arch/amd64/include/efifbvar.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: efifbvar.h,v 1.5 2018/04/20 16:06:04 deraadt Exp $ */
+/* $OpenBSD: efifbvar.h,v 1.6 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright (c) 2015 YASUOKA Masahiko <yasuoka@yasuoka.net>
@@ -28,7 +28,6 @@ struct pci_attach_args;
int efifb_cnattach(void);
int efifb_is_console(struct pci_attach_args *);
void efifb_cndetach(void);
-void efifb_cnreattach(void);
int efifb_cb_found(void);
int efifb_cb_cnattach(void);
diff --git a/sys/dev/pci/drm/drm_cache.c b/sys/dev/pci/drm/drm_cache.c
index f17a9c034a7..56654378487 100644
--- a/sys/dev/pci/drm/drm_cache.c
+++ b/sys/dev/pci/drm/drm_cache.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: drm_cache.c,v 1.4 2018/04/20 16:06:05 deraadt Exp $ */
+/* $OpenBSD: drm_cache.c,v 1.5 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright (c) 2017 Mark Kettenis
*
@@ -17,7 +17,6 @@
#include <dev/pci/drm/drmP.h>
-#if defined(__i386__) || defined(__amd64__)
static void
drm_clflush_page(struct vm_page *page)
{
@@ -54,10 +53,3 @@ drm_clflush_virt_range(void *addr, unsigned long length)
{
pmap_flush_cache((vaddr_t)addr, length);
}
-#else
-void
-drm_clflush_pages(struct vm_page *pages[], unsigned long num_pages)
-{
- STUB();
-}
-#endif
diff --git a/sys/dev/pci/drm/drm_cache.h b/sys/dev/pci/drm/drm_cache.h
index 8481197c2af..4642b588170 100644
--- a/sys/dev/pci/drm/drm_cache.h
+++ b/sys/dev/pci/drm/drm_cache.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: drm_cache.h,v 1.2 2018/04/20 16:06:05 deraadt Exp $ */
+/* $OpenBSD: drm_cache.h,v 1.3 2018/04/20 16:09:36 deraadt Exp $ */
/**************************************************************************
*
* Copyright 2009 Red Hat Inc.
@@ -38,15 +38,4 @@
void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
#endif
-static inline bool drm_arch_can_wc_memory(void)
-{
-#if defined(__powerpc__)
- return false;
-#elif defined(__mips__)
- return false;
-#else
- return true;
-#endif
-}
-
#endif
diff --git a/sys/dev/pci/drm/drm_linux.c b/sys/dev/pci/drm/drm_linux.c
index 4f3a54f596f..0245a7af73c 100644
--- a/sys/dev/pci/drm/drm_linux.c
+++ b/sys/dev/pci/drm/drm_linux.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: drm_linux.c,v 1.21 2018/04/20 16:06:05 deraadt Exp $ */
+/* $OpenBSD: drm_linux.c,v 1.22 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright (c) 2013 Jonathan Gray <jsg@openbsd.org>
* Copyright (c) 2015, 2016 Mark Kettenis <kettenis@openbsd.org>
@@ -20,10 +20,6 @@
#include <dev/pci/ppbreg.h>
#include <sys/event.h>
-struct mutex sch_mtx = MUTEX_INITIALIZER(IPL_SCHED);
-void *sch_ident;
-int sch_priority;
-
void
flush_barrier(void *arg)
{
@@ -83,7 +79,7 @@ flush_delayed_work(struct delayed_work *dwork)
tsleep(&barrier, PWAIT, "fldwto", 1);
task_set(&task, flush_barrier, &barrier);
- task_add(dwork->tq ? dwork->tq : systq, &task);
+ task_add(dwork->tq, &task);
while (!barrier) {
sleep_setup(&sls, &barrier, PWAIT, "fldwbar");
sleep_finish(&sls, !barrier);
@@ -795,11 +791,3 @@ drm_sysfs_hotplug_event(struct drm_device *dev)
{
KNOTE(&dev->note, NOTE_CHANGE);
}
-
-unsigned int drm_fence_count;
-
-unsigned int
-fence_context_alloc(unsigned int num)
-{
- return __sync_add_and_fetch(&drm_fence_count, num) - num;
-}
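
flush_delayed_work() above flushes by queueing a barrier: flush_barrier() is task_add()ed to the same queue and the caller sleeps until it runs, so FIFO ordering guarantees all earlier work has completed. A portable C++ analogue of that flush-by-barrier scheme, with std::thread standing in for the kernel task queue:

    #include <condition_variable>
    #include <cstdio>
    #include <functional>
    #include <mutex>
    #include <queue>
    #include <thread>

    int main() {
        std::queue<std::function<void()>> tasks;
        std::mutex m;
        std::condition_variable cv;
        bool done = false;

        // Worker drains the queue in FIFO order, like the drm task queue.
        std::thread worker([&] {
            for (;;) {
                std::function<void()> f;
                {
                    std::unique_lock<std::mutex> l(m);
                    cv.wait(l, [&] { return done || !tasks.empty(); });
                    if (tasks.empty()) return; // done and nothing pending
                    f = std::move(tasks.front());
                    tasks.pop();
                }
                f();
            }
        });

        auto add = [&](std::function<void()> f) {
            std::lock_guard<std::mutex> l(m);
            tasks.push(std::move(f));
            cv.notify_one();
        };

        add([] { std::puts("pending work"); });

        // Flush: enqueue a barrier and wait for it, as flush_barrier() does.
        std::mutex bm;
        std::condition_variable bcv;
        bool barrier = false;
        add([&] {
            std::lock_guard<std::mutex> l(bm);
            barrier = true;
            bcv.notify_all();
        });
        {
            std::unique_lock<std::mutex> l(bm);
            bcv.wait(l, [&] { return barrier; }); // earlier work is done now
        }
        std::puts("queue flushed");

        {
            std::lock_guard<std::mutex> l(m);
            done = true;
        }
        cv.notify_one();
        worker.join();
    }
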
diff --git a/sys/dev/pci/drm/drm_linux.h b/sys/dev/pci/drm/drm_linux.h
index b2830ff8633..5dead9a6040 100644
--- a/sys/dev/pci/drm/drm_linux.h
+++ b/sys/dev/pci/drm/drm_linux.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: drm_linux.h,v 1.86 2018/04/20 16:06:05 deraadt Exp $ */
+/* $OpenBSD: drm_linux.h,v 1.87 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright (c) 2013, 2014, 2015 Mark Kettenis
* Copyright (c) 2017 Martin Pieuchot
@@ -54,8 +54,6 @@
#pragma GCC diagnostic ignored "-Wformat-zero-length"
#endif
-#define STUB() do { printf("%s: stub\n", __func__); } while(0)
-
typedef int irqreturn_t;
enum irqreturn {
IRQ_NONE = 0,
@@ -560,72 +558,45 @@ _spin_unlock_irqrestore(struct mutex *mtxp, __unused unsigned long flags
#define free_irq(irq, dev)
#define synchronize_irq(x)
-typedef struct wait_queue wait_queue_t;
-struct wait_queue {
- unsigned int flags;
- void *private;
- int (*func)(wait_queue_t *, unsigned, int, void *);
-};
-
-extern struct mutex sch_mtx;
-extern void *sch_ident;
-extern int sch_priority;
+#define fence_wait(x, y)
+#define fence_put(x)
struct wait_queue_head {
struct mutex lock;
unsigned int count;
- struct wait_queue *_wq;
};
typedef struct wait_queue_head wait_queue_head_t;
-#define MAX_SCHEDULE_TIMEOUT (INT32_MAX)
-
static inline void
init_waitqueue_head(wait_queue_head_t *wq)
{
mtx_init(&wq->lock, IPL_TTY);
wq->count = 0;
- wq->_wq = NULL;
-}
-
-static inline void
-__add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
-{
- head->_wq = new;
-}
-
-static inline void
-__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
-{
- head->_wq = NULL;
}
#define __wait_event_intr_timeout(wq, condition, timo, prio) \
({ \
long ret = timo; \
+ mtx_enter(&(wq).lock); \
do { \
int deadline, __error; \
\
KASSERT(!cold); \
- \
- mtx_enter(&sch_mtx); \
atomic_inc_int(&(wq).count); \
deadline = ticks + ret; \
- __error = msleep(&wq, &sch_mtx, prio, "drmweti", ret); \
+ __error = msleep(&wq, &(wq).lock, prio, "drmweti", ret); \
ret = deadline - ticks; \
atomic_dec_int(&(wq).count); \
if (__error == ERESTART || __error == EINTR) { \
ret = -ERESTARTSYS; \
- mtx_leave(&sch_mtx); \
break; \
} \
if (timo && (ret <= 0 || __error == EWOULDBLOCK)) { \
- mtx_leave(&sch_mtx); \
ret = ((condition)) ? 1 : 0; \
break; \
} \
- mtx_leave(&sch_mtx); \
} while (ret > 0 && !(condition)); \
+ mtx_leave(&(wq).lock); \
ret; \
})
@@ -638,14 +609,6 @@ do { \
__wait_event_intr_timeout(wq, condition, 0, 0); \
} while (0)
-#define wait_event_interruptible_locked(wq, condition) \
-({ \
- int __ret = 0; \
- if (!(condition)) \
- __ret = __wait_event_intr_timeout(wq, condition, 0, PCATCH); \
- __ret; \
-})
-
/*
* Sleep until `condition' gets true or `timo' expires.
*
@@ -676,43 +639,16 @@ do { \
__ret; \
})
-static inline void
-_wake_up(wait_queue_head_t *wq LOCK_FL_VARS)
-{
- _mtx_enter(&wq->lock LOCK_FL_ARGS);
- if (wq->_wq != NULL && wq->_wq->func != NULL)
- wq->_wq->func(wq->_wq, 0, wq->_wq->flags, NULL);
- else {
- mtx_enter(&sch_mtx);
- wakeup(wq);
- mtx_leave(&sch_mtx);
- }
- _mtx_leave(&wq->lock LOCK_FL_ARGS);
-}
-
-#define wake_up_process(task) \
+#define wake_up(wq) \
do { \
- mtx_enter(&sch_mtx); \
- wakeup(task); \
- mtx_leave(&sch_mtx); \
+ mtx_enter(&(wq)->lock); \
+ wakeup(wq); \
+ mtx_leave(&(wq)->lock); \
} while (0)
+#define wake_up_all(wq) wake_up(wq)
+#define wake_up_all_locked(wq) wakeup(wq)
+#define wake_up_interruptible(wq) wake_up(wq)
-#define wake_up(wq) _wake_up(wq LOCK_FILE_LINE)
-#define wake_up_all(wq) _wake_up(wq LOCK_FILE_LINE)
-
-static inline void
-wake_up_all_locked(wait_queue_head_t *wq)
-{
- if (wq->_wq != NULL && wq->_wq->func != NULL)
- wq->_wq->func(wq->_wq, 0, wq->_wq->flags, NULL);
- else {
- mtx_enter(&sch_mtx);
- wakeup(wq);
- mtx_leave(&sch_mtx);
- }
-}
-
-#define wake_up_interruptible(wq) _wake_up(wq LOCK_FILE_LINE)
#define waitqueue_active(wq) ((wq)->count > 0)
struct completion {
@@ -912,7 +848,6 @@ typedef void *async_cookie_t;
#define TASK_UNINTERRUPTIBLE 0
#define TASK_INTERRUPTIBLE PCATCH
-#define TASK_RUNNING -1
#define signal_pending_state(x, y) CURSIG(curproc)
#define signal_pending(y) CURSIG(curproc)
@@ -939,7 +874,6 @@ timespec_sub(struct timespec t1, struct timespec t2)
#define time_in_range(x, min, max) ((x) >= (min) && (x) <= (max))
extern volatile unsigned long jiffies;
-#define jiffies_64 jiffies /* XXX */
#undef HZ
#define HZ hz
@@ -1284,306 +1218,27 @@ static inline void
prepare_to_wait(wait_queue_head_t *wq, wait_queue_head_t **wait, int state)
{
if (*wait == NULL) {
- mtx_enter(&sch_mtx);
+ mtx_enter(&wq->lock);
*wait = wq;
}
- MUTEX_ASSERT_LOCKED(&sch_mtx);
- sch_ident = wq;
- sch_priority = state;
}
static inline void
finish_wait(wait_queue_head_t *wq, wait_queue_head_t **wait)
{
- if (*wait) {
- MUTEX_ASSERT_LOCKED(&sch_mtx);
- sch_ident = NULL;
- mtx_leave(&sch_mtx);
- }
-}
-
-static inline void
-set_current_state(int state)
-{
- if (sch_ident != curproc)
- mtx_enter(&sch_mtx);
- MUTEX_ASSERT_LOCKED(&sch_mtx);
- sch_ident = curproc;
- sch_priority = state;
-}
-
-static inline void
-__set_current_state(int state)
-{
- KASSERT(state == TASK_RUNNING);
- if (sch_ident == curproc) {
- MUTEX_ASSERT_LOCKED(&sch_mtx);
- sch_ident = NULL;
- mtx_leave(&sch_mtx);
- }
+ if (*wait)
+ mtx_leave(&wq->lock);
}
static inline long
-schedule_timeout(long timeout)
+schedule_timeout(long timeout, wait_queue_head_t **wait)
{
- int err;
- long deadline;
-
if (cold) {
delay((timeout * 1000000) / hz);
- return 0;
+ return -ETIMEDOUT;
}
- if (timeout == MAX_SCHEDULE_TIMEOUT) {
- err = msleep(sch_ident, &sch_mtx, sch_priority, "schto", 0);
- sch_ident = curproc;
- return timeout;
- }
-
- deadline = ticks + timeout;
- err = msleep(sch_ident, &sch_mtx, sch_priority, "schto", timeout);
- timeout = deadline - ticks;
- if (timeout < 0)
- timeout = 0;
- sch_ident = curproc;
- return timeout;
-}
-
-struct seq_file;
-
-static inline void
-seq_printf(struct seq_file *m, const char *fmt, ...) {};
-
-#define preempt_enable()
-#define preempt_disable()
-
-#define FENCE_TRACE(fence, fmt, args...) do {} while(0)
-
-struct fence {
- struct kref refcount;
- const struct fence_ops *ops;
- unsigned long flags;
- unsigned int context;
- unsigned int seqno;
- spinlock_t *lock;
- struct list_head cb_list;
-};
-
-enum fence_flag_bits {
- FENCE_FLAG_SIGNALED_BIT,
- FENCE_FLAG_ENABLE_SIGNAL_BIT,
- FENCE_FLAG_USER_BITS,
-};
-
-struct fence_ops {
- const char * (*get_driver_name)(struct fence *);
- const char * (*get_timeline_name)(struct fence *);
- bool (*enable_signaling)(struct fence *);
- bool (*signaled)(struct fence *);
- long (*wait)(struct fence *, bool, long);
- void (*release)(struct fence *);
-};
-
-struct fence_cb;
-typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb);
-
-struct fence_cb {
- struct list_head node;
- fence_func_t func;
-};
-
-unsigned int fence_context_alloc(unsigned int);
-
-static inline struct fence *
-fence_get(struct fence *fence)
-{
- if (fence)
- kref_get(&fence->refcount);
- return fence;
-}
-
-static inline struct fence *
-fence_get_rcu(struct fence *fence)
-{
- if (fence)
- kref_get(&fence->refcount);
- return fence;
-}
-
-static inline void
-fence_release(struct kref *ref)
-{
- struct fence *fence = container_of(ref, struct fence, refcount);
- if (fence->ops && fence->ops->release)
- fence->ops->release(fence);
- else
- free(fence, M_DRM, 0);
-}
-
-static inline void
-fence_put(struct fence *fence)
-{
- if (fence)
- kref_put(&fence->refcount, fence_release);
-}
-
-static inline int
-fence_signal(struct fence *fence)
-{
- unsigned long flags;
-
- if (fence == NULL)
- return -EINVAL;
-
- if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return -EINVAL;
-
- if (test_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
- struct fence_cb *cur, *tmp;
-
- spin_lock_irqsave(fence->lock, flags);
- list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
- list_del_init(&cur->node);
- cur->func(fence, cur);
- }
- spin_unlock_irqrestore(fence->lock, flags);
- }
-
- return 0;
-}
-
-static inline int
-fence_signal_locked(struct fence *fence)
-{
- struct fence_cb *cur, *tmp;
-
- if (fence == NULL)
- return -EINVAL;
-
- if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return -EINVAL;
-
- list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) {
- list_del_init(&cur->node);
- cur->func(fence, cur);
- }
-
- return 0;
-}
-
-static inline bool
-fence_is_signaled(struct fence *fence)
-{
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return true;
-
- if (fence->ops->signaled && fence->ops->signaled(fence)) {
- fence_signal(fence);
- return true;
- }
-
- return false;
-}
-
-static inline long
-fence_wait_timeout(struct fence *fence, bool intr, signed long timeout)
-{
- if (timeout < 0)
- return -EINVAL;
-
- if (timeout == 0)
- return fence_is_signaled(fence);
-
- return fence->ops->wait(fence, intr, timeout);
-}
-
-static inline long
-fence_wait(struct fence *fence, bool intr)
-{
- return fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
-}
-
-static inline void
-fence_enable_sw_signaling(struct fence *fence)
-{
- unsigned long flags;
-
- if (!test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags) &&
- !test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
- spin_lock_irqsave(fence->lock, flags);
- if (!fence->ops->enable_signaling(fence))
- fence_signal_locked(fence);
- spin_unlock_irqrestore(fence->lock, flags);
- }
-}
-
-static inline void
-fence_init(struct fence *fence, const struct fence_ops *ops,
- spinlock_t *lock, unsigned context, unsigned seqno)
-{
- fence->ops = ops;
- fence->lock = lock;
- fence->context = context;
- fence->seqno = seqno;
- fence->flags = 0;
- kref_init(&fence->refcount);
- INIT_LIST_HEAD(&fence->cb_list);
-}
-
-static inline int
-fence_add_callback(struct fence *fence, struct fence_cb *cb,
- fence_func_t func)
-{
- unsigned long flags;
- int ret = 0;
- bool was_set;
-
- if (WARN_ON(!fence || !func))
- return -EINVAL;
-
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
- INIT_LIST_HEAD(&cb->node);
- return -ENOENT;
- }
-
- spin_lock_irqsave(fence->lock, flags);
-
- was_set = test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
-
- if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- ret = -ENOENT;
- else if (!was_set) {
- if (!fence->ops->enable_signaling(fence)) {
- fence_signal_locked(fence);
- ret = -ENOENT;
- }
- }
-
- if (!ret) {
- cb->func = func;
- list_add_tail(&cb->node, &fence->cb_list);
- } else
- INIT_LIST_HEAD(&cb->node);
- spin_unlock_irqrestore(fence->lock, flags);
-
- return ret;
-}
-
-static inline bool
-fence_remove_callback(struct fence *fence, struct fence_cb *cb)
-{
- unsigned long flags;
- bool ret;
-
- spin_lock_irqsave(fence->lock, flags);
-
- ret = !list_empty(&cb->node);
- if (ret)
- list_del_init(&cb->node);
-
- spin_unlock_irqrestore(fence->lock, flags);
-
- return ret;
+ return -msleep(*wait, &(*wait)->lock, PZERO, "schto", timeout);
}
struct idr_entry {
@@ -2209,55 +1864,6 @@ cpu_relax(void)
#define cpu_has_pat 1
#define cpu_has_clflush 1
-struct lock_class_key {
-};
-
-typedef struct {
- unsigned int sequence;
-} seqcount_t;
-
-static inline void
-__seqcount_init(seqcount_t *s, const char *name,
- struct lock_class_key *key)
-{
- s->sequence = 0;
-}
-
-static inline unsigned int
-read_seqcount_begin(const seqcount_t *s)
-{
- unsigned int r;
- for (;;) {
- r = s->sequence;
- if ((r & 1) == 0)
- break;
- cpu_relax();
- }
- membar_consumer();
- return r;
-}
-
-static inline int
-read_seqcount_retry(const seqcount_t *s, unsigned start)
-{
- membar_consumer();
- return (s->sequence != start);
-}
-
-static inline void
-write_seqcount_begin(seqcount_t *s)
-{
- s->sequence++;
- membar_producer();
-}
-
-static inline void
-write_seqcount_end(seqcount_t *s)
-{
- membar_producer();
- s->sequence++;
-}
-
static inline uint32_t ror32(uint32_t word, unsigned int shift)
{
return (word >> shift) | (word << (32 - shift));
diff --git a/sys/dev/pci/drm/drm_linux_rbtree.h b/sys/dev/pci/drm/drm_linux_rbtree.h
index 0245dcedccf..864feb3b0b3 100644
--- a/sys/dev/pci/drm/drm_linux_rbtree.h
+++ b/sys/dev/pci/drm/drm_linux_rbtree.h
@@ -60,10 +60,8 @@ RB_PROTOTYPE(linux_root, rb_node, __entry, panic_cmp);
#define rb_set_parent(r, p) rb_parent((r)) = (p)
#define rb_set_color(r, c) rb_color((r)) = (c)
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
-#define rb_entry_safe(ptr, type, member) \
- (ptr ? rb_entry(ptr, type, member) : NULL)
-#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
+#define RB_EMPTY_ROOT(root) RB_EMPTY((struct linux_root *)root)
#define RB_EMPTY_NODE(node) (rb_parent(node) == node)
#define RB_CLEAR_NODE(node) (rb_set_parent(node, node))
@@ -75,12 +73,6 @@ RB_PROTOTYPE(linux_root, rb_node, __entry, panic_cmp);
#define rb_prev(node) RB_PREV(linux_root, NULL, (node))
#define rb_first(root) RB_MIN(linux_root, (struct linux_root *)(root))
#define rb_last(root) RB_MAX(linux_root, (struct linux_root *)(root))
-#define rbtree_postorder_for_each_entry_safe(x, y, head, member) \
- for ((x) = rb_entry_safe(RB_MIN(linux_root, (struct linux_root *)head), \
- __typeof(*x), member); \
- ((x) != NULL) && ({(y) = \
- rb_entry_safe(linux_root_RB_NEXT(&x->member), typeof(*x), member); 1; }); \
- (x) = (y))
static inline void
rb_link_node(struct rb_node *node, struct rb_node *parent,
@@ -116,36 +108,4 @@ rb_replace_node(struct rb_node *victim, struct rb_node *new,
#undef RB_ROOT
#define RB_ROOT (struct rb_root) { NULL }
-struct interval_tree_node {
- struct rb_node rb;
- unsigned long start;
- unsigned long last;
-};
-
-static inline struct interval_tree_node *
-interval_tree_iter_first(struct rb_root *root,
- unsigned long start, unsigned long last)
-{
-#ifdef DRMDEBUG
- printf("%s: stub start: 0x%lx last: 0x%lx\n", __func__, start, last);
-#endif
- return NULL;
-}
-
-static inline void
-interval_tree_insert(struct interval_tree_node *node, struct rb_root *root)
-{
-#ifdef DRMDEBUG
- printf("%s: stub start: 0x%lx last: 0x%lx\n", __func__, node->start, node->last);
-#endif
-}
-
-static inline void
-interval_tree_remove(struct interval_tree_node *node, struct rb_root *root)
-{
-#ifdef DRMDEBUG
- printf("%s: stub start: 0x%lx last: 0x%lx\n", __func__, node->start, node->last);
-#endif
-}
-
#endif /* _LINUX_RBTREE_H_ */
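
rb_entry() above is the container_of() idiom: a tree node is embedded inside the object it indexes, and a pointer to the node is mapped back to the enclosing object by subtracting the member's offset. A standalone C++ demonstration of the same trick, with a simplified rb_node rather than the kernel's:

    #include <cstddef>
    #include <cstdio>

    struct rb_node { rb_node *left, *right; }; // simplified stand-in

    // Recover the enclosing object from a pointer to its embedded node.
    #define rb_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct object {
        int key;
        rb_node node;   // intrusive tree linkage embedded in the object
    };

    int main() {
        object o{42, {nullptr, nullptr}};
        rb_node *n = &o.node;                     // what a tree lookup returns
        object *back = rb_entry(n, object, node); // recover the container
        std::printf("key = %d\n", back->key);     // prints 42
    }
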
diff --git a/sys/dev/pci/drm/drm_pciids.h b/sys/dev/pci/drm/drm_pciids.h
index 8bc073d297d..ef64ba06202 100644
--- a/sys/dev/pci/drm/drm_pciids.h
+++ b/sys/dev/pci/drm/drm_pciids.h
@@ -1,26 +1,4 @@
#define radeon_PCI_IDS \
- {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -160,40 +138,6 @@
{0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x665f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
@@ -268,18 +212,6 @@
{0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x67A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x67A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x67A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x67A8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x67A9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x67AA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x67B0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x67B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x67B8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x67B9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x67BA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x67BE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -304,7 +236,6 @@
{0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
- {0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -629,38 +560,6 @@
{0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
- {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
@@ -747,6 +646,29 @@
{0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
{0, 0, 0}
+#define mach64_PCI_IDS \
+ {0x1002, 0x4749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4c49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4c50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4c51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4c42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4c44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x474c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x474f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4752, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4753, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x474d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x474e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4c52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4c53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4c4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x1002, 0x4c4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0, 0, 0}
+
#define sisdrv_PCI_IDS \
{0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
@@ -786,6 +708,10 @@
{0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
+#define gamma_PCI_IDS \
+ {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0, 0, 0}
+
#define savage_PCI_IDS \
{0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
{0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
@@ -811,3 +737,6 @@
{0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
{0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
{0, 0, 0}
+
+#define ffb_PCI_IDS \
+ {0, 0, 0}
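
Every *_PCI_IDS macro in this header expands to a brace-initialized match table whose entries carry {vendor, device, subvendor, subdevice, class, class_mask, driver_data} and whose end is marked by the all-zero sentinel seen above. A minimal sketch of how a driver can walk such a table at attach time; the struct layout and helper name are inferred from the initializers for illustration, not quoted from the tree:

#include <stddef.h>

/*
 * Illustrative sketch only; field names are inferred from the
 * seven-field initializers in drm_pciids.h, and PCI_ANY_ID wildcard
 * handling for the subvendor/subdevice fields is omitted for brevity.
 */
struct drm_pcidev_sketch {
	int vendor, device;
	int subvendor, subdevice;
	int class, class_mask;
	unsigned long driver_data;	/* e.g. CHIP_* | RADEON_* flags */
};

static const struct drm_pcidev_sketch radeondrm_ids[] = {
	radeon_PCI_IDS	/* defined earlier in this header; ends in {0, 0, 0} */
};

static const struct drm_pcidev_sketch *
find_description(int vendor, int device, const struct drm_pcidev_sketch *ids)
{
	int i;

	/* the {0, 0, 0} sentinel has vendor == 0, so the scan terminates */
	for (i = 0; ids[i].vendor != 0; i++)
		if (ids[i].vendor == vendor && ids[i].device == device)
			return &ids[i];
	return NULL;
}

Removing a row, as the Hawaii/Kabini/Mullins hunks above do, makes the scan fall through to the sentinel for those device IDs, so the driver simply no longer claims them.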
diff --git a/sys/dev/pci/drm/files.drm b/sys/dev/pci/drm/files.drm
index 0219e9f8205..79e0d686a61 100644
--- a/sys/dev/pci/drm/files.drm
+++ b/sys/dev/pci/drm/files.drm
@@ -1,5 +1,5 @@
# $NetBSD: files.drm,v 1.2 2007/03/28 11:29:37 jmcneill Exp $
-# $OpenBSD: files.drm,v 1.39 2018/04/20 16:06:05 deraadt Exp $
+# $OpenBSD: files.drm,v 1.40 2018/04/20 16:09:36 deraadt Exp $
# direct rendering modules
define drmbase {[console = -1]}
@@ -28,13 +28,12 @@ file dev/pci/drm/drm_bridge.c drm
file dev/pci/drm/drm_gem.c drm
file dev/pci/drm/drm_vma_manager.c drm
file dev/pci/drm/drm_linux.c drm
-file dev/pci/drm/drm_cache.c drm
+file dev/pci/drm/drm_cache.c drm & inteldrm
file dev/pci/drm/drm_panel.c drm & inteldrm
file dev/pci/drm/drm_dp_mst_topology.c drm
file dev/pci/drm/drm_mipi_dsi.c drm & inteldrm
file dev/pci/drm/linux_hdmi.c drm
file dev/pci/drm/linux_list_sort.c drm
-file dev/pci/drm/linux_reservation.c drm
define ttm
file dev/pci/drm/ttm/ttm_agp_backend.c ttm & agp
@@ -43,12 +42,10 @@ file dev/pci/drm/ttm/ttm_bo_manager.c ttm
file dev/pci/drm/ttm/ttm_bo_util.c ttm
file dev/pci/drm/ttm/ttm_bo_vm.c ttm
file dev/pci/drm/ttm/ttm_execbuf_util.c ttm
-#file dev/pci/drm/ttm/ttm_lock.c ttm
+file dev/pci/drm/ttm/ttm_lock.c ttm
file dev/pci/drm/ttm/ttm_memory.c ttm
-#file dev/pci/drm/ttm/ttm_module.c ttm
-#file dev/pci/drm/ttm/ttm_object.c ttm
+file dev/pci/drm/ttm/ttm_object.c ttm
file dev/pci/drm/ttm/ttm_page_alloc.c ttm
-file dev/pci/drm/ttm/ttm_page_alloc_dma.c ttm
file dev/pci/drm/ttm/ttm_tt.c ttm
device inteldrm: agpint, drmbase, wsemuldisplaydev, rasops32, rasops_rotation, i2cbus, i2c_bitbang
@@ -130,42 +127,28 @@ file dev/pci/drm/radeon/atombios_crtc.c radeondrm
file dev/pci/drm/radeon/atombios_dp.c radeondrm
file dev/pci/drm/radeon/atombios_encoders.c radeondrm
file dev/pci/drm/radeon/atombios_i2c.c radeondrm
-file dev/pci/drm/radeon/btc_dpm.c radeondrm
file dev/pci/drm/radeon/cayman_blit_shaders.c radeondrm
-file dev/pci/drm/radeon/ci_dpm.c radeondrm
-file dev/pci/drm/radeon/ci_smc.c radeondrm
-file dev/pci/drm/radeon/cik.c radeondrm
-file dev/pci/drm/radeon/cik_blit_shaders.c radeondrm
-file dev/pci/drm/radeon/cik_sdma.c radeondrm
-file dev/pci/drm/radeon/cypress_dpm.c radeondrm
-file dev/pci/drm/radeon/dce3_1_afmt.c radeondrm
-file dev/pci/drm/radeon/dce6_afmt.c radeondrm
file dev/pci/drm/radeon/evergreen.c radeondrm
+file dev/pci/drm/radeon/evergreen_blit_kms.c radeondrm
file dev/pci/drm/radeon/evergreen_blit_shaders.c radeondrm
file dev/pci/drm/radeon/evergreen_cs.c radeondrm
-file dev/pci/drm/radeon/evergreen_dma.c radeondrm
file dev/pci/drm/radeon/evergreen_hdmi.c radeondrm
-file dev/pci/drm/radeon/kv_dpm.c radeondrm
-file dev/pci/drm/radeon/kv_smc.c radeondrm
file dev/pci/drm/radeon/ni.c radeondrm
-file dev/pci/drm/radeon/ni_dma.c radeondrm
-file dev/pci/drm/radeon/ni_dpm.c radeondrm
file dev/pci/drm/radeon/r100.c radeondrm
file dev/pci/drm/radeon/r200.c radeondrm
file dev/pci/drm/radeon/r300.c radeondrm
file dev/pci/drm/radeon/r420.c radeondrm
file dev/pci/drm/radeon/r520.c radeondrm
file dev/pci/drm/radeon/r600.c radeondrm
+file dev/pci/drm/radeon/r600_audio.c radeondrm
+file dev/pci/drm/radeon/r600_blit_kms.c radeondrm
file dev/pci/drm/radeon/r600_blit_shaders.c radeondrm
file dev/pci/drm/radeon/r600_cs.c radeondrm
-file dev/pci/drm/radeon/r600_dma.c radeondrm
-file dev/pci/drm/radeon/r600_dpm.c radeondrm
file dev/pci/drm/radeon/r600_hdmi.c radeondrm
#file dev/pci/drm/radeon/radeon_acpi.c radeondrm
file dev/pci/drm/radeon/radeon_agp.c radeondrm
file dev/pci/drm/radeon/radeon_asic.c radeondrm
file dev/pci/drm/radeon/radeon_atombios.c radeondrm
-file dev/pci/drm/radeon/radeon_audio.c radeondrm
file dev/pci/drm/radeon/radeon_benchmark.c radeondrm
file dev/pci/drm/radeon/radeon_bios.c radeondrm
file dev/pci/drm/radeon/radeon_clocks.c radeondrm
@@ -176,61 +159,30 @@ file dev/pci/drm/radeon/radeon_cursor.c radeondrm
file dev/pci/drm/radeon/radeon_device.c radeondrm
file dev/pci/drm/radeon/radeon_display.c radeondrm
file dev/pci/drm/radeon/radeon_dp_auxch.c radeondrm
-file dev/pci/drm/radeon/radeon_dp_mst.c radeondrm
-file dev/pci/drm/radeon/radeon_drv.c radeondrm
file dev/pci/drm/radeon/radeon_encoders.c radeondrm
file dev/pci/drm/radeon/radeon_fb.c radeondrm
file dev/pci/drm/radeon/radeon_fence.c radeondrm
file dev/pci/drm/radeon/radeon_gart.c radeondrm
file dev/pci/drm/radeon/radeon_gem.c radeondrm
file dev/pci/drm/radeon/radeon_i2c.c radeondrm
-file dev/pci/drm/radeon/radeon_ib.c radeondrm
-#file dev/pci/drm/radeon/radeon_ioc32.c radeondrm COMPAT
file dev/pci/drm/radeon/radeon_irq_kms.c radeondrm
-#file dev/pci/drm/radeon/radeon_kfd.c radeondrm
file dev/pci/drm/radeon/radeon_kms.c radeondrm
file dev/pci/drm/radeon/radeon_legacy_crtc.c radeondrm
file dev/pci/drm/radeon/radeon_legacy_encoders.c radeondrm
file dev/pci/drm/radeon/radeon_legacy_tv.c radeondrm
-#file dev/pci/drm/radeon/radeon_mn.c radeondrm not needed?
file dev/pci/drm/radeon/radeon_object.c radeondrm
file dev/pci/drm/radeon/radeon_pm.c radeondrm
#file dev/pci/drm/radeon/radeon_prime.c radeondrm
file dev/pci/drm/radeon/radeon_ring.c radeondrm
file dev/pci/drm/radeon/radeon_sa.c radeondrm
file dev/pci/drm/radeon/radeon_semaphore.c radeondrm
-file dev/pci/drm/radeon/radeon_sync.c radeondrm
file dev/pci/drm/radeon/radeon_test.c radeondrm
#file dev/pci/drm/radeon/radeon_trace_points.c radeondrm
file dev/pci/drm/radeon/radeon_ttm.c radeondrm
-file dev/pci/drm/radeon/radeon_ucode.c radeondrm
-file dev/pci/drm/radeon/radeon_uvd.c radeondrm
-file dev/pci/drm/radeon/radeon_vce.c radeondrm
-file dev/pci/drm/radeon/radeon_vm.c radeondrm
file dev/pci/drm/radeon/rs400.c radeondrm
file dev/pci/drm/radeon/rs600.c radeondrm
file dev/pci/drm/radeon/rs690.c radeondrm
-file dev/pci/drm/radeon/rs780_dpm.c radeondrm
file dev/pci/drm/radeon/rv515.c radeondrm
-file dev/pci/drm/radeon/rv6xx_dpm.c radeondrm
-file dev/pci/drm/radeon/rv730_dpm.c radeondrm
-file dev/pci/drm/radeon/rv740_dpm.c radeondrm
file dev/pci/drm/radeon/rv770.c radeondrm
-file dev/pci/drm/radeon/rv770_dma.c radeondrm
-file dev/pci/drm/radeon/rv770_dpm.c radeondrm
-file dev/pci/drm/radeon/rv770_smc.c radeondrm
file dev/pci/drm/radeon/si.c radeondrm
file dev/pci/drm/radeon/si_blit_shaders.c radeondrm
-file dev/pci/drm/radeon/si_dma.c radeondrm
-file dev/pci/drm/radeon/si_dpm.c radeondrm
-file dev/pci/drm/radeon/si_smc.c radeondrm
-file dev/pci/drm/radeon/sumo_dpm.c radeondrm
-file dev/pci/drm/radeon/sumo_smc.c radeondrm
-file dev/pci/drm/radeon/trinity_dpm.c radeondrm
-file dev/pci/drm/radeon/trinity_smc.c radeondrm
-file dev/pci/drm/radeon/uvd_v1_0.c radeondrm
-file dev/pci/drm/radeon/uvd_v2_2.c radeondrm
-file dev/pci/drm/radeon/uvd_v3_1.c radeondrm
-file dev/pci/drm/radeon/uvd_v4_2.c radeondrm
-file dev/pci/drm/radeon/vce_v1_0.c radeondrm
-file dev/pci/drm/radeon/vce_v2_0.c radeondrm
diff --git a/sys/dev/pci/drm/i915/i915_drv.c b/sys/dev/pci/drm/i915/i915_drv.c
index f71019c4ee1..aa3361110a1 100644
--- a/sys/dev/pci/drm/i915/i915_drv.c
+++ b/sys/dev/pci/drm/i915/i915_drv.c
@@ -1925,7 +1925,6 @@ int inteldrm_list_font(void *, struct wsdisplay_font *);
int inteldrm_getchar(void *, int, int, struct wsdisplay_charcell *);
void inteldrm_burner(void *, u_int, u_int);
void inteldrm_burner_cb(void *);
-void inteldrm_scrollback(void *, void *, int lines);
struct wsscreen_descr inteldrm_stdscreen = {
"std",
@@ -1954,7 +1953,6 @@ struct wsdisplay_accessops inteldrm_accessops = {
.getchar = inteldrm_getchar,
.load_font = inteldrm_load_font,
.list_font = inteldrm_list_font,
- .scrollback = inteldrm_scrollback,
.burn_screen = inteldrm_burner
};
@@ -2176,15 +2174,6 @@ const struct backlight_ops inteldrm_backlight_ops = {
.get_brightness = inteldrm_backlight_get_brightness
};
-void
-inteldrm_scrollback(void *v, void *cookie, int lines)
-{
- struct inteldrm_softc *dev_priv = v;
- struct rasops_info *ri = &dev_priv->ro;
-
- rasops_scrollback(ri, cookie, lines);
-}
-
int inteldrm_match(struct device *, void *, void *);
void inteldrm_attach(struct device *, struct device *, void *);
int inteldrm_detach(struct device *, int);
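
Dropping the .scrollback member works because wsdisplay treats the accessops entries as optional function pointers; with the member gone, the operation is reported as unavailable rather than broken. A rough sketch of that dispatch pattern, with abbreviated names standing in for the real wsdisplay structures:

#include <sys/param.h>
#include <sys/errno.h>

/* Kernel-style sketch of optional-callback dispatch; not the wsdisplay code. */
struct accessops_sketch {
	void (*scrollback)(void *, void *, int);
	void (*burn_screen)(void *, u_int, u_int);
};

static int
do_scrollback(const struct accessops_sketch *ops, void *v, void *cookie,
    int lines)
{
	if (ops->scrollback == NULL)
		return (ENODEV);	/* member omitted: feature absent */
	ops->scrollback(v, cookie, lines);
	return (0);
}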
diff --git a/sys/dev/pci/drm/i915/intel_i2c.c b/sys/dev/pci/drm/i915/intel_i2c.c
index 6fb3bb9f240..36428ad123b 100644
--- a/sys/dev/pci/drm/i915/intel_i2c.c
+++ b/sys/dev/pci/drm/i915/intel_i2c.c
@@ -390,7 +390,7 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
if (gmbus2 & (GMBUS_SATOER | gmbus2_status))
break;
- schedule_timeout(1);
+ schedule_timeout(1, &wait);
}
finish_wait(&dev_priv->gmbus_wait_queue, &wait);
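
Both this hunk and the intel_sprite.c one below adapt callers to a schedule_timeout() that takes the wait-queue entry explicitly. That matches an OpenBSD-style compatibility shim in which there is no implicit per-task wait state to fall back on, so the caller hands over the object it registered with prepare_to_wait(). A hedged sketch of such a calling convention; this is an illustration of the shape only, not the actual drm_linux implementation:

#include <sys/param.h>
#include <sys/systm.h>

/* Illustrative shim only; the real one lives in the drm_linux layer. */
struct wq_entry_sketch {
	int	wqe_chan;	/* tsleep(9)-style wait channel, assumed */
};

static long
schedule_timeout_sketch(long timeout, struct wq_entry_sketch *wait)
{
	/*
	 * Sleep on the caller-supplied entry for at most `timeout`
	 * ticks; remaining-time accounting is omitted in this sketch.
	 */
	tsleep(&wait->wqe_chan, PZERO, "schto", (int)timeout);
	return (0);
}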
diff --git a/sys/dev/pci/drm/i915/intel_sprite.c b/sys/dev/pci/drm/i915/intel_sprite.c
index 80c0cf968ed..9c0394934be 100644
--- a/sys/dev/pci/drm/i915/intel_sprite.c
+++ b/sys/dev/pci/drm/i915/intel_sprite.c
@@ -128,7 +128,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
local_irq_enable();
- timeout = schedule_timeout(timeout);
+ timeout = schedule_timeout(timeout, &wait);
local_irq_disable();
}
diff --git a/sys/dev/pci/drm/radeon/ObjectID.h b/sys/dev/pci/drm/radeon/ObjectID.h
index 06192698bd9..f84a5ae68cf 100644
--- a/sys/dev/pci/drm/radeon/ObjectID.h
+++ b/sys/dev/pci/drm/radeon/ObjectID.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: ObjectID.h,v 1.3 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2006-2007 Advanced Micro Devices, Inc.
*
@@ -69,8 +70,6 @@
#define ENCODER_OBJECT_ID_ALMOND 0x22
#define ENCODER_OBJECT_ID_TRAVIS 0x23
#define ENCODER_OBJECT_ID_NUTMEG 0x22
-#define ENCODER_OBJECT_ID_HDMI_ANX9805 0x26
-
/* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14
@@ -88,8 +87,6 @@
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 0x20
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 0x21
#define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24
-#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 0x25
-#define ENCODER_OBJECT_ID_INTERNAL_AMCLK 0x27
#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF
@@ -368,14 +365,6 @@
GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
-#define ENCODER_INTERNAL_UNIPHY3_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY3_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_UNIPHY3 << OBJECT_ID_SHIFT)
-
#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
@@ -404,10 +393,6 @@
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_VCE << OBJECT_ID_SHIFT)
-#define ENCODER_HDMI_ANX9805_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_HDMI_ANX9805 << OBJECT_ID_SHIFT)
-
/****************************************************/
/* Connector Object ID definition - Shared with BIOS */
/****************************************************/
@@ -477,14 +462,6 @@
GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
-#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
-
#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
@@ -497,10 +474,6 @@
GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
-#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
-
#define CONNECTOR_VGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
@@ -569,18 +542,6 @@
GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
-#define CONNECTOR_HDMI_TYPE_A_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_HDMI_TYPE_A_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_HDMI_TYPE_A_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
-
#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
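
Each of these ENCODER_*/CONNECTOR_* enumeration macros packs three fields into one 16-bit object ID with shifts, and consumers recover the fields with the matching masks. A small self-contained example of the composition and decomposition; the shift, mask, and ID values used here are assumptions for illustration, the authoritative ones are defined earlier in ObjectID.h:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout for illustration; see ObjectID.h for the real values. */
#define OBJECT_ID_SHIFT		0x00
#define ENUM_ID_SHIFT		0x08
#define OBJECT_TYPE_SHIFT	0x0C
#define OBJECT_ID_MASK		0x00FF
#define ENUM_ID_MASK		0x0700
#define OBJECT_TYPE_MASK	0x7000

int
main(void)
{
	/* compose, exactly as the *_ENUM_ID* macros in the header do */
	uint16_t id = (0x3 << OBJECT_TYPE_SHIFT) |	/* connector type, assumed 0x3 */
	    (0x1 << ENUM_ID_SHIFT) |			/* first instance */
	    (0x0C << OBJECT_ID_SHIFT);			/* object id, assumed 0x0C */

	/* decompose with the matching masks */
	printf("type %u enum %u obj 0x%02x\n",
	    (unsigned)((id & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT),
	    (unsigned)((id & ENUM_ID_MASK) >> ENUM_ID_SHIFT),
	    (unsigned)((id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT));
	return 0;
}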
diff --git a/sys/dev/pci/drm/radeon/atom-bits.h b/sys/dev/pci/drm/radeon/atom-bits.h
index e8fae5c7751..3afe16b2d61 100644
--- a/sys/dev/pci/drm/radeon/atom-bits.h
+++ b/sys/dev/pci/drm/radeon/atom-bits.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: atom-bits.h,v 1.3 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
*
diff --git a/sys/dev/pci/drm/radeon/atom-names.h b/sys/dev/pci/drm/radeon/atom-names.h
index 6f907a5ffa5..974590b93d1 100644
--- a/sys/dev/pci/drm/radeon/atom-names.h
+++ b/sys/dev/pci/drm/radeon/atom-names.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: atom-names.h,v 1.3 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
*
diff --git a/sys/dev/pci/drm/radeon/atom-types.h b/sys/dev/pci/drm/radeon/atom-types.h
index 1125b866cdb..4641ee47561 100644
--- a/sys/dev/pci/drm/radeon/atom-types.h
+++ b/sys/dev/pci/drm/radeon/atom-types.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: atom-types.h,v 1.3 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2008 Red Hat Inc.
*
diff --git a/sys/dev/pci/drm/radeon/atom.c b/sys/dev/pci/drm/radeon/atom.c
index 1ce19696ee0..9331697f9f6 100644
--- a/sys/dev/pci/drm/radeon/atom.c
+++ b/sys/dev/pci/drm/radeon/atom.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: atom.c,v 1.11 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
*
@@ -91,8 +92,8 @@ static void debug_print_spaces(int n)
#undef DEBUG
#endif
-#define DEBUG(...) do if (atom_debug) { printk(KERN_DEBUG __VA_ARGS__); } while (0)
-#define SDEBUG(...) do if (atom_debug) { printk(KERN_DEBUG); debug_print_spaces(debug_depth); printk(__VA_ARGS__); } while (0)
+#define DEBUG(...) do if (atom_debug) { printk(__FILE__ __VA_ARGS__); } while (0)
+#define SDEBUG(...) do if (atom_debug) { printk(__FILE__); debug_print_spaces(debug_depth); printf(__VA_ARGS__); } while (0)
#else
#define DEBUG(...) do { } while (0)
#define SDEBUG(...) do { } while (0)
@@ -221,7 +222,11 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
(*ptr)++;
/* get_unaligned_le32 avoids unaligned accesses from atombios
* tables, noticed on a DEC Alpha. */
+#ifdef notyet
val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
+#else
+ val = le32_to_cpu(ctx->ps[idx]);
+#endif
if (print)
DEBUG("PS[0x%02X,0x%04X]", idx, val);
break;
@@ -1216,7 +1221,7 @@ free:
return ret;
}
-int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t * params)
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
{
int r;
@@ -1237,22 +1242,11 @@ int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uin
return r;
}
-int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
-{
- int r;
- mutex_lock(&ctx->scratch_mutex);
- r = atom_execute_table_scratch_unlocked(ctx, index, params);
- mutex_unlock(&ctx->scratch_mutex);
- return r;
-}
-
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
static void atom_index_iio(struct atom_context *ctx, int base)
{
ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
- if (!ctx->iio)
- return;
while (CU8(base) == ATOM_IIO_START) {
ctx->iio[CU8(base + 1)] = base + 2;
base += 2;
@@ -1267,9 +1261,11 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
int base;
struct atom_context *ctx =
kzalloc(sizeof(struct atom_context), GFP_KERNEL);
+#ifdef DRMDEBUG
char *str;
char name[512];
int i;
+#endif
if (!ctx)
return NULL;
@@ -1302,11 +1298,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
- if (!ctx->iio) {
- atom_destroy(ctx);
- return NULL;
- }
+#ifdef DRMDEBUG
str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
while (*str && ((*str == '\n') || (*str == '\r')))
str++;
@@ -1318,7 +1311,6 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
break;
}
}
-#ifdef DRMDEBUG
printk(KERN_INFO "ATOM BIOS: %s\n", name);
#endif
@@ -1356,7 +1348,8 @@ int atom_asic_init(struct atom_context *ctx)
void atom_destroy(struct atom_context *ctx)
{
- kfree(ctx->iio);
+ if (ctx->iio)
+ kfree(ctx->iio);
kfree(ctx);
}
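
The notyet hunk above swaps get_unaligned_le32() for le32_to_cpu(); the retained comment records why the byte-safe variant exists at all: AtomBIOS tables are byte-packed, so a 32-bit operand can start at any offset, which faults on strict-alignment machines such as the DEC Alpha it mentions. A byte-wise sketch of the guarantee get_unaligned_le32() provides (illustrative, not the kernel implementation):

#include <stdint.h>

/*
 * Read a little-endian 32-bit value from an arbitrarily aligned
 * pointer. Byte loads are always aligned, and assembling the value
 * explicitly also fixes the byte order on big-endian hosts.
 */
static uint32_t
get_unaligned_le32_sketch(const void *p)
{
	const uint8_t *b = p;

	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	    (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

The le32_to_cpu() fallback taken here assumes the ctx->ps array element is already naturally aligned, so only the byte order still needs converting.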
diff --git a/sys/dev/pci/drm/radeon/atom.h b/sys/dev/pci/drm/radeon/atom.h
index 0d710ef330a..f9b9e031cc5 100644
--- a/sys/dev/pci/drm/radeon/atom.h
+++ b/sys/dev/pci/drm/radeon/atom.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: atom.h,v 1.4 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
*
@@ -125,7 +126,6 @@ struct card_info {
struct atom_context {
struct card_info *card;
struct rwlock mutex;
- struct rwlock scratch_mutex;
void *bios;
uint32_t cmd_table, data_table;
uint16_t *iio;
@@ -146,7 +146,6 @@ extern int atom_debug;
struct atom_context *atom_parse(struct card_info *, void *);
int atom_execute_table(struct atom_context *, int, uint32_t *);
-int atom_execute_table_scratch_unlocked(struct atom_context *, int, uint32_t *);
int atom_asic_init(struct atom_context *);
void atom_destroy(struct atom_context *);
bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
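
The atom_execute_table() wrapper deleted in atom.c above, together with the scratch_mutex member and prototype removed here, implemented a common two-level locking shape: an *_unlocked worker plus a thin locked wrapper, so callers that already hold the lock can use the worker directly. A generic sketch of that shape, with pthreads standing in for the kernel lock purely for illustration:

#include <pthread.h>

struct ctx_sketch {
	pthread_mutex_t scratch_mutex;
};

/* placeholder for the real table interpreter */
static int
execute_scratch_unlocked(struct ctx_sketch *ctx, int index)
{
	(void)ctx;
	return index;
}

static int
execute(struct ctx_sketch *ctx, int index)
{
	int r;

	pthread_mutex_lock(&ctx->scratch_mutex);	/* serialize scratch use */
	r = execute_scratch_unlocked(ctx, index);
	pthread_mutex_unlock(&ctx->scratch_mutex);
	return r;
}

With the revert, atom_execute_table() is the worker itself again, leaving serialization to the single ctx->mutex that remains in atom.h.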
diff --git a/sys/dev/pci/drm/radeon/atombios.h b/sys/dev/pci/drm/radeon/atombios.h
index ab89eed9ddd..7655861e641 100644
--- a/sys/dev/pci/drm/radeon/atombios.h
+++ b/sys/dev/pci/drm/radeon/atombios.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: atombios.h,v 1.3 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2006-2007 Advanced Micro Devices, Inc.
*
@@ -74,8 +75,6 @@
#define ATOM_PPLL2 1
#define ATOM_DCPLL 2
#define ATOM_PPLL0 2
-#define ATOM_PPLL3 3
-
#define ATOM_EXT_PLL1 8
#define ATOM_EXT_PLL2 9
#define ATOM_EXT_CLOCK 10
@@ -261,7 +260,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
USHORT AdjustDisplayPll; //Atomic Table, used by various SW components.
USHORT AdjustMemoryController; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
USHORT EnableASIC_StaticPwrMgt; //Atomic Table, only used by Bios
- USHORT SetUniphyInstance; //Atomic Table, only used by Bios
+ USHORT ASIC_StaticPwrMgtStatusChange; //Obsolete , only used by Bios
USHORT DAC_LoadDetection; //Atomic Table, directly used by various SW components,latest version 1.2
USHORT LVTMAEncoderControl; //Atomic Table,directly used by various SW components,latest version 1.3
USHORT HW_Misc_Operation; //Atomic Table, directly used by various SW components,latest version 1.1
@@ -273,7 +272,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
USHORT TVEncoderControl; //Function Table,directly used by various SW components,latest version 1.1
USHORT PatchMCSetting; //only used by BIOS
USHORT MC_SEQ_Control; //only used by BIOS
- USHORT Gfx_Harvesting; //Atomic Table, Obsolete from Ry6xx, Now only used by BIOS for GFX harvesting
+ USHORT TV1OutputControl; //Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
USHORT EnableScaler; //Atomic Table, used only by Bios
USHORT BlankCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT EnableCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
@@ -330,7 +329,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
#define UNIPHYTransmitterControl DIG1TransmitterControl
#define LVTMATransmitterControl DIG2TransmitterControl
#define SetCRTC_DPM_State GetConditionalGoldenSetting
-#define ASIC_StaticPwrMgtStatusChange SetUniphyInstance
+#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange
#define HPDInterruptService ReadHWAssistedI2CStatus
#define EnableVGA_Access GetSCLKOverMCLKRatio
#define EnableYUV GetDispObjectInfo
@@ -340,7 +339,7 @@ typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
#define TMDSAEncoderControl PatchMCSetting
#define LVDSEncoderControl MC_SEQ_Control
#define LCD1OutputControl HW_Misc_Operation
-#define TV1OutputControl Gfx_Harvesting
+
typedef struct _ATOM_MASTER_COMMAND_TABLE
{
@@ -460,7 +459,6 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
union
{
ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
- ULONG ulClockParams; //ULONG access for BE
ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter
};
UCHAR ucRefDiv; //Output Parameter
@@ -480,11 +478,11 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4
{
#if ATOM_BIG_ENDIAN
- ULONG ucPostDiv:8; //return parameter: post divider which is used to program to register directly
+ ULONG ucPostDiv; //return parameter: post divider which is used to program to register directly
ULONG ulClock:24; //Input= target clock, output = actual clock
#else
ULONG ulClock:24; //Input= target clock, output = actual clock
- ULONG ucPostDiv:8; //return parameter: post divider which is used to program to register directly
+ ULONG ucPostDiv; //return parameter: post divider which is used to program to register directly
#endif
}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;
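
The #if ATOM_BIG_ENDIAN split in this parameter block exists because C bit-field allocation order follows the target ABI: little-endian ABIs place the first declared field in the low bits of the storage unit, big-endian ABIs in the high bits, so declaring the fields in opposite order keeps the wire layout identical. A runnable sketch of a 24/8 packing of that kind (illustrative; shown for a little-endian host):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct pll_params_sketch {	/* little-endian declaration order */
	uint32_t ulClock:24;	/* target/actual clock, low 24 bits */
	uint32_t ucPostDiv:8;	/* post divider, high 8 bits */
};

int
main(void)
{
	struct pll_params_sketch p = { .ulClock = 30000, .ucPostDiv = 4 };
	uint32_t raw;

	memcpy(&raw, &p, sizeof(raw));
	printf("raw word: 0x%08x\n", (unsigned)raw);	/* 0x04007530 on LE */
	return 0;
}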
@@ -493,7 +491,6 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
union
{
ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
- ULONG ulClockParams; //ULONG access for BE
ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter
};
UCHAR ucRefDiv; //Output Parameter
@@ -506,32 +503,6 @@ typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
UCHAR ucReserved;
}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5;
-
-typedef struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6
-{
- ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
- ULONG ulReserved[2];
-}COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6;
-
-//ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag
-#define COMPUTE_GPUCLK_INPUT_FLAG_CLK_TYPE_MASK 0x0f
-#define COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK 0x00
-#define COMPUTE_GPUCLK_INPUT_FLAG_SCLK 0x01
-
-typedef struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6
-{
- COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 ulClock; //Output Parameter: ucPostDiv=DFS divider
- ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter: PLL FB divider
- UCHAR ucPllRefDiv; //Output Parameter: PLL ref divider
- UCHAR ucPllPostDiv; //Output Parameter: PLL post divider
- UCHAR ucPllCntlFlag; //Output Flags: control flag
- UCHAR ucReserved;
-}COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6;
-
-//ucPllCntlFlag
-#define SPLL_CNTL_FLAG_VCO_MODE_MASK 0x03
-
-
// ucInputFlag
#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN 1 // 1-StrobeMode, 0-PerformanceMode
@@ -1711,12 +1682,9 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V6
#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK 0x0c
#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP 0x00
#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP 0x04
-#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6 0x08 //for V6, the correct defintion for 36bpp should be 2 for 36bpp(2:1)
#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP 0x08
-#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6 0x04 //for V6, the correct defintion for 30bpp should be 1 for 36bpp(5:4)
#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP 0x0c
#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC 0x10
-#define PIXEL_CLOCK_V6_MISC_GEN_DPREFCLK 0x40
typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
{
@@ -2133,17 +2101,6 @@ typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3
}DVO_ENCODER_CONTROL_PARAMETERS_V3;
#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 DVO_ENCODER_CONTROL_PARAMETERS_V3
-typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V1_4
-{
- USHORT usPixelClock;
- UCHAR ucDVOConfig;
- UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
- UCHAR ucBitPerColor; //please refer to definition of PANEL_xBIT_PER_COLOR
- UCHAR ucReseved[3];
-}DVO_ENCODER_CONTROL_PARAMETERS_V1_4;
-#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V1_4 DVO_ENCODER_CONTROL_PARAMETERS_V1_4
-
-
//ucTableFormatRevision=1
//ucTableContentRevision=3 structure is not changed but usMisc add bit 1 as another input for
// bit1=0: non-coherent mode
@@ -2207,7 +2164,7 @@ typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V1_4
#define SET_ASIC_VOLTAGE_MODE_SOURCE_B 0x4
#define SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE 0x0
-#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1
+#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1
#define SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK 0x2
typedef struct _SET_VOLTAGE_PARAMETERS
@@ -2225,7 +2182,7 @@ typedef struct _SET_VOLTAGE_PARAMETERS_V2
USHORT usVoltageLevel; // real voltage level
}SET_VOLTAGE_PARAMETERS_V2;
-// used by both SetVoltageTable v1.3 and v1.4
+
typedef struct _SET_VOLTAGE_PARAMETERS_V1_3
{
UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
@@ -2242,20 +2199,15 @@ typedef struct _SET_VOLTAGE_PARAMETERS_V1_3
//SET_VOLTAGE_PARAMETERS_V3.ucVoltageMode
#define ATOM_SET_VOLTAGE 0 //Set voltage Level
#define ATOM_INIT_VOLTAGE_REGULATOR 3 //Init Regulator
-#define ATOM_SET_VOLTAGE_PHASE 4 //Set Vregulator Phase, only for SVID/PVID regulator
-#define ATOM_GET_MAX_VOLTAGE 6 //Get Max Voltage, not used from SetVoltageTable v1.3
-#define ATOM_GET_VOLTAGE_LEVEL 6 //Get Voltage level from vitual voltage ID, not used for SetVoltage v1.4
-#define ATOM_GET_LEAKAGE_ID 8 //Get Leakage Voltage Id ( starting from SMU7x IP ), SetVoltage v1.4
+#define ATOM_SET_VOLTAGE_PHASE 4 //Set Vregulator Phase
+#define ATOM_GET_MAX_VOLTAGE 6 //Get Max Voltage, not used in SetVoltageTable v1.3
+#define ATOM_GET_VOLTAGE_LEVEL 6 //Get Voltage level from virtual voltage ID
// define virtual voltage id in usVoltageLevel
#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
#define ATOM_VIRTUAL_VOLTAGE_ID1 0xff02
#define ATOM_VIRTUAL_VOLTAGE_ID2 0xff03
#define ATOM_VIRTUAL_VOLTAGE_ID3 0xff04
-#define ATOM_VIRTUAL_VOLTAGE_ID4 0xff05
-#define ATOM_VIRTUAL_VOLTAGE_ID5 0xff06
-#define ATOM_VIRTUAL_VOLTAGE_ID6 0xff07
-#define ATOM_VIRTUAL_VOLTAGE_ID7 0xff08
typedef struct _SET_VOLTAGE_PS_ALLOCATION
{
@@ -2292,36 +2244,15 @@ typedef struct _GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
#define ATOM_GET_VOLTAGE_VID 0x00
#define ATOM_GET_VOTLAGE_INIT_SEQ 0x03
#define ATOM_GET_VOLTTAGE_PHASE_PHASE_VID 0x04
-#define ATOM_GET_VOLTAGE_SVID2 0x07 //Get SVI2 Regulator Info
-
// for SI, this state map to 0xff02 voltage state in Power Play table, which is power boost state
-#define ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
+#define ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
+
// for SI, this state map to 0xff01 voltage state in Power Play table, which is performance state
#define ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID 0x11
-
+// undefined power state
#define ATOM_GET_VOLTAGE_STATE2_LEAKAGE_VID 0x12
#define ATOM_GET_VOLTAGE_STATE3_LEAKAGE_VID 0x13
-// New Added from CI Hawaii for GetVoltageInfoTable, input parameter structure
-typedef struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2
-{
- UCHAR ucVoltageType; // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
- UCHAR ucVoltageMode; // Input: Indicate action: Get voltage info
- USHORT usVoltageLevel; // Input: real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. ) or Leakage Id
- ULONG ulSCLKFreq; // Input: when ucVoltageMode= ATOM_GET_VOLTAGE_EVV_VOLTAGE, DPM state SCLK frequency, Define in PPTable SCLK/Voltage dependence table
-}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2;
-
-// New in GetVoltageInfo v1.2 ucVoltageMode
-#define ATOM_GET_VOLTAGE_EVV_VOLTAGE 0x09
-
-// New Added from CI Hawaii for EVV feature
-typedef struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2
-{
- USHORT usVoltageLevel; // real voltage level in unit of mv
- USHORT usVoltageId; // Voltage Id programmed in Voltage Regulator
- ULONG ulReseved;
-}GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2;
-
/****************************************************************************/
// Structures used by TVEncoderControlTable
/****************************************************************************/
@@ -2696,8 +2627,7 @@ typedef struct _ATOM_FIRMWARE_INFO_V2_2
ULONG ulFirmwareRevision;
ULONG ulDefaultEngineClock; //In 10Khz unit
ULONG ulDefaultMemoryClock; //In 10Khz unit
- ULONG ulSPLL_OutputFreq; //In 10Khz unit
- ULONG ulGPUPLL_OutputFreq; //In 10Khz unit
+ ULONG ulReserved[2];
ULONG ulReserved1; //Was ulMaxEngineClockPLL_Output; //In 10Khz unit*
ULONG ulReserved2; //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit*
ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
@@ -3882,14 +3812,6 @@ typedef struct _ATOM_GPIO_PIN_ASSIGNMENT
UCHAR ucGPIO_ID;
}ATOM_GPIO_PIN_ASSIGNMENT;
-//ucGPIO_ID pre-define id for multiple usage
-//from SMU7.x, if ucGPIO_ID=PP_AC_DC_SWITCH_GPIO_PINID in GPIO_LUTTable, AC/DC swithing feature is enable
-#define PP_AC_DC_SWITCH_GPIO_PINID 60
-//from SMU7.x, if ucGPIO_ID=VDDC_REGULATOR_VRHOT_GPIO_PINID in GPIO_LUTable, VRHot feature is enable
-#define VDDC_VRHOT_GPIO_PINID 61
-//if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, Peak Current Control feature is enabled
-#define VDDC_PCC_GPIO_PINID 62
-
typedef struct _ATOM_GPIO_PIN_LUT
{
ATOM_COMMON_TABLE_HEADER sHeader;
@@ -4151,19 +4073,17 @@ typedef struct _EXT_DISPLAY_PATH
//usCaps
#define EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE 0x01
-#define EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN 0x02
typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
{
ATOM_COMMON_TABLE_HEADER sHeader;
UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string
EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
- UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0.
+ UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0.
UCHAR uc3DStereoPinId; // use for eDP panel
UCHAR ucRemoteDisplayConfig;
UCHAR uceDPToLVDSRxId;
- UCHAR ucFixDPVoltageSwing; // usCaps[1]=1, this indicate DP_LANE_SET value
- UCHAR Reserved[3]; // for potential expansion
+ UCHAR Reserved[4]; // for potential expansion
}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
//Related definitions, all records are different but they have a common header
@@ -4194,10 +4114,10 @@ typedef struct _ATOM_COMMON_RECORD_HEADER
#define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicates the object is linked to another object described by the record
#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19
#define ATOM_ENCODER_CAP_RECORD_TYPE 20
-#define ATOM_BRACKET_LAYOUT_RECORD_TYPE 21
+
//Must be updated when new record type is added,equal to that record definition!
-#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_BRACKET_LAYOUT_RECORD_TYPE
+#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_ENCODER_CAP_RECORD_TYPE
typedef struct _ATOM_I2C_RECORD
{
@@ -4422,31 +4342,6 @@ typedef struct _ATOM_CONNECTOR_REMOTE_CAP_RECORD
USHORT usReserved;
}ATOM_CONNECTOR_REMOTE_CAP_RECORD;
-typedef struct _ATOM_CONNECTOR_LAYOUT_INFO
-{
- USHORT usConnectorObjectId;
- UCHAR ucConnectorType;
- UCHAR ucPosition;
-}ATOM_CONNECTOR_LAYOUT_INFO;
-
-// define ATOM_CONNECTOR_LAYOUT_INFO.ucConnectorType to describe the display connector size
-#define CONNECTOR_TYPE_DVI_D 1
-#define CONNECTOR_TYPE_DVI_I 2
-#define CONNECTOR_TYPE_VGA 3
-#define CONNECTOR_TYPE_HDMI 4
-#define CONNECTOR_TYPE_DISPLAY_PORT 5
-#define CONNECTOR_TYPE_MINI_DISPLAY_PORT 6
-
-typedef struct _ATOM_BRACKET_LAYOUT_RECORD
-{
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucLength;
- UCHAR ucWidth;
- UCHAR ucConnNum;
- UCHAR ucReserved;
- ATOM_CONNECTOR_LAYOUT_INFO asConnInfo[1];
-}ATOM_BRACKET_LAYOUT_RECORD;
-
/****************************************************************************/
// ASIC voltage data table
/****************************************************************************/
@@ -4520,13 +4415,6 @@ typedef struct _ATOM_VOLTAGE_CONTROL
#define VOLTAGE_CONTROL_ID_CHL822x 0x08
#define VOLTAGE_CONTROL_ID_VT1586M 0x09
#define VOLTAGE_CONTROL_ID_UP1637 0x0A
-#define VOLTAGE_CONTROL_ID_CHL8214 0x0B
-#define VOLTAGE_CONTROL_ID_UP1801 0x0C
-#define VOLTAGE_CONTROL_ID_ST6788A 0x0D
-#define VOLTAGE_CONTROL_ID_CHLIR3564SVI2 0x0E
-#define VOLTAGE_CONTROL_ID_AD527x 0x0F
-#define VOLTAGE_CONTROL_ID_NCP81022 0x10
-#define VOLTAGE_CONTROL_ID_LTC2635 0x11
typedef struct _ATOM_VOLTAGE_OBJECT
{
@@ -4569,16 +4457,6 @@ typedef struct _ATOM_VOLTAGE_OBJECT_HEADER_V3{
USHORT usSize; //Size of Object
}ATOM_VOLTAGE_OBJECT_HEADER_V3;
-// ATOM_VOLTAGE_OBJECT_HEADER_V3.ucVoltageMode
-#define VOLTAGE_OBJ_GPIO_LUT 0 //VOLTAGE and GPIO Lookup table ->ATOM_GPIO_VOLTAGE_OBJECT_V3
-#define VOLTAGE_OBJ_VR_I2C_INIT_SEQ 3 //VOLTAGE REGULATOR INIT sequece through I2C -> ATOM_I2C_VOLTAGE_OBJECT_V3
-#define VOLTAGE_OBJ_PHASE_LUT 4 //Set Vregulator Phase lookup table ->ATOM_GPIO_VOLTAGE_OBJECT_V3
-#define VOLTAGE_OBJ_SVID2 7 //Indicate voltage control by SVID2 ->ATOM_SVID2_VOLTAGE_OBJECT_V3
-#define VOLTAGE_OBJ_EVV 8
-#define VOLTAGE_OBJ_PWRBOOST_LEAKAGE_LUT 0x10 //Powerboost Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
-#define VOLTAGE_OBJ_HIGH_STATE_LEAKAGE_LUT 0x11 //High voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
-#define VOLTAGE_OBJ_HIGH1_STATE_LEAKAGE_LUT 0x12 //High1 voltage state Voltage and LeakageId lookup table->ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
-
typedef struct _VOLTAGE_LUT_ENTRY_V2
{
ULONG ulVoltageId; // The Voltage ID which is used to program GPIO register
@@ -4594,7 +4472,7 @@ typedef struct _LEAKAGE_VOLTAGE_LUT_ENTRY_V2
typedef struct _ATOM_I2C_VOLTAGE_OBJECT_V3
{
- ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_VR_I2C_INIT_SEQ
+ ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
UCHAR ucVoltageRegulatorId; //Indicate Voltage Regulator Id
UCHAR ucVoltageControlI2cLine;
UCHAR ucVoltageControlAddress;
@@ -4603,13 +4481,9 @@ typedef struct _ATOM_I2C_VOLTAGE_OBJECT_V3
VOLTAGE_LUT_ENTRY asVolI2cLut[1]; // end with 0xff
}ATOM_I2C_VOLTAGE_OBJECT_V3;
-// ATOM_I2C_VOLTAGE_OBJECT_V3.ucVoltageControlFlag
-#define VOLTAGE_DATA_ONE_BYTE 0
-#define VOLTAGE_DATA_TWO_BYTE 1
-
typedef struct _ATOM_GPIO_VOLTAGE_OBJECT_V3
{
- ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_GPIO_LUT or VOLTAGE_OBJ_PHASE_LUT
+ ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
UCHAR ucVoltageGpioCntlId; // default is 0 which indicates control through CG VID mode
UCHAR ucGpioEntryNum; // indicate the entry number of Voltage/Gpio value lookup table
UCHAR ucPhaseDelay; // phase delay in unit of microseconds
@@ -4620,7 +4494,7 @@ typedef struct _ATOM_GPIO_VOLTAGE_OBJECT_V3
typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
{
- ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = 0x10/0x11/0x12
+ ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
UCHAR ucLeakageCntlId; // default is 0
UCHAR ucLeakageEntryNum; // indicate the entry number of LeakageId/Voltage Lut table
UCHAR ucReserved[2];
@@ -4628,27 +4502,10 @@ typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1];
}ATOM_LEAKAGE_VOLTAGE_OBJECT_V3;
-
-typedef struct _ATOM_SVID2_VOLTAGE_OBJECT_V3
-{
- ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader; // voltage mode = VOLTAGE_OBJ_SVID2
-// 14:7 – PSI0_VID
-// 6 – PSI0_EN
-// 5 – PSI1
-// 4:2 – load line slope trim.
-// 1:0 – offset trim,
- USHORT usLoadLine_PSI;
-// GPU GPIO pin Id to SVID2 regulator VRHot pin. possible value 0~31. 0 means GPIO0, 31 means GPIO31
- UCHAR ucSVDGpioId; //0~31 indicate GPIO0~31
- UCHAR ucSVCGpioId; //0~31 indicate GPIO0~31
- ULONG ulReserved;
-}ATOM_SVID2_VOLTAGE_OBJECT_V3;
-
typedef union _ATOM_VOLTAGE_OBJECT_V3{
ATOM_GPIO_VOLTAGE_OBJECT_V3 asGpioVoltageObj;
ATOM_I2C_VOLTAGE_OBJECT_V3 asI2cVoltageObj;
ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 asLeakageObj;
- ATOM_SVID2_VOLTAGE_OBJECT_V3 asSVID2Obj;
}ATOM_VOLTAGE_OBJECT_V3;
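
ATOM_VOLTAGE_OBJECT_V3 is a header-tagged union: every variant begins with ATOM_VOLTAGE_OBJECT_HEADER_V3, and per the VOLTAGE_OBJ_* comments being removed above, the header's voltage mode selects which variant follows. A compact sketch of that dispatch idiom, with abbreviated field names and the mode values taken from the removed VOLTAGE_OBJ_* defines:

#include <stdint.h>

/* Abbreviated sketch of header-tagged union dispatch. */
struct vhdr { uint8_t type, mode; uint16_t size; };
struct vgpio    { struct vhdr h; /* GPIO LUT fields */ };
struct vi2c     { struct vhdr h; /* I2C init sequence fields */ };
struct vleakage { struct vhdr h; /* leakage LUT fields */ };

union vobj {
	struct vgpio    gpio;
	struct vi2c     i2c;
	struct vleakage leakage;
};

/* mode values from the VOLTAGE_OBJ_* defines removed above */
enum { MODE_GPIO_LUT = 0, MODE_I2C_INIT_SEQ = 3, MODE_PHASE_LUT = 4 };

static const void *
vobj_variant(const union vobj *v)
{
	/* every variant starts with the header, so this read is safe */
	switch (v->gpio.h.mode) {
	case MODE_GPIO_LUT:
	case MODE_PHASE_LUT:
		return &v->gpio;
	case MODE_I2C_INIT_SEQ:
		return &v->i2c;
	default:
		return &v->leakage;	/* leakage LUT modes 0x10..0x12 */
	}
}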
typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1
@@ -4678,64 +4535,6 @@ typedef struct _ATOM_ASIC_PROFILING_INFO
ATOM_ASIC_PROFILE_VOLTAGE asVoltage;
}ATOM_ASIC_PROFILING_INFO;
-typedef struct _ATOM_ASIC_PROFILING_INFO_V2_1
-{
- ATOM_COMMON_TABLE_HEADER asHeader;
- UCHAR ucLeakageBinNum; // indicate the entry number of LeakageId/Voltage Lut table
- USHORT usLeakageBinArrayOffset; // offset of USHORT Leakage Bin list array ( from lower LeakageId to higher)
-
- UCHAR ucElbVDDC_Num;
- USHORT usElbVDDC_IdArrayOffset; // offset of USHORT virtual VDDC voltage id ( 0xff01~0xff08 )
- USHORT usElbVDDC_LevelArrayOffset; // offset of 2 dimension voltage level USHORT array
-
- UCHAR ucElbVDDCI_Num;
- USHORT usElbVDDCI_IdArrayOffset; // offset of USHORT virtual VDDCI voltage id ( 0xff01~0xff08 )
- USHORT usElbVDDCI_LevelArrayOffset; // offset of 2 dimension voltage level USHORT array
-}ATOM_ASIC_PROFILING_INFO_V2_1;
-
-typedef struct _ATOM_ASIC_PROFILING_INFO_V3_1
-{
- ATOM_COMMON_TABLE_HEADER asHeader;
- ULONG ulEvvDerateTdp;
- ULONG ulEvvDerateTdc;
- ULONG ulBoardCoreTemp;
- ULONG ulMaxVddc;
- ULONG ulMinVddc;
- ULONG ulLoadLineSlop;
- ULONG ulLeakageTemp;
- ULONG ulLeakageVoltage;
- ULONG ulCACmEncodeRange;
- ULONG ulCACmEncodeAverage;
- ULONG ulCACbEncodeRange;
- ULONG ulCACbEncodeAverage;
- ULONG ulKt_bEncodeRange;
- ULONG ulKt_bEncodeAverage;
- ULONG ulKv_mEncodeRange;
- ULONG ulKv_mEncodeAverage;
- ULONG ulKv_bEncodeRange;
- ULONG ulKv_bEncodeAverage;
- ULONG ulLkgEncodeLn_MaxDivMin;
- ULONG ulLkgEncodeMin;
- ULONG ulEfuseLogisticAlpha;
- USHORT usPowerDpm0;
- USHORT usCurrentDpm0;
- USHORT usPowerDpm1;
- USHORT usCurrentDpm1;
- USHORT usPowerDpm2;
- USHORT usCurrentDpm2;
- USHORT usPowerDpm3;
- USHORT usCurrentDpm3;
- USHORT usPowerDpm4;
- USHORT usCurrentDpm4;
- USHORT usPowerDpm5;
- USHORT usCurrentDpm5;
- USHORT usPowerDpm6;
- USHORT usCurrentDpm6;
- USHORT usPowerDpm7;
- USHORT usCurrentDpm7;
-}ATOM_ASIC_PROFILING_INFO_V3_1;
-
-
typedef struct _ATOM_POWER_SOURCE_OBJECT
{
UCHAR ucPwrSrcId; // Power source
@@ -4852,8 +4651,6 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
#define SYS_INFO_LVDSMISC__888_BPC 0x04
#define SYS_INFO_LVDSMISC__OVERRIDE_EN 0x08
#define SYS_INFO_LVDSMISC__BLON_ACTIVE_LOW 0x10
-// new since Trinity
-#define SYS_INFO_LVDSMISC__TRAVIS_LVDS_VOL_OVERRIDE_EN 0x20
// not used any more
#define SYS_INFO_LVDSMISC__VSYNC_ACTIVE_LOW 0x04
@@ -4954,29 +4751,6 @@ typedef struct _ATOM_FUSION_SYSTEM_INFO_V1
ATOM_INTEGRATED_SYSTEM_INFO_V6 sIntegratedSysInfo;
ULONG ulPowerplayTable[128];
}ATOM_FUSION_SYSTEM_INFO_V1;
-
-
-typedef struct _ATOM_TDP_CONFIG_BITS
-{
-#if ATOM_BIG_ENDIAN
- ULONG uReserved:2;
- ULONG uTDP_Value:14; // Original TDP value in tens of milli watts
- ULONG uCTDP_Value:14; // Override value in tens of milli watts
- ULONG uCTDP_Enable:2; // = (uCTDP_Value > uTDP_Value? 2: (uCTDP_Value < uTDP_Value))
-#else
- ULONG uCTDP_Enable:2; // = (uCTDP_Value > uTDP_Value? 2: (uCTDP_Value < uTDP_Value))
- ULONG uCTDP_Value:14; // Override value in tens of milli watts
- ULONG uTDP_Value:14; // Original TDP value in tens of milli watts
- ULONG uReserved:2;
-#endif
-}ATOM_TDP_CONFIG_BITS;
-
-typedef union _ATOM_TDP_CONFIG
-{
- ATOM_TDP_CONFIG_BITS TDP_config;
- ULONG TDP_config_all;
-}ATOM_TDP_CONFIG;
-
/**********************************************************************************************************************
ATOM_FUSION_SYSTEM_INFO_V1 Description
sIntegratedSysInfo: refer to ATOM_INTEGRATED_SYSTEM_INFO_V6 definition.
@@ -5009,8 +4783,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
UCHAR ucMemoryType;
UCHAR ucUMAChannelNumber;
UCHAR strVBIOSMsg[40];
- ATOM_TDP_CONFIG asTdpConfig;
- ULONG ulReserved[19];
+ ULONG ulReserved[20];
ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
ULONG ulGMCRestoreResetTime;
ULONG ulMinimumNClk;
@@ -5035,7 +4808,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
USHORT GnbTdpLimit;
USHORT usMaxLVDSPclkFreqInSingleLink;
UCHAR ucLvdsMisc;
- UCHAR ucTravisLVDSVolAdjust;
+ UCHAR ucLVDSReserved;
UCHAR ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
UCHAR ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
UCHAR ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
@@ -5043,7 +4816,7 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
UCHAR ucLVDSOffToOnDelay_in4Ms;
UCHAR ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
UCHAR ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
- UCHAR ucMinAllowedBL_Level;
+ UCHAR ucLVDSReserved1;
ULONG ulLCDBitDepthControlVal;
ULONG ulNbpStateMemclkFreq[4];
USHORT usNBP2Voltage;
@@ -5072,7 +4845,6 @@ typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE 0x02
#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT 0x08
-#define SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS 0x10
/**********************************************************************************************************************
ATOM_INTEGRATED_SYSTEM_INFO_V1_7 Description
@@ -5172,9 +4944,6 @@ ucLVDSMisc: [bit0] LVDS 888bit panel mode =0: LVDS 888 pan
[bit2] LVDS 888bit per color mode =0: 666 bit per color =1:888 bit per color
[bit3] LVDS parameter override enable =0: ucLvdsMisc parameter are not used =1: ucLvdsMisc parameter should be used
[bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
- [bit5] Travid LVDS output voltage override enable, when =1, use ucTravisLVDSVolAdjust value to overwrite Traivs register LVDS_CTRL_4
-ucTravisLVDSVolAdjust When ucLVDSMisc[5]=1,it means platform SBIOS want to overwrite TravisLVDSVoltage. Then VBIOS will use ucTravisLVDSVolAdjust
- value to program Travis register LVDS_CTRL_4
ucLVDSPwrOnSeqDIGONtoDE_in4Ms: LVDS power up sequence time in unit of 4ms, time delay from DIGON signal active to data enable signal active( DE ).
=0 mean use VBIOS default which is 8 ( 32ms ). The LVDS power up sequence is as following: DIGON->DE->VARY_BL->BLON.
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
@@ -5194,241 +4963,18 @@ ucLVDSOffToOnDelay_in4Ms: LVDS power down sequence time in unit of 4ms.
=0 means to use VBIOS default delay which is 125 ( 500ms ).
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
-ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms:
- LVDS power up sequence time in unit of 4ms. Time delay from VARY_BL signal on to DLON signal active.
+ucLVDSPwrOnVARY_BLtoBLON_in4Ms: LVDS power up sequence time in unit of 4ms. Time delay from VARY_BL signal on to DLON signal active.
=0 means to use VBIOS default delay which is 0 ( 0ms ).
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
-ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms:
- LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off.
+ucLVDSPwrOffBLONtoVARY_BL_in4Ms: LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off.
=0 means to use VBIOS default delay which is 0 ( 0ms ).
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
-ucMinAllowedBL_Level: Lowest LCD backlight PWM level. This is customer platform specific parameters. By default it is 0.
-
ulNbpStateMemclkFreq[4]: system memory clock frequency in unit of 10Khz in different NB pstate.
**********************************************************************************************************************/
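
All of the LVDS sequencing fields described here share one encoding: a count of 4 ms units where 0 selects that field's VBIOS default (8 maps to 32 ms, 90 to 360 ms, 125 to 500 ms in the text above). A tiny sketch of the decode:

#include <stdio.h>

/* Decode an LVDS sequencing field: 4 ms units, 0 = VBIOS default. */
static unsigned
lvds_delay_ms(unsigned char field_in_4ms, unsigned default_ms)
{
	return field_in_4ms ? field_in_4ms * 4U : default_ms;
}

int
main(void)
{
	printf("%u ms\n", lvds_delay_ms(0, 500));	/* field unset: default */
	printf("%u ms\n", lvds_delay_ms(90, 360));	/* 90 * 4 = 360 ms */
	return 0;
}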
-// this IntegrateSystemInfoTable is used for Kaveri & Kabini APU
-typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8
-{
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulBootUpEngineClock;
- ULONG ulDentistVCOFreq;
- ULONG ulBootUpUMAClock;
- ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4];
- ULONG ulBootUpReqDisplayVector;
- ULONG ulVBIOSMisc;
- ULONG ulGPUCapInfo;
- ULONG ulDISP_CLK2Freq;
- USHORT usRequestedPWMFreqInHz;
- UCHAR ucHtcTmpLmt;
- UCHAR ucHtcHystLmt;
- ULONG ulReserved2;
- ULONG ulSystemConfig;
- ULONG ulCPUCapInfo;
- ULONG ulReserved3;
- USHORT usGPUReservedSysMemSize;
- USHORT usExtDispConnInfoOffset;
- USHORT usPanelRefreshRateRange;
- UCHAR ucMemoryType;
- UCHAR ucUMAChannelNumber;
- UCHAR strVBIOSMsg[40];
- ATOM_TDP_CONFIG asTdpConfig;
- ULONG ulReserved[19];
- ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
- ULONG ulGMCRestoreResetTime;
- ULONG ulReserved4;
- ULONG ulIdleNClk;
- ULONG ulDDR_DLL_PowerUpTime;
- ULONG ulDDR_PLL_PowerUpTime;
- USHORT usPCIEClkSSPercentage;
- USHORT usPCIEClkSSType;
- USHORT usLvdsSSPercentage;
- USHORT usLvdsSSpreadRateIn10Hz;
- USHORT usHDMISSPercentage;
- USHORT usHDMISSpreadRateIn10Hz;
- USHORT usDVISSPercentage;
- USHORT usDVISSpreadRateIn10Hz;
- ULONG ulGPUReservedSysMemBaseAddrLo;
- ULONG ulGPUReservedSysMemBaseAddrHi;
- ULONG ulReserved5[3];
- USHORT usMaxLVDSPclkFreqInSingleLink;
- UCHAR ucLvdsMisc;
- UCHAR ucTravisLVDSVolAdjust;
- UCHAR ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
- UCHAR ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
- UCHAR ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
- UCHAR ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
- UCHAR ucLVDSOffToOnDelay_in4Ms;
- UCHAR ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
- UCHAR ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
- UCHAR ucMinAllowedBL_Level;
- ULONG ulLCDBitDepthControlVal;
- ULONG ulNbpStateMemclkFreq[4];
- ULONG ulReserved6;
- ULONG ulNbpStateNClkFreq[4];
- USHORT usNBPStateVoltage[4];
- USHORT usBootUpNBVoltage;
- USHORT usReserved2;
- ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
-}ATOM_INTEGRATED_SYSTEM_INFO_V1_8;
-
-/**********************************************************************************************************************
- ATOM_INTEGRATED_SYSTEM_INFO_V1_8 Description
-ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. if it is equal 0, then VBIOS use pre-defined bootup engine clock
-ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
-ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit.
-sDISPCLK_Voltage: Report Display clock frequency requirement on GNB voltage(up to 4 voltage levels).
-
-ulBootUpReqDisplayVector: VBIOS boot up display IDs, following are supported devices in Trinity projects:
- ATOM_DEVICE_CRT1_SUPPORT 0x0001
- ATOM_DEVICE_DFP1_SUPPORT 0x0008
- ATOM_DEVICE_DFP6_SUPPORT 0x0040
- ATOM_DEVICE_DFP2_SUPPORT 0x0080
- ATOM_DEVICE_DFP3_SUPPORT 0x0200
- ATOM_DEVICE_DFP4_SUPPORT 0x0400
- ATOM_DEVICE_DFP5_SUPPORT 0x0800
- ATOM_DEVICE_LCD1_SUPPORT 0x0002
-
-ulVBIOSMisc: Miscellenous flags for VBIOS requirement and interface
- bit[0]=0: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is not supported by SBIOS.
- =1: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is supported by SBIOS.
- bit[1]=0: INT15 callback function Get boot display( ax=4e08, bl=01h) is not supported by SBIOS
- =1: INT15 callback function Get boot display( ax=4e08, bl=01h) is supported by SBIOS
- bit[2]=0: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is not supported by SBIOS
- =1: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is supported by SBIOS
- bit[3]=0: VBIOS fast boot is disable
- =1: VBIOS fast boot is enable. ( VBIOS skip display device detection in every set mode if LCD panel is connect and LID is open)
-
-ulGPUCapInfo: bit[0~2]= Reserved
- bit[3]=0: Enable AUX HW mode detection logic
- =1: Disable AUX HW mode detection logic
- bit[4]=0: Disable DFS bypass feature
- =1: Enable DFS bypass feature
-
-usRequestedPWMFreqInHz: When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
- Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
-
- When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below:
- 1. SW uses the GPU BL PWM output to control the BL, in chis case, this non-zero frequency determines what freq GPU should use;
- VBIOS will set up proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1,as the result,
- Changing BL using VBIOS function is functional in both driver and non-driver present environment;
- and enabling VariBri under the driver environment from PP table is optional.
-
- 2. SW uses other means to control BL (like DPCD),this non-zero frequency serves as a flag only indicating
- that BL control from GPU is expected.
- VBIOS will NOT set up PWM frequency but make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1
- Changing BL using VBIOS function could be functional in both driver and non-driver present environment,but
- it's per platform
- and enabling VariBri under the driver environment from PP table is optional.
-
-ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt. Threshold on value to enter HTC_active state.
-ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt.
- To calculate threshold off value to exit HTC_active state, which is Threshold on vlaue minus ucHtcHystLmt.
-
-ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled
- =1: PCIE Power Gating Enabled
- Bit[1]=0: DDR-DLL shut-down feature disabled.
- 1: DDR-DLL shut-down feature enabled.
- Bit[2]=0: DDR-PLL Power down feature disabled.
- 1: DDR-PLL Power down feature enabled.
- Bit[3]=0: GNB DPM is disabled
- =1: GNB DPM is enabled
-ulCPUCapInfo: TBD
-
-usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure
-usPanelRefreshRateRange: Bit vector for LCD supported refresh rate range. If DRR is requestd by the platform, at least two bits need to be set
- to indicate a range.
- SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
- SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
- SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
- SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
-
-ucMemoryType: [3:0]=1:DDR1;=2:DDR2;=3:DDR3;=5:GDDR5; [7:4] is reserved.
-ucUMAChannelNumber: System memory channel numbers.
-
-strVBIOSMsg[40]: VBIOS boot up customized message string
-
-sAvail_SCLK[5]: Arrays to provide availabe list of SLCK and corresponding voltage, order from low to high
-
-ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
-ulIdleNClk: NCLK speed while memory runs in self-refresh state, used to calculate self-refresh latency. Unit in 10kHz.
-ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns.
-ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns.
-
-usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in unit 0.01%; 100 mean 1%.
-usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread.
-usLvdsSSPercentage: LVDS panel ( not include eDP ) Spread Spectrum Percentage in unit of 0.01%, =0, use VBIOS default setting.
-usLvdsSSpreadRateIn10Hz: LVDS panel ( not include eDP ) Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
-usHDMISSPercentage: HDMI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%, =0, use VBIOS default setting.
-usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
-usDVISSPercentage: DVI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%, =0, use VBIOS default setting.
-usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
-
-usGPUReservedSysMemSize: Reserved system memory size for ACP engine in APU GNB, units in MB. 0/2/4MB based on CMOS options, current default could be 0MB. KV only, not on KB.
-ulGPUReservedSysMemBaseAddrLo: Low 32 bits base address to the reserved system memory.
-ulGPUReservedSysMemBaseAddrHi: High 32 bits base address to the reserved system memory.
-
-usMaxLVDSPclkFreqInSingleLink: Max pixel clock of a single-link LVDS panel; if =0, VBIOS uses the default threshold, currently 85MHz
-ucLVDSMisc:                   [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
-                              [bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swapped, =1: lower link and upper link are swapped
-                              [bit2] LVDS 888bit per color mode  =0: 666 bit per color =1: 888 bit per color
-                              [bit3] LVDS parameter override enable  =0: ucLvdsMisc parameters are not used =1: ucLvdsMisc parameters should be used
-                              [bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted (active high) =1: inverted (active low)
-                              [bit5] Travis LVDS output voltage override enable; when =1, use the ucTravisLVDSVolAdjust value to overwrite Travis register LVDS_CTRL_4
-ucTravisLVDSVolAdjust         When ucLVDSMisc[5]=1, the platform SBIOS wants to override TravisLVDSVoltage; VBIOS will then use the ucTravisLVDSVolAdjust
-                              value to program Travis register LVDS_CTRL_4
-ucLVDSPwrOnSeqDIGONtoDE_in4Ms:
-                              LVDS power up sequence time in units of 4ms, time delay from DIGON signal active to data enable signal (DE) active.
-                              =0 means use the VBIOS default, which is 8 (32ms). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
-                              This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
-ucLVDSPwrOnDEtoVARY_BL_in4Ms:
-                              LVDS power up sequence time in units of 4ms, time delay from DE (data enable) active to vary brightness enable signal (VARY_BL) active.
-                              =0 means use the VBIOS default, which is 90 (360ms). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
-                              This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
-ucLVDSPwrOffVARY_BLtoDE_in4Ms:
-                              LVDS power down sequence time in units of 4ms, time delay from vary brightness enable signal (VARY_BL) off to data enable (DE) signal off.
-                              =0 means use the VBIOS default delay, which is 8 (32ms). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
-                              This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
-ucLVDSPwrOffDEtoDIGON_in4Ms:
-                              LVDS power down sequence time in units of 4ms, time delay from data enable (DE) signal off to LCDVCC (DIGON) off.
-                              =0 means use the VBIOS default, which is 90 (360ms). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
-                              This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
-ucLVDSOffToOnDelay_in4Ms:
-                              Delay from LVDS power off to the next power on, in units of 4ms: time from DIGON signal off to DIGON signal active again.
-                              =0 means use the VBIOS default delay, which is 125 (500ms).
-                              This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
-ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms:
-                              LVDS power up sequence time in units of 4ms. Time delay from VARY_BL signal on to BLON signal active.
-                              =0 means use the VBIOS default delay, which is 0 (0ms).
-                              This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
-
-ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms:
-                              LVDS power down sequence time in units of 4ms. Time delay from BLON signal off to VARY_BL signal off.
-                              =0 means use the VBIOS default delay, which is 0 (0ms).
-                              This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
-ucMinAllowedBL_Level: Lowest LCD backlight PWM level. This is a customer platform-specific parameter. By default it is 0.
-
-ulLCDBitDepthControlVal: GPU display control encoder bit dither control setting, used to program register mmFMT_BIT_DEPTH_CONTROL
-
-ulNbpStateMemclkFreq[4]: System memory clock frequency in units of 10kHz in the different NB P-States (P0, P1, P2 & P3).
-ulNbpStateNClkFreq[4]: NB P-State NClk frequency in the different NB P-States
-usNBPStateVoltage[4]: NB P-State (P0/P1 & P2/P3) voltage; NBP3 refers to the lowest voltage
-usBootUpNBVoltage: NB P-State voltage during boot up before driver loaded
-sExtDispConnInfo: Display connector information table provided to VBIOS
-
-**********************************************************************************************************************/
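The ucLVDS*_in4Ms fields documented above share one encoding: a count of 4ms units, with 0 selecting the quoted VBIOS default. A minimal sketch of the conversion, under that assumption; the helper name is hypothetical:

static unsigned int
lvds_delay_ms(unsigned char field_in_4ms, unsigned char default_in_4ms)
{
	/* 0 means "use the VBIOS default for this field" */
	unsigned int units = field_in_4ms ? field_in_4ms : default_in_4ms;

	return units * 4;	/* each unit is 4ms */
}

/* e.g. lvds_delay_ms(ucLVDSPwrOnSeqDIGONtoDE_in4Ms, 8) yields 32ms when the
 * field is 0, matching the documented default. */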
-
-// This table is used for Kaveri/Kabini APUs
-typedef struct _ATOM_FUSION_SYSTEM_INFO_V2
-{
- ATOM_INTEGRATED_SYSTEM_INFO_V1_8 sIntegratedSysInfo; // refer to ATOM_INTEGRATED_SYSTEM_INFO_V1_8 definition
- ULONG ulPowerplayTable[128]; // Update comments here to link new powerplay table definition structure
-}ATOM_FUSION_SYSTEM_INFO_V2;
-
-
/**************************************************************************/
// This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design
//Memory SS Info Table
@@ -5479,24 +5025,22 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT
//Define ucClockIndication; SW uses the IDs below to determine whether SS is required/enabled on a clock branch/signal type.
//SS is not required or enabled if a match is not found.
-#define ASIC_INTERNAL_MEMORY_SS 1
-#define ASIC_INTERNAL_ENGINE_SS 2
-#define ASIC_INTERNAL_UVD_SS 3
-#define ASIC_INTERNAL_SS_ON_TMDS 4
-#define ASIC_INTERNAL_SS_ON_HDMI 5
-#define ASIC_INTERNAL_SS_ON_LVDS 6
-#define ASIC_INTERNAL_SS_ON_DP 7
-#define ASIC_INTERNAL_SS_ON_DCPLL 8
-#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
-#define ASIC_INTERNAL_VCE_SS 10
-#define ASIC_INTERNAL_GPUPLL_SS 11
-
+#define ASIC_INTERNAL_MEMORY_SS 1
+#define ASIC_INTERNAL_ENGINE_SS 2
+#define ASIC_INTERNAL_UVD_SS 3
+#define ASIC_INTERNAL_SS_ON_TMDS 4
+#define ASIC_INTERNAL_SS_ON_HDMI 5
+#define ASIC_INTERNAL_SS_ON_LVDS 6
+#define ASIC_INTERNAL_SS_ON_DP 7
+#define ASIC_INTERNAL_SS_ON_DCPLL 8
+#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
+#define ASIC_INTERNAL_VCE_SS 10
typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
{
 ULONG ulTargetClockRange; //For mem/engine/uvd, Clock Out frequency (VCO), in unit of 10kHz
 //For TMDS/HDMI/LVDS, it is pixel clock; for DP, it is link clock (27000 or 16200)
- USHORT usSpreadSpectrumPercentage; //in unit of 0.01% or 0.001%, decided by ucSpreadSpectrumMode bit4
+ USHORT usSpreadSpectrumPercentage; //in unit of 0.01%
USHORT usSpreadRateIn10Hz; //in unit of 10Hz, modulation freq
UCHAR ucClockIndication; //Indicate which clock source needs SS
UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS
@@ -5534,11 +5078,6 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3
UCHAR ucReserved[2];
}ATOM_ASIC_SS_ASSIGNMENT_V3;
-//ATOM_ASIC_SS_ASSIGNMENT_V3.ucSpreadSpectrumMode
-#define SS_MODE_V3_CENTRE_SPREAD_MASK 0x01
-#define SS_MODE_V3_EXTERNAL_SS_MASK 0x02
-#define SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK 0x10
-
typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
{
ATOM_COMMON_TABLE_HEADER sHeader;
@@ -5907,8 +5446,6 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
#define ATOM_S7_DOS_MODE_PIXEL_DEPTHb0 0x0C
#define ATOM_S7_DOS_MODE_PIXEL_FORMATb0 0xF0
#define ATOM_S7_DOS_8BIT_DAC_ENb1 0x01
-#define ATOM_S7_ASIC_INIT_COMPLETEb1 0x02
-#define ATOM_S7_ASIC_INIT_COMPLETE_MASK 0x00000200
#define ATOM_S7_DOS_MODE_NUMBERw1 0x0FFFF
#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT 8
@@ -6181,7 +5718,6 @@ typedef struct _INDIRECT_IO_ACCESS
#define INDIRECT_IO_PCIE 3
#define INDIRECT_IO_PCIEP 4
#define INDIRECT_IO_NBMISC 5
-#define INDIRECT_IO_SMU 5
#define INDIRECT_IO_PLL_READ INDIRECT_IO_PLL | INDIRECT_READ
#define INDIRECT_IO_PLL_WRITE INDIRECT_IO_PLL | INDIRECT_WRITE
@@ -6193,8 +5729,6 @@ typedef struct _INDIRECT_IO_ACCESS
#define INDIRECT_IO_PCIEP_WRITE INDIRECT_IO_PCIEP | INDIRECT_WRITE
#define INDIRECT_IO_NBMISC_READ INDIRECT_IO_NBMISC | INDIRECT_READ
#define INDIRECT_IO_NBMISC_WRITE INDIRECT_IO_NBMISC | INDIRECT_WRITE
-#define INDIRECT_IO_SMU_READ INDIRECT_IO_SMU | INDIRECT_READ
-#define INDIRECT_IO_SMU_WRITE INDIRECT_IO_SMU | INDIRECT_WRITE
typedef struct _ATOM_OEM_INFO
{
@@ -6340,10 +5874,8 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
#define _64Mx32 0x43
#define _128Mx8 0x51
#define _128Mx16 0x52
-#define _128Mx32 0x53
#define _256Mx8 0x61
#define _256Mx16 0x62
-#define _512Mx8 0x71
#define SAMSUNG 0x1
#define INFINEON 0x2
@@ -6360,8 +5892,6 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE
#define PROMOS MOSEL
#define KRETON INFINEON
#define ELIXIR NANYA
-#define MEZZA ELPIDA
-
/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
@@ -7089,16 +6619,11 @@ typedef struct _ATOM_DISP_OUT_INFO_V3
UCHAR ucMaxDispEngineNum;
UCHAR ucMaxActiveDispEngineNum;
UCHAR ucMaxPPLLNum;
- UCHAR ucCoreRefClkSource; // value of CORE_REF_CLK_SOURCE
- UCHAR ucDispCaps;
- UCHAR ucReserved[2];
- ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[1]; // for alignment only
+ UCHAR ucCoreRefClkSource; // value of CORE_REF_CLK_SOURCE
+ UCHAR ucReserved[3];
+ ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[1]; // for alignment only
}ATOM_DISP_OUT_INFO_V3;
-//ucDispCaps
-#define DISPLAY_CAPS__DP_PCLK_FROM_PPLL 0x01
-#define DISPLAY_CAPS__FORCE_DISPDEV_CONNECTED 0x02
-
typedef enum CORE_REF_CLK_SOURCE{
CLOCK_SRC_XTALIN=0,
CLOCK_SRC_XO_IN=1,
@@ -7303,17 +6828,6 @@ typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_1{
USHORT usPhyPllSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings
}DIG_TRANSMITTER_INFO_HEADER_V3_1;
-typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_2{
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usDPVsPreEmphSettingOffset; // offset of PHY_ANALOG_SETTING_INFO * with DP Voltage Swing and Pre-Emphasis for each Link clock
- USHORT usPhyAnalogRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with None-DP mode Analog Setting's register Info
- USHORT usPhyAnalogSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with None-DP mode Analog Setting for each link clock range
- USHORT usPhyPllRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy Pll register Info
- USHORT usPhyPllSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings
- USHORT usDPSSRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy SS Pll register Info
- USHORT usDPSSSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy SS Pll Settings
-}DIG_TRANSMITTER_INFO_HEADER_V3_2;
-
typedef struct _CLOCK_CONDITION_REGESTER_INFO{
USHORT usRegisterIndex;
UCHAR ucStartBit;
@@ -7337,24 +6851,12 @@ typedef struct _PHY_CONDITION_REG_VAL{
ULONG ulRegVal;
}PHY_CONDITION_REG_VAL;
-typedef struct _PHY_CONDITION_REG_VAL_V2{
- ULONG ulCondition;
- UCHAR ucCondition2;
- ULONG ulRegVal;
-}PHY_CONDITION_REG_VAL_V2;
-
typedef struct _PHY_CONDITION_REG_INFO{
USHORT usRegIndex;
USHORT usSize;
PHY_CONDITION_REG_VAL asRegVal[1];
}PHY_CONDITION_REG_INFO;
-typedef struct _PHY_CONDITION_REG_INFO_V2{
- USHORT usRegIndex;
- USHORT usSize;
- PHY_CONDITION_REG_VAL_V2 asRegVal[1];
-}PHY_CONDITION_REG_INFO_V2;
-
typedef struct _PHY_ANALOG_SETTING_INFO{
UCHAR ucEncodeMode;
UCHAR ucPhySel;
@@ -7362,25 +6864,6 @@ typedef struct _PHY_ANALOG_SETTING_INFO{
PHY_CONDITION_REG_INFO asAnalogSetting[1];
}PHY_ANALOG_SETTING_INFO;
-typedef struct _PHY_ANALOG_SETTING_INFO_V2{
- UCHAR ucEncodeMode;
- UCHAR ucPhySel;
- USHORT usSize;
- PHY_CONDITION_REG_INFO_V2 asAnalogSetting[1];
-}PHY_ANALOG_SETTING_INFO_V2;
-
-typedef struct _GFX_HAVESTING_PARAMETERS {
- UCHAR ucGfxBlkId; //GFX blk id to be harvested, like CU, RB or PRIM
- UCHAR ucReserved; //reserved
- UCHAR ucActiveUnitNumPerSH; //requested active CU/RB/PRIM number per shader array
- UCHAR ucMaxUnitNumPerSH; //max CU/RB/PRIM number per shader array
-} GFX_HAVESTING_PARAMETERS;
-
-//ucGfxBlkId
-#define GFX_HARVESTING_CU_ID 0
-#define GFX_HARVESTING_RB_ID 1
-#define GFX_HARVESTING_PRIM_ID 2
-
/****************************************************************************/
//Portion VI: Definitions for vbios MC scratch registers that the driver uses
/****************************************************************************/
@@ -7391,17 +6874,8 @@ typedef struct _GFX_HAVESTING_PARAMETERS {
#define MC_MISC0__MEMORY_TYPE__GDDR3 0x30000000
#define MC_MISC0__MEMORY_TYPE__GDDR4 0x40000000
#define MC_MISC0__MEMORY_TYPE__GDDR5 0x50000000
-#define MC_MISC0__MEMORY_TYPE__HBM 0x60000000
#define MC_MISC0__MEMORY_TYPE__DDR3 0xB0000000
-#define ATOM_MEM_TYPE_DDR_STRING "DDR"
-#define ATOM_MEM_TYPE_DDR2_STRING "DDR2"
-#define ATOM_MEM_TYPE_GDDR3_STRING "GDDR3"
-#define ATOM_MEM_TYPE_GDDR4_STRING "GDDR4"
-#define ATOM_MEM_TYPE_GDDR5_STRING "GDDR5"
-#define ATOM_MEM_TYPE_HBM_STRING "HBM"
-#define ATOM_MEM_TYPE_DDR3_STRING "DDR3"
-
/****************************************************************************/
//Portion VI: Definitions that are obsolete
/****************************************************************************/
@@ -7764,6 +7238,565 @@ typedef struct _ATOM_POWERPLAY_INFO_V3
ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
}ATOM_POWERPLAY_INFO_V3;
+/* New PPlib */
+/**************************************************************************/
+typedef struct _ATOM_PPLIB_THERMALCONTROLLER
+
+{
+ UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_*
+ UCHAR ucI2cLine; // as interpreted by DAL I2C
+ UCHAR ucI2cAddress;
+ UCHAR ucFanParameters; // Fan Control Parameters.
+ UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only.
+ UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only.
+ UCHAR ucReserved; // ----
+ UCHAR ucFlags; // to be defined
+} ATOM_PPLIB_THERMALCONTROLLER;
+
+#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
+#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller.
+
+#define ATOM_PP_THERMALCONTROLLER_NONE 0
+#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_LM64 5
+#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
+#define ATOM_PP_THERMALCONTROLLER_RV770 8
+#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
+#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
+#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
+#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally
+#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15
+#define ATOM_PP_THERMALCONTROLLER_SISLANDS 16
+#define ATOM_PP_THERMALCONTROLLER_LM96163 17
+
+// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
+// We probably should reserve the bit 0x80 for this use.
+// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
+// The driver can pick the correct internal controller based on the ASIC.
+
+#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
+#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller
+
+typedef struct _ATOM_PPLIB_STATE
+{
+ UCHAR ucNonClockStateIndex;
+ UCHAR ucClockStateIndices[1]; // variable-sized
+} ATOM_PPLIB_STATE;
+
+
+typedef struct _ATOM_PPLIB_FANTABLE
+{
+ UCHAR ucFanTableFormat; // Change this if the table format or version changes such that the other fields change meaning.
+ UCHAR ucTHyst; // Temperature hysteresis. Integer.
+ USHORT usTMin; // The temperature, in units of 0.01 degrees C, below which we just run at a minimal PWM.
+ USHORT usTMed; // The middle temperature where we change slopes.
+ USHORT usTHigh; // The high point above TMed for adjusting the second slope.
+ USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments).
+ USHORT usPWMMed; // The PWM value (in percent) at TMed.
+ USHORT usPWMHigh; // The PWM value at THigh.
+} ATOM_PPLIB_FANTABLE;
+
+typedef struct _ATOM_PPLIB_FANTABLE2
+{
+ ATOM_PPLIB_FANTABLE basicTable;
+ USHORT usTMax; // The max temperature
+} ATOM_PPLIB_FANTABLE2;
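These fields describe a piecewise-linear fan curve: PWMMin below TMin, then linear interpolation up to PWMMed at TMed and PWMHigh at THigh. A minimal sketch, assuming usTHigh is an absolute temperature (not an offset above TMed) and that TMin < TMed < THigh; temperatures are in 0.01 degC and PWM values in 0.01%:

static unsigned int
fan_pwm_from_temp(const ATOM_PPLIB_FANTABLE *f, unsigned int t)
{
	if (t <= f->usTMin)
		return f->usPWMMin;
	if (t <= f->usTMed)	/* first slope: (TMin,PWMMin) -> (TMed,PWMMed) */
		return f->usPWMMin + (f->usPWMMed - f->usPWMMin) *
		    (t - f->usTMin) / (f->usTMed - f->usTMin);
	if (t <= f->usTHigh)	/* second slope: (TMed,PWMMed) -> (THigh,PWMHigh) */
		return f->usPWMMed + (f->usPWMHigh - f->usPWMMed) *
		    (t - f->usTMed) / (f->usTHigh - f->usTMed);
	return f->usPWMHigh;
}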
+
+typedef struct _ATOM_PPLIB_EXTENDEDHEADER
+{
+ USHORT usSize;
+ ULONG ulMaxEngineClock; // For Overdrive.
+ ULONG ulMaxMemoryClock; // For Overdrive.
+ // Add extra system parameters here, always adjust size to include all fields.
+ USHORT usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
+ USHORT usUVDTableOffset; //points to ATOM_PPLIB_UVD_Table
+} ATOM_PPLIB_EXTENDEDHEADER;
+
+//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
+#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
+#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
+#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
+#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
+#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
+#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
+#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
+#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
+#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
+#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
+#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
+#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
+#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
+#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition.
+#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
+#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC.
+#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
+#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Whether the driver supports the BACO state.
+
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+
+ UCHAR ucDataRevision;
+
+ UCHAR ucNumStates;
+ UCHAR ucStateEntrySize;
+ UCHAR ucClockInfoSize;
+ UCHAR ucNonClockSize;
+
+ // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
+ USHORT usStateArrayOffset;
+
+ // offset from start of this table to array of ASIC-specific structures,
+ // currently ATOM_PPLIB_CLOCK_INFO.
+ USHORT usClockInfoArrayOffset;
+
+ // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
+ USHORT usNonClockInfoArrayOffset;
+
+ USHORT usBackbiasTime; // in microseconds
+ USHORT usVoltageTime; // in microseconds
+ USHORT usTableSize; //the size of this structure, or the extended structure
+
+ ULONG ulPlatformCaps; // See ATOM_PP_PLATFORM_CAP_*
+
+ ATOM_PPLIB_THERMALCONTROLLER sThermalController;
+
+ USHORT usBootClockInfoOffset;
+ USHORT usBootNonClockInfoOffset;
+
+} ATOM_PPLIB_POWERPLAYTABLE;
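All the usXXXOffset fields are relative to the start of the table itself, so each array is found by pointer arithmetic from the header. A minimal sketch, assuming a little-endian ROM image and a le16_to_cpu helper as used elsewhere in this tree; the function name is hypothetical:

static const ATOM_PPLIB_STATE *
pplib_state_array(const ATOM_PPLIB_POWERPLAYTABLE *t)
{
	const unsigned char *base = (const unsigned char *)t;

	/* offset is from the start of this table, not from the end of sHeader */
	return (const ATOM_PPLIB_STATE *)
	    (base + le16_to_cpu(t->usStateArrayOffset));
}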
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
+{
+ ATOM_PPLIB_POWERPLAYTABLE basicTable;
+ UCHAR ucNumCustomThermalPolicy;
+ USHORT usCustomThermalPolicyArrayOffset;
+}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
+{
+ ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
+ USHORT usFormatID; // To be used ONLY by PPGen.
+ USHORT usFanTableOffset;
+ USHORT usExtendendedHeaderOffset;
+} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
+{
+ ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
+ ULONG ulGoldenPPID; // PPGen use only
+ ULONG ulGoldenRevision; // PPGen use only
+ USHORT usVddcDependencyOnSCLKOffset;
+ USHORT usVddciDependencyOnMCLKOffset;
+ USHORT usVddcDependencyOnMCLKOffset;
+ USHORT usMaxClockVoltageOnDCOffset;
+ USHORT usVddcPhaseShedLimitsTableOffset; // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
+ USHORT usReserved;
+} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
+{
+ ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
+ ULONG ulTDPLimit;
+ ULONG ulNearTDPLimit;
+ ULONG ulSQRampingThreshold;
+ USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table
+ ULONG ulCACLeakage; // The iLeakage for driver calculated CAC leakage table
+ USHORT usTDPODLimit;
+ USHORT usLoadLineSlope; // in milliOhms * 100
+} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
+
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
+#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
+#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
+#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0
+#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1
+#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3
+#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5
+// 2, 4, 6, 7 are reserved
+
+#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008
+#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010
+#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020
+#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040
+#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080
+#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100
+#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200
+#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
+#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
+#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
+#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000
+#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
+#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
+
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
+#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
+#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002
+#define ATOM_PPLIB_CLASSIFICATION2_MVC 0x0004 //Multi-View Codec (BD-3D)
+
+//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
+#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
+#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
+
+// 0 is 2.5Gb/s, 1 is 5Gb/s
+#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004
+#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2
+
+// stored as (lanes - 1); 1, 2, 4, 8, 12, 16 lanes permitted by the PCIE spec
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3
+
+// lookup into reduced refresh-rate table
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
+
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1
+// 2-15 TBD as needed.
+
+#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
+#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
+
+#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
+
+#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
+
+//memory related flags
+#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x00010000
+
+// M3 Arb: 2 bits, currently 3 sets of parameters in total
+#define ATOM_PPLIB_M3ARB_MASK 0x00060000
+#define ATOM_PPLIB_M3ARB_SHIFT 17
+
+#define ATOM_PPLIB_ENABLE_DRR 0x00080000
+
+// remaining 16 bits are reserved
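A minimal sketch of decoding the PCIE fields packed into ulCapsAndSettings; the width field stores (lanes - 1) and the speed bit selects 2.5Gb/s vs 5Gb/s. The function name is hypothetical:

static void
pcie_caps_decode(unsigned long caps, unsigned int *gen2, unsigned int *lanes)
{
	/* 0: 2.5Gb/s, 1: 5Gb/s */
	*gen2 = (caps & ATOM_PPLIB_PCIE_LINK_SPEED_MASK) >>
	    ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT;
	/* stored as lanes - 1 */
	*lanes = ((caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
	    ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
}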
+typedef struct _ATOM_PPLIB_THERMAL_STATE
+{
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ UCHAR ucThermalAction;
+}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE::ucNonClockStateIndex
+#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
+#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
+typedef struct _ATOM_PPLIB_NONCLOCK_INFO
+{
+ USHORT usClassification;
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ ULONG ulCapsAndSettings;
+ UCHAR ucRequiredPower;
+ USHORT usClassification2;
+ ULONG ulVCLK;
+ ULONG ulDCLK;
+ UCHAR ucUnused[5];
+} ATOM_PPLIB_NONCLOCK_INFO;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
+typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ USHORT usVDDC;
+ USHORT usUnused1;
+ USHORT usUnused2;
+
+ ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_R600_CLOCK_INFO;
+
+// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
+#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1
+#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2
+#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
+#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0).
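Engine and memory clocks in these clock-info records are split into a 16-bit low half and an 8-bit high byte. A minimal sketch of reassembling the 24-bit value, still in 10kHz units, with le16_to_cpu assumed as above; the function name is hypothetical:

static unsigned int
r600_engine_clock_10khz(const ATOM_PPLIB_R600_CLOCK_INFO *ci)
{
	return ((unsigned int)ci->ucEngineClockHigh << 16) |
	    le16_to_cpu(ci->usEngineClockLow);
}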
+
+typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ USHORT usVDDC;
+ USHORT usVDDCI;
+ USHORT usUnused;
+
+ ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ USHORT usVDDC;
+ USHORT usVDDCI;
+ UCHAR ucPCIEGen;
+ UCHAR ucUnused1;
+
+ ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
+
+} ATOM_PPLIB_SI_CLOCK_INFO;
+
+
+typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
+
+{
+ USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600).
+ UCHAR ucLowEngineClockHigh;
+ USHORT usHighEngineClockLow; // High Engine clock in MHz.
+ UCHAR ucHighEngineClockHigh;
+ USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
+ UCHAR ucMemoryClockHigh; // Currently unused.
+ UCHAR ucPadding; // For proper alignment and size.
+ USHORT usVDDC; // For the 780, use: None, Low, High, Variable
+ UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
+ UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum downstream width could be bigger due to display BW requirements.
+ USHORT usHTLinkFreq; // See the ATOM_PPLIB_RS780_HTLINKFREQ_xxx definitions, or a value in MHz (>=200).
+ ULONG ulFlags;
+} ATOM_PPLIB_RS780_CLOCK_INFO;
+
+#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
+#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
+#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
+#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
+
+#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
+#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
+#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
+
+#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
+#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
+#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
+
+typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
+ USHORT usEngineClockLow; //clock frequency & 0xFFFF. The unit is 10kHz
+ UCHAR ucEngineClockHigh; //clock frequency >> 16.
+ UCHAR vddcIndex; //2-bit vddc index;
+ USHORT tdpLimit;
+ //please initialize to 0
+ USHORT rsv1;
+ //please initialize to 0s
+ ULONG rsv2[2];
+}ATOM_PPLIB_SUMO_CLOCK_INFO;
+
+
+
+typedef struct _ATOM_PPLIB_STATE_V2
+{
+ //number of valid dpm levels in this state; the driver uses it to calculate the whole
+ //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+ UCHAR ucNumDPMLevels;
+
+ //an index into the array of nonClockInfos
+ UCHAR nonClockInfoIndex;
+ /**
+ * Driver will read the first ucNumDPMLevels in this array
+ */
+ UCHAR clockInfoIndex[1];
+} ATOM_PPLIB_STATE_V2;
+
+typedef struct _StateArray{
+ //how many states we have
+ UCHAR ucNumEntries;
+
+ ATOM_PPLIB_STATE_V2 states[1];
+}StateArray;
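Because ATOM_PPLIB_STATE_V2 is variable-sized, StateArray cannot be indexed like a plain C array; each entry must be stepped over using the size formula quoted in the struct comment. A minimal sketch of the iteration step; the helper name is hypothetical:

static const ATOM_PPLIB_STATE_V2 *
pplib_state_v2_next(const ATOM_PPLIB_STATE_V2 *s)
{
	/* sizeof() already includes one clockInfoIndex element */
	unsigned int sz = sizeof(ATOM_PPLIB_STATE_V2) +
	    (s->ucNumDPMLevels - 1) * sizeof(UCHAR);

	return (const ATOM_PPLIB_STATE_V2 *)((const unsigned char *)s + sz);
}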
+
+
+typedef struct _ClockInfoArray{
+ //how many clock levels we have
+ UCHAR ucNumEntries;
+
+ //sizeof(ATOM_PPLIB_CLOCK_INFO)
+ UCHAR ucEntrySize;
+
+ UCHAR clockInfo[1];
+}ClockInfoArray;
+
+typedef struct _NonClockInfoArray{
+
+ //how many non-clock levels we have; normally the same as the number of states
+ UCHAR ucNumEntries;
+ //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
+ UCHAR ucEntrySize;
+
+ ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+}NonClockInfoArray;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
+{
+ USHORT usClockLow;
+ UCHAR ucClockHigh;
+ USHORT usVoltage;
+}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
+{
+ USHORT usSclkLow;
+ UCHAR ucSclkHigh;
+ USHORT usMclkLow;
+ UCHAR ucMclkHigh;
+ USHORT usVddc;
+ USHORT usVddci;
+}ATOM_PPLIB_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_CAC_Leakage_Record
+{
+ USHORT usVddc; // We use this field for the "fake" standardized VDDC for power calculations
+ ULONG ulLeakageValue;
+}ATOM_PPLIB_CAC_Leakage_Record;
+
+typedef struct _ATOM_PPLIB_CAC_Leakage_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_CAC_Leakage_Table;
+
+typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
+{
+ USHORT usVoltage;
+ USHORT usSclkLow;
+ UCHAR ucSclkHigh;
+ USHORT usMclkLow;
+ UCHAR ucMclkHigh;
+}ATOM_PPLIB_PhaseSheddingLimits_Record;
+
+typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_PhaseSheddingLimits_Table;
+
+typedef struct _VCEClockInfo{
+ USHORT usEVClkLow;
+ UCHAR ucEVClkHigh;
+ USHORT usECClkLow;
+ UCHAR ucECClkHigh;
+}VCEClockInfo;
+
+typedef struct _VCEClockInfoArray{
+ UCHAR ucNumEntries;
+ VCEClockInfo entries[1];
+}VCEClockInfoArray;
+
+typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
+{
+ USHORT usVoltage;
+ UCHAR ucVCEClockInfoIndex;
+}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
+{
+ UCHAR numEntries;
+ ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_VCE_State_Record
+{
+ UCHAR ucVCEClockInfoIndex;
+ UCHAR ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
+}ATOM_PPLIB_VCE_State_Record;
+
+typedef struct _ATOM_PPLIB_VCE_State_Table
+{
+ UCHAR numEntries;
+ ATOM_PPLIB_VCE_State_Record entries[1];
+}ATOM_PPLIB_VCE_State_Table;
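ucClockInfoIndex packs two fields into one byte, as its comment notes. A minimal sketch of splitting it apart; the function name is hypothetical:

static void
vce_state_record_decode(const ATOM_PPLIB_VCE_State_Record *r,
    unsigned int *mem_pstate, unsigned int *clk_index)
{
	*mem_pstate = (r->ucClockInfoIndex >> 6) & 0x3;	/* highest 2 bits */
	*clk_index = r->ucClockInfoIndex & 0x3f;	/* lower 6 bits */
}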
+
+
+typedef struct _ATOM_PPLIB_VCE_Table
+{
+ UCHAR revid;
+// VCEClockInfoArray array;
+// ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
+// ATOM_PPLIB_VCE_State_Table states;
+}ATOM_PPLIB_VCE_Table;
+
+
+typedef struct _UVDClockInfo{
+ USHORT usVClkLow;
+ UCHAR ucVClkHigh;
+ USHORT usDClkLow;
+ UCHAR ucDClkHigh;
+}UVDClockInfo;
+
+typedef struct _UVDClockInfoArray{
+ UCHAR ucNumEntries;
+ UVDClockInfo entries[1];
+}UVDClockInfoArray;
+
+typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
+{
+ USHORT usVoltage;
+ UCHAR ucUVDClockInfoIndex;
+}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
+{
+ UCHAR numEntries;
+ ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_UVD_State_Record
+{
+ UCHAR ucUVDClockInfoIndex;
+ UCHAR ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
+}ATOM_PPLIB_UVD_State_Record;
+
+typedef struct _ATOM_PPLIB_UVD_State_Table
+{
+ UCHAR numEntries;
+ ATOM_PPLIB_UVD_State_Record entries[1];
+}ATOM_PPLIB_UVD_State_Table;
+
+
+typedef struct _ATOM_PPLIB_UVD_Table
+{
+ UCHAR revid;
+// UVDClockInfoArray array;
+// ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
+// ATOM_PPLIB_UVD_State_Table states;
+}ATOM_PPLIB_UVD_Table;
+
+/**************************************************************************/
+
// Following definitions are for compatibility issues in different SW components.
#define ATOM_MASTER_DATA_TABLE_REVISION 0x01
@@ -7944,8 +7977,8 @@ typedef struct {
typedef struct {
AMD_ACPI_DESCRIPTION_HEADER SHeader;
UCHAR TableUUID[16]; //0x24
- ULONG VBIOSImageOffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the structure.
- ULONG Lib1ImageOffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the structure.
+ ULONG VBIOSImageOffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the stucture.
+ ULONG Lib1ImageOffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the stucture.
ULONG Reserved[4]; //0x3C
}UEFI_ACPI_VFCT;
@@ -7976,6 +8009,3 @@ typedef struct {
#endif /* _ATOMBIOS_H */
-
-#include "pptable.h"
-
diff --git a/sys/dev/pci/drm/radeon/atombios_crtc.c b/sys/dev/pci/drm/radeon/atombios_crtc.c
index 8f3826560ba..ae788fa7369 100644
--- a/sys/dev/pci/drm/radeon/atombios_crtc.c
+++ b/sys/dev/pci/drm/radeon/atombios_crtc.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: atombios_crtc.c,v 1.9 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -209,16 +210,6 @@ static void atombios_enable_crtc_memreq(struct drm_crtc *crtc, int state)
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
-static const u32 vga_control_regs[6] =
-{
- AVIVO_D1VGA_CONTROL,
- AVIVO_D2VGA_CONTROL,
- EVERGREEN_D3VGA_CONTROL,
- EVERGREEN_D4VGA_CONTROL,
- EVERGREEN_D5VGA_CONTROL,
- EVERGREEN_D6VGA_CONTROL,
-};
-
static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -226,23 +217,13 @@ static void atombios_blank_crtc(struct drm_crtc *crtc, int state)
struct radeon_device *rdev = dev->dev_private;
int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
BLANK_CRTC_PS_ALLOCATION args;
- u32 vga_control = 0;
memset(&args, 0, sizeof(args));
- if (ASIC_IS_DCE8(rdev)) {
- vga_control = RREG32(vga_control_regs[radeon_crtc->crtc_id]);
- WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control | 1);
- }
-
args.ucCRTC = radeon_crtc->crtc_id;
args.ucBlanking = state;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
- if (ASIC_IS_DCE8(rdev)) {
- WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control);
- }
}
static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
@@ -270,13 +251,13 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
radeon_crtc->enabled = true;
+ /* adjust pm to dpms changes BEFORE enabling crtcs */
+ radeon_pm_compute_clocks(rdev);
atombios_enable_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
atombios_blank_crtc(crtc, ATOM_DISABLE);
drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
- /* Make sure vblank interrupt is still enabled if needed */
- radeon_irq_set(rdev);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
@@ -289,10 +270,10 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_DISABLE);
radeon_crtc->enabled = false;
+ /* adjust pm to dpms changes AFTER disabling crtcs */
+ radeon_pm_compute_clocks(rdev);
break;
}
- /* adjust pm to dpms */
- radeon_pm_compute_clocks(rdev);
}
static void
@@ -332,10 +313,8 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
misc |= ATOM_COMPOSITESYNC;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
misc |= ATOM_INTERLACE;
- if (mode->flags & DRM_MODE_FLAG_DBLCLK)
- misc |= ATOM_DOUBLE_CLOCK_MODE;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
+ misc |= ATOM_DOUBLE_CLOCK_MODE;
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
@@ -378,10 +357,8 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
misc |= ATOM_COMPOSITESYNC;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
misc |= ATOM_INTERLACE;
- if (mode->flags & DRM_MODE_FLAG_DBLCLK)
- misc |= ATOM_DOUBLE_CLOCK_MODE;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
+ misc |= ATOM_DOUBLE_CLOCK_MODE;
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
@@ -447,17 +424,7 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
union atom_enable_ss args;
- if (enable) {
- /* Don't mess with SS if percentage is 0 or external ss.
- * SS is already disabled previously, and disabling it
- * again can cause display problems if the pll is already
- * programmed.
- */
- if (ss->percentage == 0)
- return;
- if (ss->type & ATOM_EXTERNAL_SS_MASK)
- return;
- } else {
+ if (!enable) {
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->mode_info.crtcs[i] &&
rdev->mode_info.crtcs[i]->enabled &&
@@ -493,6 +460,8 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
args.v3.ucEnable = enable;
+ if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE61(rdev))
+ args.v3.ucEnable = ATOM_DISABLE;
} else if (ASIC_IS_DCE4(rdev)) {
args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
args.v2.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
@@ -512,6 +481,8 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
args.v2.ucEnable = enable;
+ if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev))
+ args.v2.ucEnable = ATOM_DISABLE;
} else if (ASIC_IS_DCE3(rdev)) {
args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
args.v1.ucSpreadSpectrumType = ss->type & ATOM_SS_CENTRE_SPREAD_MODE_MASK;
@@ -533,7 +504,8 @@ static void atombios_crtc_program_ss(struct radeon_device *rdev,
args.lvds_ss_2.ucSpreadSpectrumRange = ss->range;
args.lvds_ss_2.ucEnable = enable;
} else {
- if (enable == ATOM_DISABLE) {
+ if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
+ (ss->type & ATOM_EXTERNAL_SS_MASK)) {
atombios_disable_ss(rdev, pll_id);
return;
}
@@ -563,8 +535,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
u32 adjusted_clock = mode->clock;
int encoder_mode = atombios_get_encoder_mode(encoder);
u32 dp_clock = mode->clock;
- u32 clock = mode->clock;
- int bpc = radeon_crtc->bpc;
+ int bpc = radeon_get_monitor_bpc(connector);
bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
/* reset the pll flags */
@@ -585,7 +556,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (rdev->family < CHIP_RV770)
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
/* use frac fb div on APUs */
- if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
/* use frac fb div on RS780/RS880 */
if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
@@ -612,13 +583,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
}
}
- if (radeon_encoder->is_mst_encoder) {
- struct radeon_encoder_mst *mst_enc = radeon_encoder->enc_priv;
- struct radeon_connector_atom_dig *dig_connector = mst_enc->connector->con_priv;
-
- dp_clock = dig_connector->dp_clock;
- }
-
/* use recommended ref_div for ss */
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (radeon_crtc->ss_enabled) {
@@ -646,24 +610,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
}
- /* adjust pll for deep color modes */
- if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
- switch (bpc) {
- case 8:
- default:
- break;
- case 10:
- clock = (clock * 5) / 4;
- break;
- case 12:
- clock = (clock * 3) / 2;
- break;
- case 16:
- clock = clock * 2;
- break;
- }
- }
-
/* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
* accordingly based on the encoder/transmitter to work around
* special hw requirements.
@@ -685,7 +631,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
switch (crev) {
case 1:
case 2:
- args.v1.usPixelClock = cpu_to_le16(clock / 10);
+ args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v1.ucTransmitterID = radeon_encoder->encoder_id;
args.v1.ucEncodeMode = encoder_mode;
if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
@@ -697,7 +643,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
break;
case 3:
- args.v3.sInput.usPixelClock = cpu_to_le16(clock / 10);
+ args.v3.sInput.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
args.v3.sInput.ucEncodeMode = encoder_mode;
args.v3.sInput.ucDispPllConfig = 0;
@@ -711,6 +657,10 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ if (encoder_mode == ATOM_ENCODER_MODE_HDMI)
+ /* deep color support */
+ args.v3.sInput.usPixelClock =
+ cpu_to_le16((mode->clock * bpc / 8) / 10);
if (dig->coherent_mode)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
@@ -794,7 +744,7 @@ static void atombios_crtc_set_disp_eng_pll(struct radeon_device *rdev,
* SetPixelClock provides the dividers
*/
args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
- if (ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
+ if (ASIC_IS_DCE61(rdev))
args.v6.ucPpll = ATOM_EXT_PLL1;
else if (ASIC_IS_DCE6(rdev))
args.v6.ucPpll = ATOM_PPLL0;
@@ -897,11 +847,6 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_24BPP;
break;
case 10:
- /* yes this is correct, the atom define is wrong */
- args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP;
- break;
- case 12:
- /* yes this is correct, the atom define is wrong */
args.v5.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
break;
}
@@ -926,10 +871,10 @@ static void atombios_crtc_program_pll(struct drm_crtc *crtc,
args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_24BPP;
break;
case 10:
- args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6;
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;
break;
case 12:
- args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6;
+ args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP;
break;
case 16:
args.v6.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
@@ -965,9 +910,7 @@ static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_
radeon_crtc->bpc = 8;
radeon_crtc->ss_enabled = false;
- if (radeon_encoder->is_mst_encoder) {
- radeon_dp_mst_prepare_pll(crtc, mode);
- } else if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
+ if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
(radeon_encoder_get_dp_bridge_encoder_id(radeon_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector =
@@ -977,9 +920,6 @@ static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
int dp_clock;
-
- /* Assign mode clock for hdmi deep color max clock limit check */
- radeon_connector->pixelclock_for_modeset = mode->clock;
radeon_crtc->bpc = radeon_get_monitor_bpc(connector);
switch (encoder_mode) {
@@ -1061,17 +1001,10 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
struct radeon_encoder *radeon_encoder =
to_radeon_encoder(radeon_crtc->encoder);
u32 pll_clock = mode->clock;
- u32 clock = mode->clock;
u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
struct radeon_pll *pll;
int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
- /* pass the actual clock to atombios_crtc_program_pll for DCE5,6 for HDMI */
- if (ASIC_IS_DCE5(rdev) &&
- (encoder_mode == ATOM_ENCODER_MODE_HDMI) &&
- (radeon_crtc->bpc > 8))
- clock = radeon_crtc->adjusted_clock;
-
switch (radeon_crtc->pll_id) {
case ATOM_PPLL1:
pll = &rdev->clock.p1pll;
@@ -1106,7 +1039,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
radeon_crtc->crtc_id, &radeon_crtc->ss);
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
- encoder_mode, radeon_encoder->encoder_id, clock,
+ encoder_mode, radeon_encoder->encoder_id, mode->clock,
ref_div, fb_div, frac_fb_div, post_div,
radeon_crtc->bpc, radeon_crtc->ss_enabled, &radeon_crtc->ss);
@@ -1114,17 +1047,15 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
/* calculate ss amount and step size */
if (ASIC_IS_DCE4(rdev)) {
u32 step_size;
- u32 amount = (((fb_div * 10) + frac_fb_div) *
- (u32)radeon_crtc->ss.percentage) /
- (100 * (u32)radeon_crtc->ss.percentage_divider);
+ u32 amount = (((fb_div * 10) + frac_fb_div) * radeon_crtc->ss.percentage) / 10000;
radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
radeon_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
- step_size = (4 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) /
+ step_size = (4 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
(125 * 25 * pll->reference_freq / 100);
else
- step_size = (2 * amount * ref_div * ((u32)radeon_crtc->ss.rate * 2048)) /
+ step_size = (2 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
(125 * 25 * pll->reference_freq / 100);
radeon_crtc->ss.step = step_size;
}
@@ -1151,7 +1082,6 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
u32 tmp, viewport_w, viewport_h;
int r;
- bool bypass_lut = false;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1190,165 +1120,71 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
radeon_bo_unreserve(rbo);
- switch (target_fb->pixel_format) {
- case DRM_FORMAT_C8:
+ switch (target_fb->bits_per_pixel) {
+ case 8:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
break;
- case DRM_FORMAT_XRGB4444:
- case DRM_FORMAT_ARGB4444:
- fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
- EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444));
-#ifdef __BIG_ENDIAN
- fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_ARGB1555:
+ case 15:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
-#ifdef __BIG_ENDIAN
- fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_BGRX5551:
- case DRM_FORMAT_BGRA5551:
- fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
- EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551));
-#ifdef __BIG_ENDIAN
- fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
-#endif
break;
- case DRM_FORMAT_RGB565:
+ case 16:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
#ifdef __BIG_ENDIAN
fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
#endif
break;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
+ case 24:
+ case 32:
fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
#ifdef __BIG_ENDIAN
fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
#endif
break;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_ARGB2101010:
- fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
- EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010));
-#ifdef __BIG_ENDIAN
- fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
-#endif
- /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
- bypass_lut = true;
- break;
- case DRM_FORMAT_BGRX1010102:
- case DRM_FORMAT_BGRA1010102:
- fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
- EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102));
-#ifdef __BIG_ENDIAN
- fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
-#endif
- /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
- bypass_lut = true;
- break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->pixel_format));
+ DRM_ERROR("Unsupported screen depth %d\n",
+ target_fb->bits_per_pixel);
return -EINVAL;
}
if (tiling_flags & RADEON_TILING_MACRO) {
- evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
-
- /* Set NUM_BANKS. */
- if (rdev->family >= CHIP_TAHITI) {
- unsigned index, num_banks;
-
- if (rdev->family >= CHIP_BONAIRE) {
- unsigned tileb, tile_split_bytes;
-
- /* Calculate the macrotile mode index. */
- tile_split_bytes = 64 << tile_split;
- tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
- tileb = min(tile_split_bytes, tileb);
-
- for (index = 0; tileb > 64; index++)
- tileb >>= 1;
-
- if (index >= 16) {
- DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
- target_fb->bits_per_pixel, tile_split);
- return -EINVAL;
- }
-
- num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
- } else {
- switch (target_fb->bits_per_pixel) {
- case 8:
- index = 10;
- break;
- case 16:
- index = SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP;
- break;
- default:
- case 32:
- index = SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP;
- break;
- }
-
- num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3;
- }
-
- fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
- } else {
- /* NI and older. */
- if (rdev->family >= CHIP_CAYMAN)
- tmp = rdev->config.cayman.tile_config;
- else
- tmp = rdev->config.evergreen.tile_config;
+ if (rdev->family >= CHIP_TAHITI)
+ tmp = rdev->config.si.tile_config;
+ else if (rdev->family >= CHIP_CAYMAN)
+ tmp = rdev->config.cayman.tile_config;
+ else
+ tmp = rdev->config.evergreen.tile_config;
- switch ((tmp & 0xf0) >> 4) {
- case 0: /* 4 banks */
- fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
- break;
- case 1: /* 8 banks */
- default:
- fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
- break;
- case 2: /* 16 banks */
- fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
- break;
- }
+ switch ((tmp & 0xf0) >> 4) {
+ case 0: /* 4 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
+ break;
+ case 1: /* 8 banks */
+ default:
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
+ break;
+ case 2: /* 16 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
+ break;
}
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
+
+ evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
- if (rdev->family >= CHIP_BONAIRE) {
- /* XXX need to know more about the surface tiling mode */
- fb_format |= CIK_GRPH_MICRO_TILE_MODE(CIK_DISPLAY_MICRO_TILING);
- }
} else if (tiling_flags & RADEON_TILING_MICRO)
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
- if (rdev->family >= CHIP_BONAIRE) {
- /* Read the pipe config from the 2D TILED SCANOUT mode.
- * It should be the same for the other modes too, but not all
- * modes set the pipe config field. */
- u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f;
-
- fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config);
- } else if ((rdev->family == CHIP_TAHITI) ||
- (rdev->family == CHIP_PITCAIRN))
+ if ((rdev->family == CHIP_TAHITI) ||
+ (rdev->family == CHIP_PITCAIRN))
fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
- else if ((rdev->family == CHIP_VERDE) ||
- (rdev->family == CHIP_OLAND) ||
- (rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */
+ else if (rdev->family == CHIP_VERDE)
fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
switch (radeon_crtc->crtc_id) {
@@ -1385,18 +1221,6 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
- /*
- * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
- * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
- * retain the full precision throughout the pipeline.
- */
- WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + radeon_crtc->crtc_offset,
- (bypass_lut ? EVERGREEN_LUT_10BIT_BYPASS_EN : 0),
- ~EVERGREEN_LUT_10BIT_BYPASS_EN);
-
- if (bypass_lut)
- DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
-
WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
@@ -1408,21 +1232,14 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
- if (rdev->family >= CHIP_BONAIRE)
- WREG32(CIK_LB_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
- target_fb->height);
- else
- WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
- target_fb->height);
+ WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
+ target_fb->height);
x &= ~3;
y &= ~1;
WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
(x << 16) | y);
viewport_w = crtc->mode.hdisplay;
viewport_h = (crtc->mode.vdisplay + 1) & ~1;
- if ((rdev->family >= CHIP_BONAIRE) &&
- (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE))
- viewport_h *= 2;
WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
(viewport_w << 16) | viewport_h);
@@ -1432,8 +1249,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
- /* set pageflip to happen only at start of vblank interval (front porch) */
- WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
if (!atomic && fb && fb != crtc->primary->fb) {
radeon_fb = to_radeon_framebuffer(fb);
@@ -1467,7 +1284,6 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
u32 tmp, viewport_w, viewport_h;
int r;
- bool bypass_lut = false;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1505,30 +1321,18 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
radeon_bo_unreserve(rbo);
- switch (target_fb->pixel_format) {
- case DRM_FORMAT_C8:
+ switch (target_fb->bits_per_pixel) {
+ case 8:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_8BPP |
AVIVO_D1GRPH_CONTROL_8BPP_INDEXED;
break;
- case DRM_FORMAT_XRGB4444:
- case DRM_FORMAT_ARGB4444:
- fb_format =
- AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
- AVIVO_D1GRPH_CONTROL_16BPP_ARGB4444;
-#ifdef __BIG_ENDIAN
- fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
-#endif
- break;
- case DRM_FORMAT_XRGB1555:
+ case 15:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555;
-#ifdef __BIG_ENDIAN
- fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
-#endif
break;
- case DRM_FORMAT_RGB565:
+ case 16:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
@@ -1536,8 +1340,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
#endif
break;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
+ case 24:
+ case 32:
fb_format =
AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
@@ -1545,20 +1349,9 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
#endif
break;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_ARGB2101010:
- fb_format =
- AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
- AVIVO_D1GRPH_CONTROL_32BPP_ARGB2101010;
-#ifdef __BIG_ENDIAN
- fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
-#endif
- /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
- bypass_lut = true;
- break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->pixel_format));
+ DRM_ERROR("Unsupported screen depth %d\n",
+ target_fb->bits_per_pixel);
return -EINVAL;
}
@@ -1597,13 +1390,6 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
if (rdev->family >= CHIP_R600)
WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
- /* LUT only has 256 slots for 8 bpc fb. Bypass for > 8 bpc scanout for precision */
- WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset,
- (bypass_lut ? AVIVO_LUT_10BIT_BYPASS_EN : 0), ~AVIVO_LUT_10BIT_BYPASS_EN);
-
- if (bypass_lut)
- DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
-
WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0);
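
Note: the dropped lines above used WREG32_P to toggle the 10-bit LUT bypass, since the
hardware LUT has only 256 entries and would truncate >8 bpc scanout. WREG32_P is the
driver's masked read-modify-write helper; its shape, for reference (matching the usual
radeon.h definition):

#define WREG32_P(reg, val, mask)                                \
        do {                                                    \
                uint32_t tmp_ = RREG32(reg);                    \
                tmp_ &= (mask);                                 \
                tmp_ |= ((val) & ~(mask));                      \
                WREG32(reg, tmp_);                              \
        } while (0)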
@@ -1632,8 +1418,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
- /* set pageflip to happen only at start of vblank interval (front porch) */
- WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
+ /* set pageflip to happen anywhere in vblank interval */
+ WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
if (!atomic && fb && fb != crtc->primary->fb) {
radeon_fb = to_radeon_framebuffer(fb);
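
Note: the two MASTER_UPDATE_MODE hunks flip the same policy knob on different display
blocks: 0 lets a queued pageflip latch anywhere in the vblank interval, while 3 restricts
it to the start of vblank (front porch). A minimal sketch of the shared idea (the helper
name and the flag are hypothetical):

static void radeon_set_flip_window(struct radeon_device *rdev,
                                   struct radeon_crtc *radeon_crtc,
                                   bool front_porch_only)
{
        u32 mode = front_porch_only ? 3 : 0;

        if (ASIC_IS_DCE4(rdev))
                WREG32(EVERGREEN_MASTER_UPDATE_MODE +
                    radeon_crtc->crtc_offset, mode);
        else if (ASIC_IS_AVIVO(rdev))
                WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE +
                    radeon_crtc->crtc_offset, mode);
}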
@@ -1741,7 +1527,6 @@ static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
struct drm_crtc *test_crtc;
struct radeon_crtc *test_radeon_crtc;
@@ -1751,10 +1536,6 @@ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
test_radeon_crtc = to_radeon_crtc(test_crtc);
if (test_radeon_crtc->encoder &&
ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
- /* PPLL2 is exclusive to UNIPHYA on DCE61 */
- if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
- test_radeon_crtc->pll_id == ATOM_PPLL2)
- continue;
/* for DP use the same PLL for all */
if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
return test_radeon_crtc->pll_id;
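
Note: with the DCE61 PPLL2 exclusion removed, the shared-PPLL lookup reduces to "first
other DP-driving CRTC with a valid PLL wins". A condensed sketch of the loop as it now
behaves (types and helpers as in the hunk above):

/* all DP monitors share one PPLL: return the first PLL already
 * assigned to another DP-driving CRTC, or INVALID if none yet */
list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
        if (test_crtc == crtc)
                continue;
        test_radeon_crtc = to_radeon_crtc(test_crtc);
        if (test_radeon_crtc->encoder &&
            ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder)) &&
            test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
                return test_radeon_crtc->pll_id;
}
return ATOM_PPLL_INVALID;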
@@ -1776,7 +1557,6 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
struct drm_crtc *test_crtc;
struct radeon_crtc *test_radeon_crtc;
u32 adjusted_clock, test_adjusted_clock;
@@ -1792,10 +1572,6 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
test_radeon_crtc = to_radeon_crtc(test_crtc);
if (test_radeon_crtc->encoder &&
!ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
- /* PPLL2 is exclusive to UNIPHYA on DCE61 */
- if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
- test_radeon_crtc->pll_id == ATOM_PPLL2)
- continue;
/* check if we are already driving this connector with another crtc */
if (test_radeon_crtc->connector == radeon_crtc->connector) {
/* if we are, return that pll */
@@ -1829,12 +1605,6 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
*
* Asic specific PLL information
*
- * DCE 8.x
- * KB/KV
- * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
- * CI
- * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
- *
* DCE 6.1
* - PPLL2 is only available to UNIPHYA (both DP and non-DP)
* - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP)
@@ -1861,47 +1631,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
u32 pll_in_use;
int pll;
- if (ASIC_IS_DCE8(rdev)) {
- if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
- if (rdev->clock.dp_extclk)
- /* skip PPLL programming if using ext clock */
- return ATOM_PPLL_INVALID;
- else {
- /* use the same PPLL for all DP monitors */
- pll = radeon_get_shared_dp_ppll(crtc);
- if (pll != ATOM_PPLL_INVALID)
- return pll;
- }
- } else {
- /* use the same PPLL for all monitors with the same clock */
- pll = radeon_get_shared_nondp_ppll(crtc);
- if (pll != ATOM_PPLL_INVALID)
- return pll;
- }
- /* otherwise, pick one of the plls */
- if ((rdev->family == CHIP_KABINI) ||
- (rdev->family == CHIP_MULLINS)) {
- /* KB/ML has PPLL1 and PPLL2 */
- pll_in_use = radeon_get_pll_use_mask(crtc);
- if (!(pll_in_use & (1 << ATOM_PPLL2)))
- return ATOM_PPLL2;
- if (!(pll_in_use & (1 << ATOM_PPLL1)))
- return ATOM_PPLL1;
- DRM_ERROR("unable to allocate a PPLL\n");
- return ATOM_PPLL_INVALID;
- } else {
- /* CI/KV has PPLL0, PPLL1, and PPLL2 */
- pll_in_use = radeon_get_pll_use_mask(crtc);
- if (!(pll_in_use & (1 << ATOM_PPLL2)))
- return ATOM_PPLL2;
- if (!(pll_in_use & (1 << ATOM_PPLL1)))
- return ATOM_PPLL1;
- if (!(pll_in_use & (1 << ATOM_PPLL0)))
- return ATOM_PPLL0;
- DRM_ERROR("unable to allocate a PPLL\n");
- return ATOM_PPLL_INVALID;
- }
- } else if (ASIC_IS_DCE61(rdev)) {
+ if (ASIC_IS_DCE61(rdev)) {
struct radeon_encoder_atom_dig *dig =
radeon_encoder->enc_priv;
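
Note: the deleted DCE8 branch allocated from its small PLL pool by first-fit over the
in-use bitmask returned by radeon_get_pll_use_mask(). The pattern, factored into a
stand-alone sketch (this helper is illustrative, not a driver function):

static int pick_first_free_pll(u32 pll_in_use,
                               const int *candidates, int n)
{
        int i;

        /* return the first candidate whose in-use bit is clear */
        for (i = 0; i < n; i++)
                if (!(pll_in_use & (1 << candidates[i])))
                        return candidates[i];
        return ATOM_PPLL_INVALID;
}

Per the removed code, the candidate list would have been { ATOM_PPLL2, ATOM_PPLL1 } on
KB/ML and { ATOM_PPLL2, ATOM_PPLL1, ATOM_PPLL0 } on CI/KV.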
@@ -2045,9 +1775,6 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
(ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
is_tvcv = true;
- if (!radeon_crtc->adjusted_clock)
- return -EINVAL;
-
atombios_crtc_set_pll(crtc, adjusted_mode);
if (ASIC_IS_DCE4(rdev))
@@ -2066,10 +1793,6 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
atombios_crtc_set_base(crtc, x, y, old_fb);
atombios_overscan_setup(crtc, mode, adjusted_mode);
atombios_scaler_setup(crtc);
- radeon_cursor_reset(crtc);
- /* update the hw version for dpm */
- radeon_crtc->hw_mode = *adjusted_mode;
-
return 0;
}
@@ -2094,12 +1817,6 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
radeon_crtc->connector = NULL;
return false;
}
- if (radeon_crtc->encoder) {
- struct radeon_encoder *radeon_encoder =
- to_radeon_encoder(radeon_crtc->encoder);
-
- radeon_crtc->output_csc = radeon_encoder->output_csc;
- }
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
if (!atombios_crtc_prepare_pll(crtc, adjusted_mode))
@@ -2116,9 +1833,12 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
+ radeon_crtc->in_mode_set = true;
+
/* disable crtc pair power gating before programming */
if (ASIC_IS_DCE6(rdev))
atombios_powergate_crtc(crtc, ATOM_DISABLE);
@@ -2129,8 +1849,11 @@ static void atombios_crtc_prepare(struct drm_crtc *crtc)
static void atombios_crtc_commit(struct drm_crtc *crtc)
{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
atombios_lock_crtc(crtc, ATOM_DISABLE);
+ radeon_crtc->in_mode_set = false;
}
static void atombios_crtc_disable(struct drm_crtc *crtc)
@@ -2142,27 +1865,6 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
int i;
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- if (crtc->primary->fb) {
- int r;
- struct radeon_framebuffer *radeon_fb;
- struct radeon_bo *rbo;
-
- radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
- rbo = gem_to_radeon_bo(radeon_fb->obj);
- r = radeon_bo_reserve(rbo, false);
- if (unlikely(r))
- DRM_ERROR("failed to reserve rbo before unpin\n");
- else {
- radeon_bo_unpin(rbo);
- radeon_bo_unreserve(rbo);
- }
- }
- /* disable the GRPH */
- if (ASIC_IS_DCE4(rdev))
- WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 0);
- else if (ASIC_IS_AVIVO(rdev))
- WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 0);
-
if (ASIC_IS_DCE6(rdev))
atombios_powergate_crtc(crtc, ATOM_ENABLE);
@@ -2187,10 +1889,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
break;
case ATOM_PPLL0:
/* disable the ppll */
- if ((rdev->family == CHIP_ARUBA) ||
- (rdev->family == CHIP_KAVERI) ||
- (rdev->family == CHIP_BONAIRE) ||
- (rdev->family == CHIP_HAWAII))
+ if (ASIC_IS_DCE61(rdev))
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
break;
diff --git a/sys/dev/pci/drm/radeon/atombios_dp.c b/sys/dev/pci/drm/radeon/atombios_dp.c
index b590be0b5a0..5e1ed107e26 100644
--- a/sys/dev/pci/drm/radeon/atombios_dp.c
+++ b/sys/dev/pci/drm/radeon/atombios_dp.c
@@ -47,32 +47,34 @@ static char *pre_emph_names[] = {
/***** radeon AUX functions *****/
-/* Atom needs data in little endian format so swap as appropriate when copying
- * data to or from atom. Note that atom operates on dw units.
- *
- * Use to_le=true when sending data to atom and provide at least
- * ALIGN(num_bytes,4) bytes in the dst buffer.
- *
- * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
- * bytes in the src buffer.
+/* Atom needs data in little endian format
+ * so swap as appropriate when copying data to
+ * or from atom. Note that atom operates on
+ * dw units.
*/
void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
{
#ifdef __BIG_ENDIAN
- u32 src_tmp[5], dst_tmp[5];
+ u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
+ u32 *dst32, *src32;
int i;
- u8 align_num_bytes = roundup2(num_bytes, 4);
+ memcpy(src_tmp, src, num_bytes);
+ src32 = (u32 *)src_tmp;
+ dst32 = (u32 *)dst_tmp;
if (to_le) {
- memcpy(src_tmp, src, num_bytes);
- for (i = 0; i < align_num_bytes / 4; i++)
- dst_tmp[i] = cpu_to_le32(src_tmp[i]);
- memcpy(dst, dst_tmp, align_num_bytes);
- } else {
- memcpy(src_tmp, src, align_num_bytes);
- for (i = 0; i < align_num_bytes / 4; i++)
- dst_tmp[i] = le32_to_cpu(src_tmp[i]);
+ for (i = 0; i < ((num_bytes + 3) / 4); i++)
+ dst32[i] = cpu_to_le32(src32[i]);
memcpy(dst, dst_tmp, num_bytes);
+ } else {
+ u8 dws = num_bytes & ~3;
+ for (i = 0; i < ((num_bytes + 3) / 4); i++)
+ dst32[i] = le32_to_cpu(src32[i]);
+ memcpy(dst, dst_tmp, dws);
+ if (num_bytes % 4) {
+ for (i = 0; i < (num_bytes % 4); i++)
+ dst[dws+i] = dst_tmp[dws+i];
+ }
}
#else
memcpy(dst, src, num_bytes);
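
Note: the reinstated big-endian path bounce-buffers the message, swaps it a dword at a
time, and copies any non-multiple-of-4 tail separately. A self-contained illustration of
the same rounding, runnable outside the kernel (bswap32() stands in for
cpu_to_le32()/le32_to_cpu() on a big-endian host; 20-byte buffers suffice because the
AUX messages here never exceed that):

#include <stdint.h>
#include <string.h>

static uint32_t bswap32(uint32_t v)
{
        return (v >> 24) | ((v >> 8) & 0xff00) |
               ((v << 8) & 0xff0000) | (v << 24);
}

static void copy_swap(uint8_t *dst, const uint8_t *src, uint8_t num_bytes)
{
        uint8_t tmp[20] = { 0 };        /* zero pad keeps the last dword defined */
        uint32_t w;
        int i, dws = num_bytes & ~3;

        memcpy(tmp, src, num_bytes);
        for (i = 0; i < (num_bytes + 3) / 4; i++) {
                memcpy(&w, &tmp[i * 4], 4);     /* whole dword, incl. pad */
                w = bswap32(w);
                memcpy(&tmp[i * 4], &w, 4);
        }
        memcpy(dst, tmp, dws);
        for (i = dws; i < num_bytes; i++)       /* swapped tail bytes */
                dst[i] = tmp[i];
}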
@@ -99,9 +101,6 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
memset(&args, 0, sizeof(args));
- mutex_lock(&chan->mutex);
- mutex_lock(&rdev->mode_info.atom_context->scratch_mutex);
-
base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
radeon_atom_copy_swap(base, send, send_bytes, true);
@@ -114,7 +113,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
if (ASIC_IS_DCE4(rdev))
args.v2.ucHPD_ID = chan->rec.hpd;
- atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
*ack = args.v1.ucReplyStatus;
@@ -148,9 +147,6 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
r = recv_bytes;
done:
- mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex);
- mutex_unlock(&chan->mutex);
-
return r;
}
diff --git a/sys/dev/pci/drm/radeon/atombios_encoders.c b/sys/dev/pci/drm/radeon/atombios_encoders.c
index 65ca0364b3b..b710c1d6ea9 100644
--- a/sys/dev/pci/drm/radeon/atombios_encoders.c
+++ b/sys/dev/pci/drm/radeon/atombios_encoders.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: atombios_encoders.c,v 1.12 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2007-11 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -27,7 +28,6 @@
#include <dev/pci/drm/drm_crtc_helper.h>
#include <dev/pci/drm/radeon_drm.h>
#include "radeon.h"
-#include "radeon_audio.h"
#include "atom.h"
extern int atom_debug;
@@ -119,7 +119,6 @@ atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
if (dig->backlight_level == 0)
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
else {
@@ -184,15 +183,9 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
struct backlight_properties props;
struct radeon_backlight_privdata *pdata;
struct radeon_encoder_atom_dig *dig;
+ u8 backlight_level;
char bl_name[16];
- /* Mac laptops with multiple GPUs use the gmux driver for backlight
- * so don't register a backlight device
- */
- if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
- (rdev->pdev->device == 0x6741))
- return;
-
if (!radeon_encoder->enc_priv)
return;
@@ -211,14 +204,11 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
memset(&props, 0, sizeof(props));
props.max_brightness = RADEON_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
-#ifdef notyet
+#ifdef __linux__
snprintf(bl_name, sizeof(bl_name),
"radeon_bl%d", dev->primary->index);
-#else
- snprintf(bl_name, sizeof(bl_name),
- "radeon_bl%d", 0);
#endif
- bd = backlight_device_register(bl_name, drm_connector->kdev,
+ bd = backlight_device_register(bl_name, &drm_connector->kdev,
pdata, &radeon_atom_backlight_ops, &props);
if (IS_ERR(bd)) {
DRM_ERROR("Backlight registration failed\n");
@@ -227,22 +217,16 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
pdata->encoder = radeon_encoder;
+ backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
+
dig = radeon_encoder->enc_priv;
dig->bl_dev = bd;
bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
- /* Set a reasonable default here if the level is 0 otherwise
- * fbdev will attempt to turn the backlight on after console
- * unblanking and it will try and restore 0 which turns the backlight
- * off again.
- */
- if (bd->props.brightness == 0)
- bd->props.brightness = RADEON_MAX_BL_LEVEL;
bd->props.power = FB_BLANK_UNBLANK;
backlight_update_status(bd);
DRM_INFO("radeon atom DIG backlight initialized\n");
- rdev->mode_info.bl_encoder = radeon_encoder;
return;
@@ -298,6 +282,28 @@ static void radeon_atom_backlight_exit(struct radeon_encoder *encoder)
bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
struct drm_display_mode *mode);
+
+static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
+{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ return true;
+ default:
+ return false;
+ }
+}
+
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -315,14 +321,12 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
- /* vertical FP must be at least 1 */
- if (mode->crtc_vsync_start == mode->crtc_vdisplay)
- adjusted_mode->crtc_vsync_start++;
-
- /* get the native mode for scaling */
- if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
+ /* get the native mode for LVDS */
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
radeon_panel_mode_fixup(encoder, adjusted_mode);
- } else if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
+
+ /* get the native mode for TV */
+ if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
if (tv_dac) {
if (tv_dac->tv_std == TV_STD_NTSC ||
@@ -332,8 +336,6 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
else
radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
}
- } else if (radeon_encoder->rmx_type != RMX_OFF) {
- radeon_panel_mode_fixup(encoder, adjusted_mode);
}
if (ASIC_IS_DCE3(rdev) &&
@@ -456,12 +458,11 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
static u8 radeon_atom_get_bpc(struct drm_encoder *encoder)
{
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
int bpc = 8;
- if (encoder->crtc) {
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- bpc = radeon_crtc->bpc;
- }
+ if (connector)
+ bpc = radeon_get_monitor_bpc(connector);
switch (bpc) {
case 0:
@@ -480,11 +481,11 @@ static u8 radeon_atom_get_bpc(struct drm_encoder *encoder)
}
}
+
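
Note: the body of radeon_atom_get_bpc() is elided from the hunk above; for context, it
translates the bpc value into the ATOM per-color-depth encodings. A sketch, assuming the
usual PANEL_*BIT_PER_COLOR constants from atombios.h (the actual elided body may differ):

switch (bpc) {
case 0:
        return PANEL_BPC_UNDEFINE;
case 6:
        return PANEL_6BIT_PER_COLOR;
case 8:
default:
        return PANEL_8BIT_PER_COLOR;
case 10:
        return PANEL_10BIT_PER_COLOR;
case 12:
        return PANEL_12BIT_PER_COLOR;
case 16:
        return PANEL_16BIT_PER_COLOR;
}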
union dvo_encoder_control {
ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
- DVO_ENCODER_CONTROL_PS_ALLOCATION_V1_4 dvo_v4;
};
void
@@ -534,13 +535,6 @@ atombios_dvo_setup(struct drm_encoder *encoder, int action)
args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.dvo_v3.ucDVOConfig = 0; /* XXX */
break;
- case 4:
- /* DCE8 */
- args.dvo_v4.ucAction = action;
- args.dvo_v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
- args.dvo_v4.ucDVOConfig = 0; /* XXX */
- args.dvo_v4.ucBitPerColor = radeon_atom_get_bpc(encoder);
- break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
@@ -681,15 +675,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
- struct radeon_encoder_atom_dig *dig_enc;
- if (radeon_encoder_is_digital(encoder)) {
- dig_enc = radeon_encoder->enc_priv;
- if (dig_enc->active_mst_links)
- return ATOM_ENCODER_MODE_DP_MST;
- }
- if (radeon_encoder->is_mst_encoder || radeon_encoder->offset)
- return ATOM_ENCODER_MODE_DP_MST;
/* dp bridges are always DP */
if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)
return ATOM_ENCODER_MODE_DP;
@@ -710,37 +696,24 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
- if (radeon_audio != 0) {
- if (radeon_connector->use_digital &&
- (radeon_connector->audio == RADEON_AUDIO_ENABLE))
- return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
- (radeon_connector->audio == RADEON_AUDIO_AUTO))
- return ATOM_ENCODER_MODE_HDMI;
- else if (radeon_connector->use_digital)
- return ATOM_ENCODER_MODE_DVI;
- else
- return ATOM_ENCODER_MODE_CRT;
- } else if (radeon_connector->use_digital) {
+ if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ radeon_audio &&
+ !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
+ return ATOM_ENCODER_MODE_HDMI;
+ else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
- } else {
+ else
return ATOM_ENCODER_MODE_CRT;
- }
break;
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
default:
- if (radeon_audio != 0) {
- if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
- return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
- (radeon_connector->audio == RADEON_AUDIO_AUTO))
- return ATOM_ENCODER_MODE_HDMI;
- else
- return ATOM_ENCODER_MODE_DVI;
- } else {
+ if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ radeon_audio &&
+ !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
+ return ATOM_ENCODER_MODE_HDMI;
+ else
return ATOM_ENCODER_MODE_DVI;
- }
break;
case DRM_MODE_CONNECTOR_LVDS:
return ATOM_ENCODER_MODE_LVDS;
@@ -748,29 +721,16 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_DisplayPort:
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
- (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
- if (radeon_audio != 0 &&
- drm_detect_monitor_audio(radeon_connector_edid(connector)) &&
- ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
- return ATOM_ENCODER_MODE_DP_AUDIO;
+ (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
- } else if (radeon_audio != 0) {
- if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
- return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
- (radeon_connector->audio == RADEON_AUDIO_AUTO))
- return ATOM_ENCODER_MODE_HDMI;
- else
- return ATOM_ENCODER_MODE_DVI;
- } else {
+ else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ radeon_audio &&
+ !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
+ return ATOM_ENCODER_MODE_HDMI;
+ else
return ATOM_ENCODER_MODE_DVI;
- }
break;
case DRM_MODE_CONNECTOR_eDP:
- if (radeon_audio != 0 &&
- drm_detect_monitor_audio(radeon_connector_edid(connector)) &&
- ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev))
- return ATOM_ENCODER_MODE_DP_AUDIO;
return ATOM_ENCODER_MODE_DP;
case DRM_MODE_CONNECTOR_DVIA:
case DRM_MODE_CONNECTOR_VGA:
@@ -841,7 +801,7 @@ union dig_encoder_control {
};
void
-atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_mode, int enc_override)
+atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -901,6 +861,8 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
else
args.v1.ucLaneNum = 4;
+ if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
@@ -917,10 +879,6 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
-
- if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
-
break;
case 2:
case 3:
@@ -940,10 +898,7 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode) && (dp_clock == 270000))
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
- if (enc_override != -1)
- args.v3.acConfig.ucDigSel = enc_override;
- else
- args.v3.acConfig.ucDigSel = dig->dig_encoder;
+ args.v3.acConfig.ucDigSel = dig->dig_encoder;
args.v3.ucBitPerColor = radeon_atom_get_bpc(encoder);
break;
case 4:
@@ -962,20 +917,12 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
args.v4.ucLaneNum = 4;
if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) {
- if (dp_clock == 540000)
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
- else if (dp_clock == 324000)
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_3_24GHZ;
- else if (dp_clock == 270000)
+ if (dp_clock == 270000)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
- else
- args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ;
+ else if (dp_clock == 540000)
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
}
-
- if (enc_override != -1)
- args.v4.acConfig.ucDigSel = enc_override;
- else
- args.v4.acConfig.ucDigSel = dig->dig_encoder;
+ args.v4.acConfig.ucDigSel = dig->dig_encoder;
args.v4.ucBitPerColor = radeon_atom_get_bpc(encoder);
if (hpd_id == RADEON_HPD_NONE)
args.v4.ucHPD_ID = 0;
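
Note: dp_clock is the DP link clock in kHz, so the reverted v4 path programs only the
2.70 and 5.40 GHz rate bits and otherwise leaves the 1.62 GHz default. The mapping,
pulled out as a sketch (the helper name is hypothetical; the constants are the ones used
in the hunk above):

/* Map a DP link clock in kHz to the ATOM_ENCODER_CONFIG_V4 rate
 * bits; 0 means the 1.62 GHz default. */
static u32 dp_clock_to_v4_rate(u32 dp_clock)
{
        if (dp_clock == 540000)
                return ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
        if (dp_clock == 270000)
                return ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
        return 0;
}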
@@ -996,12 +943,6 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
}
-void
-atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
-{
- atombios_dig_encoder_setup2(encoder, action, panel_mode, -1);
-}
-
union dig_transmitter_control {
DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
@@ -1011,7 +952,7 @@ union dig_transmitter_control {
};
void
-atombios_dig_transmitter_setup2(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set, int fe)
+atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -1073,7 +1014,6 @@ atombios_dig_transmitter_setup2(struct drm_encoder *encoder, int action, uint8_t
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
@@ -1333,9 +1273,6 @@ atombios_dig_transmitter_setup2(struct drm_encoder *encoder, int action, uint8_t
else
args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYE;
break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
- args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYG;
- break;
}
if (is_dp)
args.v5.ucLaneNum = dp_lane_count;
@@ -1361,7 +1298,7 @@ atombios_dig_transmitter_setup2(struct drm_encoder *encoder, int action, uint8_t
args.v5.asConfig.ucHPDSel = 0;
else
args.v5.asConfig.ucHPDSel = hpd_id + 1;
- args.v5.ucDigEncoderSel = (fe != -1) ? (1 << fe) : (1 << dig_encoder);
+ args.v5.ucDigEncoderSel = 1 << dig_encoder;
args.v5.ucDPLaneSet = lane_set;
break;
default:
@@ -1377,12 +1314,6 @@ atombios_dig_transmitter_setup2(struct drm_encoder *encoder, int action, uint8_t
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
-void
-atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
-{
- atombios_dig_transmitter_setup2(encoder, action, lane_num, lane_set, -1);
-}
-
bool
atombios_set_edp_panel_power(struct drm_connector *connector, int action)
{
@@ -1636,14 +1567,8 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
} else
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- if (rdev->mode_info.bl_encoder) {
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-
- atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
- } else {
- args.ucAction = ATOM_LCD_BLON;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- }
+ args.ucAction = ATOM_LCD_BLON;
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
break;
case DRM_MODE_DPMS_STANDBY:
@@ -1670,16 +1595,10 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
struct radeon_connector *radeon_connector = NULL;
struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
- bool travis_quirk = false;
if (connector) {
radeon_connector = to_radeon_connector(connector);
radeon_dig_connector = radeon_connector->con_priv;
- if ((radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
- ENCODER_OBJECT_ID_TRAVIS) &&
- (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
- !ASIC_IS_DCE5(rdev))
- travis_quirk = true;
}
switch (mode) {
@@ -1700,13 +1619,25 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
}
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
} else if (ASIC_IS_DCE4(rdev)) {
/* setup and enable the encoder */
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+ /* enable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
} else {
/* setup and enable the encoder and transmitter */
atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ /* some dce3.x boards have a bug in their transmitter control table.
+ * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
+ * does the same thing and more.
+ */
+ if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
+ (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
}
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
@@ -1714,65 +1645,73 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
ATOM_TRANSMITTER_ACTION_POWER_ON);
radeon_dig_connector->edp_on = true;
}
- }
- /* enable the transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
- if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
- /* DP_SET_POWER_D0 is set in radeon_dp_link_train */
radeon_dp_link_train(encoder, connector);
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
}
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- if (rdev->mode_info.bl_encoder)
- atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
- else
- atombios_dig_transmitter_setup(encoder,
- ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
- }
- if (ext_encoder)
- atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
-
- /* don't power off encoders with active MST links */
- if (dig->active_mst_links)
- return;
-
- if (ASIC_IS_DCE4(rdev)) {
- if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector)
- atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
- }
- if (ext_encoder)
- atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- atombios_dig_transmitter_setup(encoder,
- ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
-
- if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) &&
- connector && !travis_quirk)
- radeon_dp_set_rx_power_state(connector, DP_SET_POWER_D3);
- if (ASIC_IS_DCE4(rdev)) {
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
/* disable the transmitter */
- atombios_dig_transmitter_setup(encoder,
- ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ } else if (ASIC_IS_DCE4(rdev)) {
+ /* disable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
} else {
/* disable the encoder and transmitter */
- atombios_dig_transmitter_setup(encoder,
- ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
}
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
- if (travis_quirk)
- radeon_dp_set_rx_power_state(connector, DP_SET_POWER_D3);
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
radeon_dig_connector->edp_on = false;
}
}
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+ break;
+ }
+}
+
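
Note: one detail worth reading out of the restored dpms_dig path above: bring-up runs
SETUP -> ENABLE -> ENABLE_OUTPUT (with the DCE3.x quirk skipping ENABLE_OUTPUT on
RV710/RV730/RS780/RS880), and teardown reverses the output side first. A compact sketch
of that symmetry (the helper is hypothetical):

static void dig_xmit_set_enabled(struct drm_encoder *encoder, bool on)
{
        if (on) {
                atombios_dig_transmitter_setup(encoder,
                    ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
                atombios_dig_transmitter_setup(encoder,
                    ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
        } else {
                /* teardown mirrors bring-up in reverse order */
                atombios_dig_transmitter_setup(encoder,
                    ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
                atombios_dig_transmitter_setup(encoder,
                    ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
        }
}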
+static void
+radeon_atom_encoder_dpms_ext(struct drm_encoder *encoder,
+ struct drm_encoder *ext_encoder,
+ int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ default:
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) {
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
+ } else
+ atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) {
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
+ atombios_external_encoder_setup(encoder, ext_encoder,
+ EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
+ } else
+ atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
break;
}
}
@@ -1783,17 +1722,11 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- int encoder_mode = atombios_get_encoder_mode(encoder);
+ struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
radeon_encoder->encoder_id, mode, radeon_encoder->devices,
radeon_encoder->active_device);
-
- if ((radeon_audio != 0) &&
- ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
- ENCODER_MODE_IS_DP(encoder_mode)))
- radeon_audio_dpms(encoder, mode);
-
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
@@ -1808,7 +1741,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
radeon_atom_encoder_dpms_dig(encoder, mode);
break;
@@ -1849,6 +1781,9 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
return;
}
+ if (ext_encoder)
+ radeon_atom_encoder_dpms_ext(encoder, ext_encoder, mode);
+
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
}
@@ -1946,7 +1881,6 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
dig = radeon_encoder->enc_priv;
switch (dig->dig_encoder) {
@@ -1968,9 +1902,6 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
case 5:
args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
break;
- case 6:
- args.v2.ucEncoderID = ASIC_INT_DIG7_ENCODER_ID;
- break;
}
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
@@ -2007,53 +1938,6 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
-void
-atombios_set_mst_encoder_crtc_source(struct drm_encoder *encoder, int fe)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
- uint8_t frev, crev;
- union crtc_source_param args;
-
- memset(&args, 0, sizeof(args));
-
- if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
- return;
-
- if (frev != 1 && crev != 2)
- DRM_ERROR("Unknown table for MST %d, %d\n", frev, crev);
-
- args.v2.ucCRTC = radeon_crtc->crtc_id;
- args.v2.ucEncodeMode = ATOM_ENCODER_MODE_DP_MST;
-
- switch (fe) {
- case 0:
- args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
- break;
- case 1:
- args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
- break;
- case 2:
- args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
- break;
- case 3:
- args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
- break;
- case 4:
- args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
- break;
- case 5:
- args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
- break;
- case 6:
- args.v2.ucEncoderID = ASIC_INT_DIG7_ENCODER_ID;
- break;
- }
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
static void
atombios_apply_encoder_quirks(struct drm_encoder *encoder,
struct drm_display_mode *mode)
@@ -2080,13 +1964,7 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
/* set scaler clears this on some chips */
if (ASIC_IS_AVIVO(rdev) &&
(!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
- if (ASIC_IS_DCE8(rdev)) {
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- WREG32(CIK_LB_DATA_FORMAT + radeon_crtc->crtc_offset,
- CIK_INTERLEAVE_EN);
- else
- WREG32(CIK_LB_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
- } else if (ASIC_IS_DCE4(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
EVERGREEN_INTERLEAVE_EN);
@@ -2102,14 +1980,7 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
}
}
-void radeon_atom_release_dig_encoder(struct radeon_device *rdev, int enc_idx)
-{
- if (enc_idx < 0)
- return;
- rdev->mode_info.active_encoders &= ~(1 << enc_idx);
-}
-
-int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx)
+static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -2118,87 +1989,68 @@ int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx)
struct drm_encoder *test_encoder;
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t dig_enc_in_use = 0;
- int enc_idx = -1;
- if (fe_idx >= 0) {
- enc_idx = fe_idx;
- goto assigned;
- }
if (ASIC_IS_DCE6(rdev)) {
/* DCE6 */
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
if (dig->linkb)
- enc_idx = 1;
+ return 1;
else
- enc_idx = 0;
+ return 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
if (dig->linkb)
- enc_idx = 3;
+ return 3;
else
- enc_idx = 2;
+ return 2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (dig->linkb)
- enc_idx = 5;
+ return 5;
else
- enc_idx = 4;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
- enc_idx = 6;
+ return 4;
break;
}
- goto assigned;
} else if (ASIC_IS_DCE4(rdev)) {
/* DCE4/5 */
if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) {
/* ontario follows DCE4 */
if (rdev->family == CHIP_PALM) {
if (dig->linkb)
- enc_idx = 1;
+ return 1;
else
- enc_idx = 0;
+ return 0;
} else
/* llano follows DCE3.2 */
- enc_idx = radeon_crtc->crtc_id;
+ return radeon_crtc->crtc_id;
} else {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
if (dig->linkb)
- enc_idx = 1;
+ return 1;
else
- enc_idx = 0;
+ return 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
if (dig->linkb)
- enc_idx = 3;
+ return 3;
else
- enc_idx = 2;
+ return 2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (dig->linkb)
- enc_idx = 5;
+ return 5;
else
- enc_idx = 4;
+ return 4;
break;
}
}
- goto assigned;
}
- /*
- * On DCE32 any encoder can drive any block so usually just use crtc id,
- * but Apple thinks different at least on iMac10,1, so there use linkb,
- * otherwise the internal eDP panel will stay dark.
- */
+ /* on DCE32 any encoder can drive any block, so just use the crtc id */
if (ASIC_IS_DCE32(rdev)) {
- if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1"))
- enc_idx = (dig->linkb) ? 1 : 0;
- else
- enc_idx = radeon_crtc->crtc_id;
-
- goto assigned;
+ return radeon_crtc->crtc_id;
}
/* on DCE3 - LVTMA can only be driven by DIGB */
@@ -2226,17 +2078,6 @@ int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx)
if (!(dig_enc_in_use & 1))
return 0;
return 1;
-
-assigned:
- if (enc_idx == -1) {
- DRM_ERROR("Got encoder index incorrect - returning 0\n");
- return 0;
- }
- if (rdev->mode_info.active_encoders & (1 << enc_idx)) {
- DRM_ERROR("chosen encoder in use %d\n", enc_idx);
- }
- rdev->mode_info.active_encoders |= (1 << enc_idx);
- return enc_idx;
}
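
Note: the long switch above encodes a simple rule on DCE4+/DCE6: each UNIPHY block owns
two DIG encoders, with link B taking the odd slot (UNIPHY -> 0/1, UNIPHY1 -> 2/3,
UNIPHY2 -> 4/5). Equivalently, as a sketch (the driver keeps the explicit switch):

static int uniphy_to_dig_idx(int uniphy_block /* 0, 1, 2 */, bool linkb)
{
        return uniphy_block * 2 + (linkb ? 1 : 0);
}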
/* This only needs to be called once at startup */
@@ -2254,7 +2095,6 @@ radeon_atom_encoder_init(struct radeon_device *rdev)
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
break;
@@ -2276,8 +2116,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- int encoder_mode;
radeon_encoder->pixel_clock = adjusted_mode->clock;
@@ -2301,7 +2139,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
/* handled in dpms */
break;
@@ -2326,11 +2163,15 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
atombios_apply_encoder_quirks(encoder, adjusted_mode);
- encoder_mode = atombios_get_encoder_mode(encoder);
- if (connector && (radeon_audio != 0) &&
- ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
- ENCODER_MODE_IS_DP(encoder_mode)))
- radeon_audio_mode_set(encoder, adjusted_mode);
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+ r600_hdmi_enable(encoder);
+ if (ASIC_IS_DCE6(rdev))
+ ; /* TODO (use pointers instead of if-s?) */
+ else if (ASIC_IS_DCE4(rdev))
+ evergreen_hdmi_setmode(encoder, adjusted_mode);
+ else
+ r600_hdmi_setmode(encoder, adjusted_mode);
+ }
}
static bool
@@ -2498,9 +2339,7 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
ENCODER_OBJECT_ID_NONE)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
if (dig) {
- if (dig->dig_encoder >= 0)
- radeon_atom_release_dig_encoder(rdev, dig->dig_encoder);
- dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder, -1);
+ dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
if (radeon_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) {
if (rdev->family >= CHIP_R600)
dig->afmt = rdev->mode_info.afmt[dig->dig_encoder];
@@ -2528,15 +2367,6 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
/* this is needed for the pll/ss setup to work correctly in some cases */
atombios_set_encoder_crtc_source(encoder);
- /* set up the FMT blocks */
- if (ASIC_IS_DCE8(rdev))
- dce8_program_fmt(encoder);
- else if (ASIC_IS_DCE4(rdev))
- dce4_program_fmt(encoder);
- else if (ASIC_IS_DCE3(rdev))
- dce3_program_fmt(encoder);
- else if (ASIC_IS_AVIVO(rdev))
- avivo_program_fmt(encoder);
}
static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
@@ -2581,7 +2411,6 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
/* handled in dpms */
break;
@@ -2602,18 +2431,12 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
disable_done:
if (radeon_encoder_is_digital(encoder)) {
- if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
- if (rdev->asic->display.hdmi_enable)
- radeon_hdmi_enable(rdev, encoder, false);
- }
- if (atombios_get_encoder_mode(encoder) != ATOM_ENCODER_MODE_DP_MST) {
- dig = radeon_encoder->enc_priv;
- radeon_atom_release_dig_encoder(rdev, dig->dig_encoder);
- dig->dig_encoder = -1;
- radeon_encoder->active_device = 0;
- }
- } else
- radeon_encoder->active_device = 0;
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+ r600_hdmi_disable(encoder);
+ dig = radeon_encoder->enc_priv;
+ dig->dig_encoder = -1;
+ }
+ radeon_encoder->active_device = 0;
}
/* these are handled by the primary encoders */
@@ -2817,7 +2640,6 @@ radeon_add_atom_encoder(struct drm_device *dev,
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
radeon_encoder->rmx_type = RMX_FULL;
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
diff --git a/sys/dev/pci/drm/radeon/atombios_i2c.c b/sys/dev/pci/drm/radeon/atombios_i2c.c
index b817c037af6..7440be07ad1 100644
--- a/sys/dev/pci/drm/radeon/atombios_i2c.c
+++ b/sys/dev/pci/drm/radeon/atombios_i2c.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: atombios_i2c.c,v 1.9 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2011 Advanced Micro Devices, Inc.
*
@@ -27,10 +28,12 @@
#include "radeon.h"
#include "atom.h"
+extern void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
+
#define TARGET_HW_I2C_CLOCK 50
/* these are a limitation of ProcessI2cChannelTransaction not the hw */
-#define ATOM_MAX_HW_I2C_WRITE 3
+#define ATOM_MAX_HW_I2C_WRITE 2
#define ATOM_MAX_HW_I2C_READ 255
static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
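
Note: the write cap of 2 follows from the parameter block layout: buf[0] is consumed as
the register index and the remaining payload must fit in lpI2CDataOut, a single
little-endian 16-bit word. A paraphrase of the packing (not the literal driver code; the
decrement of num happens in elided context):

u16 out = 0;

args.ucRegIndex = buf[0];
if (num > 1)
        memcpy(&out, &buf[1], num - 1); /* at most 2 bytes fit here */
args.lpI2CDataOut = cpu_to_le16(out);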
@@ -43,20 +46,15 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
unsigned char *base;
u16 out = cpu_to_le16(0);
- int r = 0;
memset(&args, 0, sizeof(args));
- mutex_lock(&chan->mutex);
- mutex_lock(&rdev->mode_info.atom_context->scratch_mutex);
-
base = (unsigned char *)rdev->mode_info.atom_context->scratch;
if (flags & HW_I2C_WRITE) {
if (num > ATOM_MAX_HW_I2C_WRITE) {
- DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
- r = -EINVAL;
- goto done;
+ DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 2)\n", num);
+ return -EINVAL;
}
if (buf == NULL)
args.ucRegIndex = 0;
@@ -67,45 +65,33 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
if (num)
memcpy(&out, &buf[1], num);
args.lpI2CDataOut = cpu_to_le16(out);
- } else {
#if 0
- /*
- * gcc 4.2 gives 'warning: comparison is always false
- * due to limited range of data type'
- */
+ } else {
if (num > ATOM_MAX_HW_I2C_READ) {
DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
- r = -EINVAL;
- goto done;
+ return -EINVAL;
}
#endif
- args.ucRegIndex = 0;
- args.lpI2CDataOut = 0;
}
- args.ucFlag = flags;
args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
+ args.ucRegIndex = 0;
args.ucTransBytes = num;
args.ucSlaveAddr = slave_addr << 1;
args.ucLineNumber = chan->rec.i2c_id;
- atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
/* error */
if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
DRM_DEBUG_KMS("hw_i2c error\n");
- r = -EIO;
- goto done;
+ return -EIO;
}
if (!(flags & HW_I2C_WRITE))
radeon_atom_copy_swap(buf, base, num, false);
-done:
- mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex);
- mutex_unlock(&chan->mutex);
-
- return r;
+ return 0;
}
int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
@@ -162,4 +148,3 @@ u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
-
diff --git a/sys/dev/pci/drm/radeon/avivod.h b/sys/dev/pci/drm/radeon/avivod.h
index 3c391e7e9fd..1a7526a1475 100644
--- a/sys/dev/pci/drm/radeon/avivod.h
+++ b/sys/dev/pci/drm/radeon/avivod.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: avivod.h,v 1.3 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2009 Advanced Micro Devices, Inc.
* Copyright 2009 Red Hat Inc.
diff --git a/sys/dev/pci/drm/radeon/cayman_blit_shaders.c b/sys/dev/pci/drm/radeon/cayman_blit_shaders.c
index b9ca2001bc5..052390c629d 100644
--- a/sys/dev/pci/drm/radeon/cayman_blit_shaders.c
+++ b/sys/dev/pci/drm/radeon/cayman_blit_shaders.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: cayman_blit_shaders.c,v 1.3 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2010 Advanced Micro Devices, Inc.
*
@@ -24,13 +25,15 @@
* Alex Deucher <alexander.deucher@amd.com>
*/
-#include <dev/pci/drm/drm_linux.h>
+#include <sys/types.h>
+
+#include <dev/pci/drm/drmP.h>
/*
* evergreen cards need to use the 3D engine to blit data which requires
* quite a bit of hw state setup. Rather than pull the whole 3D driver
* (which normally generates the 3D state) into the DRM, we opt to use
- * statically generated state tables. The register state and shaders
+ * statically generated state tables. The register state and shaders
* were hand generated to support blitting functionality. See the 3D
* driver or documentation for descriptions of the registers and
* shader instructions.
@@ -315,4 +318,58 @@ const u32 cayman_default_state[] =
0x00000010, /* */
};
+const u32 cayman_vs[] =
+{
+ 0x00000004,
+ 0x80400400,
+ 0x0000a03c,
+ 0x95000688,
+ 0x00004000,
+ 0x15000688,
+ 0x00000000,
+ 0x88000000,
+ 0x04000000,
+ 0x67961001,
+#ifdef __BIG_ENDIAN
+ 0x00020000,
+#else
+ 0x00000000,
+#endif
+ 0x00000000,
+ 0x04000000,
+ 0x67961000,
+#ifdef __BIG_ENDIAN
+ 0x00020008,
+#else
+ 0x00000008,
+#endif
+ 0x00000000,
+};
+
+const u32 cayman_ps[] =
+{
+ 0x00000004,
+ 0xa00c0000,
+ 0x00000008,
+ 0x80400000,
+ 0x00000000,
+ 0x95000688,
+ 0x00000000,
+ 0x88000000,
+ 0x00380400,
+ 0x00146b10,
+ 0x00380000,
+ 0x20146b10,
+ 0x00380400,
+ 0x40146b00,
+ 0x80380000,
+ 0x60146b00,
+ 0x00000010,
+ 0x000d1000,
+ 0xb0800000,
+ 0x00000000,
+};
+
+const u32 cayman_ps_size = ARRAY_SIZE(cayman_ps);
+const u32 cayman_vs_size = ARRAY_SIZE(cayman_vs);
const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
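
Note: the *_size constants are exported alongside the shader arrays so the blit code in
other files can upload them without compile-time visibility of the array lengths.
ARRAY_SIZE here is the stock element-count macro:

#define ARRAY_SIZE(x)   (sizeof(x) / sizeof((x)[0]))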
diff --git a/sys/dev/pci/drm/radeon/cayman_blit_shaders.h b/sys/dev/pci/drm/radeon/cayman_blit_shaders.h
index f5d0e9a6026..da0b35aaac2 100644
--- a/sys/dev/pci/drm/radeon/cayman_blit_shaders.h
+++ b/sys/dev/pci/drm/radeon/cayman_blit_shaders.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: cayman_blit_shaders.h,v 1.3 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2010 Advanced Micro Devices, Inc.
*
diff --git a/sys/dev/pci/drm/radeon/evergreen.c b/sys/dev/pci/drm/radeon/evergreen.c
index 255f6fe90e1..e9ae50dbddb 100644
--- a/sys/dev/pci/drm/radeon/evergreen.c
+++ b/sys/dev/pci/drm/radeon/evergreen.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: evergreen.c,v 1.21 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2010 Advanced Micro Devices, Inc.
*
@@ -24,83 +25,15 @@
#include <dev/pci/drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
-#include "radeon_audio.h"
#include <dev/pci/drm/radeon_drm.h>
#include "evergreend.h"
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
#include "evergreen_blit_shaders.h"
-#include "radeon_ucode.h"
-/*
- * Indirect registers accessor
- */
-u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg)
-{
- unsigned long flags;
- u32 r;
-
- spin_lock_irqsave(&rdev->cg_idx_lock, flags);
- WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
- r = RREG32(EVERGREEN_CG_IND_DATA);
- spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
- return r;
-}
-
-void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rdev->cg_idx_lock, flags);
- WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));
- WREG32(EVERGREEN_CG_IND_DATA, (v));
- spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
-}
-
-u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg)
-{
- unsigned long flags;
- u32 r;
-
- spin_lock_irqsave(&rdev->pif_idx_lock, flags);
- WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
- r = RREG32(EVERGREEN_PIF_PHY0_DATA);
- spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
- return r;
-}
-
-void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rdev->pif_idx_lock, flags);
- WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));
- WREG32(EVERGREEN_PIF_PHY0_DATA, (v));
- spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
-}
-
-u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg)
-{
- unsigned long flags;
- u32 r;
-
- spin_lock_irqsave(&rdev->pif_idx_lock, flags);
- WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
- r = RREG32(EVERGREEN_PIF_PHY1_DATA);
- spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
- return r;
-}
-
-void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rdev->pif_idx_lock, flags);
- WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));
- WREG32(EVERGREEN_PIF_PHY1_DATA, (v));
- spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
-}
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
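
Note: the deleted eg_cg/eg_pif accessors above are all instances of the classic
index/data indirect-register pattern; the originals hold a spinlock so the select and
the access stay paired. A generic sketch with the register pair parameterized (locking
elided; the helper names are illustrative):

static u32 indirect_rreg(struct radeon_device *rdev,
                         u32 index_reg, u32 data_reg, u32 reg)
{
        /* select the indirect register, then read through the data port */
        WREG32(index_reg, reg & 0xffff);
        return RREG32(data_reg);
}

static void indirect_wreg(struct radeon_device *rdev,
                          u32 index_reg, u32 data_reg, u32 reg, u32 v)
{
        WREG32(index_reg, reg & 0xffff);
        WREG32(data_reg, v);
}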
static const u32 crtc_offsets[6] =
{
@@ -112,994 +45,11 @@ static const u32 crtc_offsets[6] =
EVERGREEN_CRTC5_REGISTER_OFFSET
};
-#include "clearstate_evergreen.h"
-
-static const u32 sumo_rlc_save_restore_register_list[] =
-{
- 0x98fc,
- 0x9830,
- 0x9834,
- 0x9838,
- 0x9870,
- 0x9874,
- 0x8a14,
- 0x8b24,
- 0x8bcc,
- 0x8b10,
- 0x8d00,
- 0x8d04,
- 0x8c00,
- 0x8c04,
- 0x8c08,
- 0x8c0c,
- 0x8d8c,
- 0x8c20,
- 0x8c24,
- 0x8c28,
- 0x8c18,
- 0x8c1c,
- 0x8cf0,
- 0x8e2c,
- 0x8e38,
- 0x8c30,
- 0x9508,
- 0x9688,
- 0x9608,
- 0x960c,
- 0x9610,
- 0x9614,
- 0x88c4,
- 0x88d4,
- 0xa008,
- 0x900c,
- 0x9100,
- 0x913c,
- 0x98f8,
- 0x98f4,
- 0x9b7c,
- 0x3f8c,
- 0x8950,
- 0x8954,
- 0x8a18,
- 0x8b28,
- 0x9144,
- 0x9148,
- 0x914c,
- 0x3f90,
- 0x3f94,
- 0x915c,
- 0x9160,
- 0x9178,
- 0x917c,
- 0x9180,
- 0x918c,
- 0x9190,
- 0x9194,
- 0x9198,
- 0x919c,
- 0x91a8,
- 0x91ac,
- 0x91b0,
- 0x91b4,
- 0x91b8,
- 0x91c4,
- 0x91c8,
- 0x91cc,
- 0x91d0,
- 0x91d4,
- 0x91e0,
- 0x91e4,
- 0x91ec,
- 0x91f0,
- 0x91f4,
- 0x9200,
- 0x9204,
- 0x929c,
- 0x9150,
- 0x802c,
-};
-
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
-void evergreen_program_aspm(struct radeon_device *rdev);
extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
int ring, u32 cp_int_cntl);
-extern void cayman_vm_decode_fault(struct radeon_device *rdev,
- u32 status, u32 addr);
-void cik_init_cp_pg_table(struct radeon_device *rdev);
-
-extern u32 si_get_csb_size(struct radeon_device *rdev);
-extern void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
-extern u32 cik_get_csb_size(struct radeon_device *rdev);
-extern void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
-extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
-
-static const u32 evergreen_golden_registers[] =
-{
- 0x3f90, 0xffff0000, 0xff000000,
- 0x9148, 0xffff0000, 0xff000000,
- 0x3f94, 0xffff0000, 0xff000000,
- 0x914c, 0xffff0000, 0xff000000,
- 0x9b7c, 0xffffffff, 0x00000000,
- 0x8a14, 0xffffffff, 0x00000007,
- 0x8b10, 0xffffffff, 0x00000000,
- 0x960c, 0xffffffff, 0x54763210,
- 0x88c4, 0xffffffff, 0x000000c2,
- 0x88d4, 0xffffffff, 0x00000010,
- 0x8974, 0xffffffff, 0x00000000,
- 0xc78, 0x00000080, 0x00000080,
- 0x5eb4, 0xffffffff, 0x00000002,
- 0x5e78, 0xffffffff, 0x001000f0,
- 0x6104, 0x01000300, 0x00000000,
- 0x5bc0, 0x00300000, 0x00000000,
- 0x7030, 0xffffffff, 0x00000011,
- 0x7c30, 0xffffffff, 0x00000011,
- 0x10830, 0xffffffff, 0x00000011,
- 0x11430, 0xffffffff, 0x00000011,
- 0x12030, 0xffffffff, 0x00000011,
- 0x12c30, 0xffffffff, 0x00000011,
- 0xd02c, 0xffffffff, 0x08421000,
- 0x240c, 0xffffffff, 0x00000380,
- 0x8b24, 0xffffffff, 0x00ff0fff,
- 0x28a4c, 0x06000000, 0x06000000,
- 0x10c, 0x00000001, 0x00000001,
- 0x8d00, 0xffffffff, 0x100e4848,
- 0x8d04, 0xffffffff, 0x00164745,
- 0x8c00, 0xffffffff, 0xe4000003,
- 0x8c04, 0xffffffff, 0x40600060,
- 0x8c08, 0xffffffff, 0x001c001c,
- 0x8cf0, 0xffffffff, 0x08e00620,
- 0x8c20, 0xffffffff, 0x00800080,
- 0x8c24, 0xffffffff, 0x00800080,
- 0x8c18, 0xffffffff, 0x20202078,
- 0x8c1c, 0xffffffff, 0x00001010,
- 0x28350, 0xffffffff, 0x00000000,
- 0xa008, 0xffffffff, 0x00010000,
- 0x5c4, 0xffffffff, 0x00000001,
- 0x9508, 0xffffffff, 0x00000002,
- 0x913c, 0x0000000f, 0x0000000a
-};
-
-static const u32 evergreen_golden_registers2[] =
-{
- 0x2f4c, 0xffffffff, 0x00000000,
- 0x54f4, 0xffffffff, 0x00000000,
- 0x54f0, 0xffffffff, 0x00000000,
- 0x5498, 0xffffffff, 0x00000000,
- 0x549c, 0xffffffff, 0x00000000,
- 0x5494, 0xffffffff, 0x00000000,
- 0x53cc, 0xffffffff, 0x00000000,
- 0x53c8, 0xffffffff, 0x00000000,
- 0x53c4, 0xffffffff, 0x00000000,
- 0x53c0, 0xffffffff, 0x00000000,
- 0x53bc, 0xffffffff, 0x00000000,
- 0x53b8, 0xffffffff, 0x00000000,
- 0x53b4, 0xffffffff, 0x00000000,
- 0x53b0, 0xffffffff, 0x00000000
-};
-
-static const u32 cypress_mgcg_init[] =
-{
- 0x802c, 0xffffffff, 0xc0000000,
- 0x5448, 0xffffffff, 0x00000100,
- 0x55e4, 0xffffffff, 0x00000100,
- 0x160c, 0xffffffff, 0x00000100,
- 0x5644, 0xffffffff, 0x00000100,
- 0xc164, 0xffffffff, 0x00000100,
- 0x8a18, 0xffffffff, 0x00000100,
- 0x897c, 0xffffffff, 0x06000100,
- 0x8b28, 0xffffffff, 0x00000100,
- 0x9144, 0xffffffff, 0x00000100,
- 0x9a60, 0xffffffff, 0x00000100,
- 0x9868, 0xffffffff, 0x00000100,
- 0x8d58, 0xffffffff, 0x00000100,
- 0x9510, 0xffffffff, 0x00000100,
- 0x949c, 0xffffffff, 0x00000100,
- 0x9654, 0xffffffff, 0x00000100,
- 0x9030, 0xffffffff, 0x00000100,
- 0x9034, 0xffffffff, 0x00000100,
- 0x9038, 0xffffffff, 0x00000100,
- 0x903c, 0xffffffff, 0x00000100,
- 0x9040, 0xffffffff, 0x00000100,
- 0xa200, 0xffffffff, 0x00000100,
- 0xa204, 0xffffffff, 0x00000100,
- 0xa208, 0xffffffff, 0x00000100,
- 0xa20c, 0xffffffff, 0x00000100,
- 0x971c, 0xffffffff, 0x00000100,
- 0x977c, 0xffffffff, 0x00000100,
- 0x3f80, 0xffffffff, 0x00000100,
- 0xa210, 0xffffffff, 0x00000100,
- 0xa214, 0xffffffff, 0x00000100,
- 0x4d8, 0xffffffff, 0x00000100,
- 0x9784, 0xffffffff, 0x00000100,
- 0x9698, 0xffffffff, 0x00000100,
- 0x4d4, 0xffffffff, 0x00000200,
- 0x30cc, 0xffffffff, 0x00000100,
- 0xd0c0, 0xffffffff, 0xff000100,
- 0x802c, 0xffffffff, 0x40000000,
- 0x915c, 0xffffffff, 0x00010000,
- 0x9160, 0xffffffff, 0x00030002,
- 0x9178, 0xffffffff, 0x00070000,
- 0x917c, 0xffffffff, 0x00030002,
- 0x9180, 0xffffffff, 0x00050004,
- 0x918c, 0xffffffff, 0x00010006,
- 0x9190, 0xffffffff, 0x00090008,
- 0x9194, 0xffffffff, 0x00070000,
- 0x9198, 0xffffffff, 0x00030002,
- 0x919c, 0xffffffff, 0x00050004,
- 0x91a8, 0xffffffff, 0x00010006,
- 0x91ac, 0xffffffff, 0x00090008,
- 0x91b0, 0xffffffff, 0x00070000,
- 0x91b4, 0xffffffff, 0x00030002,
- 0x91b8, 0xffffffff, 0x00050004,
- 0x91c4, 0xffffffff, 0x00010006,
- 0x91c8, 0xffffffff, 0x00090008,
- 0x91cc, 0xffffffff, 0x00070000,
- 0x91d0, 0xffffffff, 0x00030002,
- 0x91d4, 0xffffffff, 0x00050004,
- 0x91e0, 0xffffffff, 0x00010006,
- 0x91e4, 0xffffffff, 0x00090008,
- 0x91e8, 0xffffffff, 0x00000000,
- 0x91ec, 0xffffffff, 0x00070000,
- 0x91f0, 0xffffffff, 0x00030002,
- 0x91f4, 0xffffffff, 0x00050004,
- 0x9200, 0xffffffff, 0x00010006,
- 0x9204, 0xffffffff, 0x00090008,
- 0x9208, 0xffffffff, 0x00070000,
- 0x920c, 0xffffffff, 0x00030002,
- 0x9210, 0xffffffff, 0x00050004,
- 0x921c, 0xffffffff, 0x00010006,
- 0x9220, 0xffffffff, 0x00090008,
- 0x9224, 0xffffffff, 0x00070000,
- 0x9228, 0xffffffff, 0x00030002,
- 0x922c, 0xffffffff, 0x00050004,
- 0x9238, 0xffffffff, 0x00010006,
- 0x923c, 0xffffffff, 0x00090008,
- 0x9240, 0xffffffff, 0x00070000,
- 0x9244, 0xffffffff, 0x00030002,
- 0x9248, 0xffffffff, 0x00050004,
- 0x9254, 0xffffffff, 0x00010006,
- 0x9258, 0xffffffff, 0x00090008,
- 0x925c, 0xffffffff, 0x00070000,
- 0x9260, 0xffffffff, 0x00030002,
- 0x9264, 0xffffffff, 0x00050004,
- 0x9270, 0xffffffff, 0x00010006,
- 0x9274, 0xffffffff, 0x00090008,
- 0x9278, 0xffffffff, 0x00070000,
- 0x927c, 0xffffffff, 0x00030002,
- 0x9280, 0xffffffff, 0x00050004,
- 0x928c, 0xffffffff, 0x00010006,
- 0x9290, 0xffffffff, 0x00090008,
- 0x9294, 0xffffffff, 0x00000000,
- 0x929c, 0xffffffff, 0x00000001,
- 0x802c, 0xffffffff, 0x40010000,
- 0x915c, 0xffffffff, 0x00010000,
- 0x9160, 0xffffffff, 0x00030002,
- 0x9178, 0xffffffff, 0x00070000,
- 0x917c, 0xffffffff, 0x00030002,
- 0x9180, 0xffffffff, 0x00050004,
- 0x918c, 0xffffffff, 0x00010006,
- 0x9190, 0xffffffff, 0x00090008,
- 0x9194, 0xffffffff, 0x00070000,
- 0x9198, 0xffffffff, 0x00030002,
- 0x919c, 0xffffffff, 0x00050004,
- 0x91a8, 0xffffffff, 0x00010006,
- 0x91ac, 0xffffffff, 0x00090008,
- 0x91b0, 0xffffffff, 0x00070000,
- 0x91b4, 0xffffffff, 0x00030002,
- 0x91b8, 0xffffffff, 0x00050004,
- 0x91c4, 0xffffffff, 0x00010006,
- 0x91c8, 0xffffffff, 0x00090008,
- 0x91cc, 0xffffffff, 0x00070000,
- 0x91d0, 0xffffffff, 0x00030002,
- 0x91d4, 0xffffffff, 0x00050004,
- 0x91e0, 0xffffffff, 0x00010006,
- 0x91e4, 0xffffffff, 0x00090008,
- 0x91e8, 0xffffffff, 0x00000000,
- 0x91ec, 0xffffffff, 0x00070000,
- 0x91f0, 0xffffffff, 0x00030002,
- 0x91f4, 0xffffffff, 0x00050004,
- 0x9200, 0xffffffff, 0x00010006,
- 0x9204, 0xffffffff, 0x00090008,
- 0x9208, 0xffffffff, 0x00070000,
- 0x920c, 0xffffffff, 0x00030002,
- 0x9210, 0xffffffff, 0x00050004,
- 0x921c, 0xffffffff, 0x00010006,
- 0x9220, 0xffffffff, 0x00090008,
- 0x9224, 0xffffffff, 0x00070000,
- 0x9228, 0xffffffff, 0x00030002,
- 0x922c, 0xffffffff, 0x00050004,
- 0x9238, 0xffffffff, 0x00010006,
- 0x923c, 0xffffffff, 0x00090008,
- 0x9240, 0xffffffff, 0x00070000,
- 0x9244, 0xffffffff, 0x00030002,
- 0x9248, 0xffffffff, 0x00050004,
- 0x9254, 0xffffffff, 0x00010006,
- 0x9258, 0xffffffff, 0x00090008,
- 0x925c, 0xffffffff, 0x00070000,
- 0x9260, 0xffffffff, 0x00030002,
- 0x9264, 0xffffffff, 0x00050004,
- 0x9270, 0xffffffff, 0x00010006,
- 0x9274, 0xffffffff, 0x00090008,
- 0x9278, 0xffffffff, 0x00070000,
- 0x927c, 0xffffffff, 0x00030002,
- 0x9280, 0xffffffff, 0x00050004,
- 0x928c, 0xffffffff, 0x00010006,
- 0x9290, 0xffffffff, 0x00090008,
- 0x9294, 0xffffffff, 0x00000000,
- 0x929c, 0xffffffff, 0x00000001,
- 0x802c, 0xffffffff, 0xc0000000
-};
-
-static const u32 redwood_mgcg_init[] =
-{
- 0x802c, 0xffffffff, 0xc0000000,
- 0x5448, 0xffffffff, 0x00000100,
- 0x55e4, 0xffffffff, 0x00000100,
- 0x160c, 0xffffffff, 0x00000100,
- 0x5644, 0xffffffff, 0x00000100,
- 0xc164, 0xffffffff, 0x00000100,
- 0x8a18, 0xffffffff, 0x00000100,
- 0x897c, 0xffffffff, 0x06000100,
- 0x8b28, 0xffffffff, 0x00000100,
- 0x9144, 0xffffffff, 0x00000100,
- 0x9a60, 0xffffffff, 0x00000100,
- 0x9868, 0xffffffff, 0x00000100,
- 0x8d58, 0xffffffff, 0x00000100,
- 0x9510, 0xffffffff, 0x00000100,
- 0x949c, 0xffffffff, 0x00000100,
- 0x9654, 0xffffffff, 0x00000100,
- 0x9030, 0xffffffff, 0x00000100,
- 0x9034, 0xffffffff, 0x00000100,
- 0x9038, 0xffffffff, 0x00000100,
- 0x903c, 0xffffffff, 0x00000100,
- 0x9040, 0xffffffff, 0x00000100,
- 0xa200, 0xffffffff, 0x00000100,
- 0xa204, 0xffffffff, 0x00000100,
- 0xa208, 0xffffffff, 0x00000100,
- 0xa20c, 0xffffffff, 0x00000100,
- 0x971c, 0xffffffff, 0x00000100,
- 0x977c, 0xffffffff, 0x00000100,
- 0x3f80, 0xffffffff, 0x00000100,
- 0xa210, 0xffffffff, 0x00000100,
- 0xa214, 0xffffffff, 0x00000100,
- 0x4d8, 0xffffffff, 0x00000100,
- 0x9784, 0xffffffff, 0x00000100,
- 0x9698, 0xffffffff, 0x00000100,
- 0x4d4, 0xffffffff, 0x00000200,
- 0x30cc, 0xffffffff, 0x00000100,
- 0xd0c0, 0xffffffff, 0xff000100,
- 0x802c, 0xffffffff, 0x40000000,
- 0x915c, 0xffffffff, 0x00010000,
- 0x9160, 0xffffffff, 0x00030002,
- 0x9178, 0xffffffff, 0x00070000,
- 0x917c, 0xffffffff, 0x00030002,
- 0x9180, 0xffffffff, 0x00050004,
- 0x918c, 0xffffffff, 0x00010006,
- 0x9190, 0xffffffff, 0x00090008,
- 0x9194, 0xffffffff, 0x00070000,
- 0x9198, 0xffffffff, 0x00030002,
- 0x919c, 0xffffffff, 0x00050004,
- 0x91a8, 0xffffffff, 0x00010006,
- 0x91ac, 0xffffffff, 0x00090008,
- 0x91b0, 0xffffffff, 0x00070000,
- 0x91b4, 0xffffffff, 0x00030002,
- 0x91b8, 0xffffffff, 0x00050004,
- 0x91c4, 0xffffffff, 0x00010006,
- 0x91c8, 0xffffffff, 0x00090008,
- 0x91cc, 0xffffffff, 0x00070000,
- 0x91d0, 0xffffffff, 0x00030002,
- 0x91d4, 0xffffffff, 0x00050004,
- 0x91e0, 0xffffffff, 0x00010006,
- 0x91e4, 0xffffffff, 0x00090008,
- 0x91e8, 0xffffffff, 0x00000000,
- 0x91ec, 0xffffffff, 0x00070000,
- 0x91f0, 0xffffffff, 0x00030002,
- 0x91f4, 0xffffffff, 0x00050004,
- 0x9200, 0xffffffff, 0x00010006,
- 0x9204, 0xffffffff, 0x00090008,
- 0x9294, 0xffffffff, 0x00000000,
- 0x929c, 0xffffffff, 0x00000001,
- 0x802c, 0xffffffff, 0xc0000000
-};
-
-static const u32 cedar_golden_registers[] =
-{
- 0x3f90, 0xffff0000, 0xff000000,
- 0x9148, 0xffff0000, 0xff000000,
- 0x3f94, 0xffff0000, 0xff000000,
- 0x914c, 0xffff0000, 0xff000000,
- 0x9b7c, 0xffffffff, 0x00000000,
- 0x8a14, 0xffffffff, 0x00000007,
- 0x8b10, 0xffffffff, 0x00000000,
- 0x960c, 0xffffffff, 0x54763210,
- 0x88c4, 0xffffffff, 0x000000c2,
- 0x88d4, 0xffffffff, 0x00000000,
- 0x8974, 0xffffffff, 0x00000000,
- 0xc78, 0x00000080, 0x00000080,
- 0x5eb4, 0xffffffff, 0x00000002,
- 0x5e78, 0xffffffff, 0x001000f0,
- 0x6104, 0x01000300, 0x00000000,
- 0x5bc0, 0x00300000, 0x00000000,
- 0x7030, 0xffffffff, 0x00000011,
- 0x7c30, 0xffffffff, 0x00000011,
- 0x10830, 0xffffffff, 0x00000011,
- 0x11430, 0xffffffff, 0x00000011,
- 0xd02c, 0xffffffff, 0x08421000,
- 0x240c, 0xffffffff, 0x00000380,
- 0x8b24, 0xffffffff, 0x00ff0fff,
- 0x28a4c, 0x06000000, 0x06000000,
- 0x10c, 0x00000001, 0x00000001,
- 0x8d00, 0xffffffff, 0x100e4848,
- 0x8d04, 0xffffffff, 0x00164745,
- 0x8c00, 0xffffffff, 0xe4000003,
- 0x8c04, 0xffffffff, 0x40600060,
- 0x8c08, 0xffffffff, 0x001c001c,
- 0x8cf0, 0xffffffff, 0x08e00410,
- 0x8c20, 0xffffffff, 0x00800080,
- 0x8c24, 0xffffffff, 0x00800080,
- 0x8c18, 0xffffffff, 0x20202078,
- 0x8c1c, 0xffffffff, 0x00001010,
- 0x28350, 0xffffffff, 0x00000000,
- 0xa008, 0xffffffff, 0x00010000,
- 0x5c4, 0xffffffff, 0x00000001,
- 0x9508, 0xffffffff, 0x00000002
-};
-
-static const u32 cedar_mgcg_init[] =
-{
- 0x802c, 0xffffffff, 0xc0000000,
- 0x5448, 0xffffffff, 0x00000100,
- 0x55e4, 0xffffffff, 0x00000100,
- 0x160c, 0xffffffff, 0x00000100,
- 0x5644, 0xffffffff, 0x00000100,
- 0xc164, 0xffffffff, 0x00000100,
- 0x8a18, 0xffffffff, 0x00000100,
- 0x897c, 0xffffffff, 0x06000100,
- 0x8b28, 0xffffffff, 0x00000100,
- 0x9144, 0xffffffff, 0x00000100,
- 0x9a60, 0xffffffff, 0x00000100,
- 0x9868, 0xffffffff, 0x00000100,
- 0x8d58, 0xffffffff, 0x00000100,
- 0x9510, 0xffffffff, 0x00000100,
- 0x949c, 0xffffffff, 0x00000100,
- 0x9654, 0xffffffff, 0x00000100,
- 0x9030, 0xffffffff, 0x00000100,
- 0x9034, 0xffffffff, 0x00000100,
- 0x9038, 0xffffffff, 0x00000100,
- 0x903c, 0xffffffff, 0x00000100,
- 0x9040, 0xffffffff, 0x00000100,
- 0xa200, 0xffffffff, 0x00000100,
- 0xa204, 0xffffffff, 0x00000100,
- 0xa208, 0xffffffff, 0x00000100,
- 0xa20c, 0xffffffff, 0x00000100,
- 0x971c, 0xffffffff, 0x00000100,
- 0x977c, 0xffffffff, 0x00000100,
- 0x3f80, 0xffffffff, 0x00000100,
- 0xa210, 0xffffffff, 0x00000100,
- 0xa214, 0xffffffff, 0x00000100,
- 0x4d8, 0xffffffff, 0x00000100,
- 0x9784, 0xffffffff, 0x00000100,
- 0x9698, 0xffffffff, 0x00000100,
- 0x4d4, 0xffffffff, 0x00000200,
- 0x30cc, 0xffffffff, 0x00000100,
- 0xd0c0, 0xffffffff, 0xff000100,
- 0x802c, 0xffffffff, 0x40000000,
- 0x915c, 0xffffffff, 0x00010000,
- 0x9178, 0xffffffff, 0x00050000,
- 0x917c, 0xffffffff, 0x00030002,
- 0x918c, 0xffffffff, 0x00010004,
- 0x9190, 0xffffffff, 0x00070006,
- 0x9194, 0xffffffff, 0x00050000,
- 0x9198, 0xffffffff, 0x00030002,
- 0x91a8, 0xffffffff, 0x00010004,
- 0x91ac, 0xffffffff, 0x00070006,
- 0x91e8, 0xffffffff, 0x00000000,
- 0x9294, 0xffffffff, 0x00000000,
- 0x929c, 0xffffffff, 0x00000001,
- 0x802c, 0xffffffff, 0xc0000000
-};
-
-static const u32 juniper_mgcg_init[] =
-{
- 0x802c, 0xffffffff, 0xc0000000,
- 0x5448, 0xffffffff, 0x00000100,
- 0x55e4, 0xffffffff, 0x00000100,
- 0x160c, 0xffffffff, 0x00000100,
- 0x5644, 0xffffffff, 0x00000100,
- 0xc164, 0xffffffff, 0x00000100,
- 0x8a18, 0xffffffff, 0x00000100,
- 0x897c, 0xffffffff, 0x06000100,
- 0x8b28, 0xffffffff, 0x00000100,
- 0x9144, 0xffffffff, 0x00000100,
- 0x9a60, 0xffffffff, 0x00000100,
- 0x9868, 0xffffffff, 0x00000100,
- 0x8d58, 0xffffffff, 0x00000100,
- 0x9510, 0xffffffff, 0x00000100,
- 0x949c, 0xffffffff, 0x00000100,
- 0x9654, 0xffffffff, 0x00000100,
- 0x9030, 0xffffffff, 0x00000100,
- 0x9034, 0xffffffff, 0x00000100,
- 0x9038, 0xffffffff, 0x00000100,
- 0x903c, 0xffffffff, 0x00000100,
- 0x9040, 0xffffffff, 0x00000100,
- 0xa200, 0xffffffff, 0x00000100,
- 0xa204, 0xffffffff, 0x00000100,
- 0xa208, 0xffffffff, 0x00000100,
- 0xa20c, 0xffffffff, 0x00000100,
- 0x971c, 0xffffffff, 0x00000100,
- 0xd0c0, 0xffffffff, 0xff000100,
- 0x802c, 0xffffffff, 0x40000000,
- 0x915c, 0xffffffff, 0x00010000,
- 0x9160, 0xffffffff, 0x00030002,
- 0x9178, 0xffffffff, 0x00070000,
- 0x917c, 0xffffffff, 0x00030002,
- 0x9180, 0xffffffff, 0x00050004,
- 0x918c, 0xffffffff, 0x00010006,
- 0x9190, 0xffffffff, 0x00090008,
- 0x9194, 0xffffffff, 0x00070000,
- 0x9198, 0xffffffff, 0x00030002,
- 0x919c, 0xffffffff, 0x00050004,
- 0x91a8, 0xffffffff, 0x00010006,
- 0x91ac, 0xffffffff, 0x00090008,
- 0x91b0, 0xffffffff, 0x00070000,
- 0x91b4, 0xffffffff, 0x00030002,
- 0x91b8, 0xffffffff, 0x00050004,
- 0x91c4, 0xffffffff, 0x00010006,
- 0x91c8, 0xffffffff, 0x00090008,
- 0x91cc, 0xffffffff, 0x00070000,
- 0x91d0, 0xffffffff, 0x00030002,
- 0x91d4, 0xffffffff, 0x00050004,
- 0x91e0, 0xffffffff, 0x00010006,
- 0x91e4, 0xffffffff, 0x00090008,
- 0x91e8, 0xffffffff, 0x00000000,
- 0x91ec, 0xffffffff, 0x00070000,
- 0x91f0, 0xffffffff, 0x00030002,
- 0x91f4, 0xffffffff, 0x00050004,
- 0x9200, 0xffffffff, 0x00010006,
- 0x9204, 0xffffffff, 0x00090008,
- 0x9208, 0xffffffff, 0x00070000,
- 0x920c, 0xffffffff, 0x00030002,
- 0x9210, 0xffffffff, 0x00050004,
- 0x921c, 0xffffffff, 0x00010006,
- 0x9220, 0xffffffff, 0x00090008,
- 0x9224, 0xffffffff, 0x00070000,
- 0x9228, 0xffffffff, 0x00030002,
- 0x922c, 0xffffffff, 0x00050004,
- 0x9238, 0xffffffff, 0x00010006,
- 0x923c, 0xffffffff, 0x00090008,
- 0x9240, 0xffffffff, 0x00070000,
- 0x9244, 0xffffffff, 0x00030002,
- 0x9248, 0xffffffff, 0x00050004,
- 0x9254, 0xffffffff, 0x00010006,
- 0x9258, 0xffffffff, 0x00090008,
- 0x925c, 0xffffffff, 0x00070000,
- 0x9260, 0xffffffff, 0x00030002,
- 0x9264, 0xffffffff, 0x00050004,
- 0x9270, 0xffffffff, 0x00010006,
- 0x9274, 0xffffffff, 0x00090008,
- 0x9278, 0xffffffff, 0x00070000,
- 0x927c, 0xffffffff, 0x00030002,
- 0x9280, 0xffffffff, 0x00050004,
- 0x928c, 0xffffffff, 0x00010006,
- 0x9290, 0xffffffff, 0x00090008,
- 0x9294, 0xffffffff, 0x00000000,
- 0x929c, 0xffffffff, 0x00000001,
- 0x802c, 0xffffffff, 0xc0000000,
- 0x977c, 0xffffffff, 0x00000100,
- 0x3f80, 0xffffffff, 0x00000100,
- 0xa210, 0xffffffff, 0x00000100,
- 0xa214, 0xffffffff, 0x00000100,
- 0x4d8, 0xffffffff, 0x00000100,
- 0x9784, 0xffffffff, 0x00000100,
- 0x9698, 0xffffffff, 0x00000100,
- 0x4d4, 0xffffffff, 0x00000200,
- 0x30cc, 0xffffffff, 0x00000100,
- 0x802c, 0xffffffff, 0xc0000000
-};
-
-static const u32 supersumo_golden_registers[] =
-{
- 0x5eb4, 0xffffffff, 0x00000002,
- 0x5c4, 0xffffffff, 0x00000001,
- 0x7030, 0xffffffff, 0x00000011,
- 0x7c30, 0xffffffff, 0x00000011,
- 0x6104, 0x01000300, 0x00000000,
- 0x5bc0, 0x00300000, 0x00000000,
- 0x8c04, 0xffffffff, 0x40600060,
- 0x8c08, 0xffffffff, 0x001c001c,
- 0x8c20, 0xffffffff, 0x00800080,
- 0x8c24, 0xffffffff, 0x00800080,
- 0x8c18, 0xffffffff, 0x20202078,
- 0x8c1c, 0xffffffff, 0x00001010,
- 0x918c, 0xffffffff, 0x00010006,
- 0x91a8, 0xffffffff, 0x00010006,
- 0x91c4, 0xffffffff, 0x00010006,
- 0x91e0, 0xffffffff, 0x00010006,
- 0x9200, 0xffffffff, 0x00010006,
- 0x9150, 0xffffffff, 0x6e944040,
- 0x917c, 0xffffffff, 0x00030002,
- 0x9180, 0xffffffff, 0x00050004,
- 0x9198, 0xffffffff, 0x00030002,
- 0x919c, 0xffffffff, 0x00050004,
- 0x91b4, 0xffffffff, 0x00030002,
- 0x91b8, 0xffffffff, 0x00050004,
- 0x91d0, 0xffffffff, 0x00030002,
- 0x91d4, 0xffffffff, 0x00050004,
- 0x91f0, 0xffffffff, 0x00030002,
- 0x91f4, 0xffffffff, 0x00050004,
- 0x915c, 0xffffffff, 0x00010000,
- 0x9160, 0xffffffff, 0x00030002,
- 0x3f90, 0xffff0000, 0xff000000,
- 0x9178, 0xffffffff, 0x00070000,
- 0x9194, 0xffffffff, 0x00070000,
- 0x91b0, 0xffffffff, 0x00070000,
- 0x91cc, 0xffffffff, 0x00070000,
- 0x91ec, 0xffffffff, 0x00070000,
- 0x9148, 0xffff0000, 0xff000000,
- 0x9190, 0xffffffff, 0x00090008,
- 0x91ac, 0xffffffff, 0x00090008,
- 0x91c8, 0xffffffff, 0x00090008,
- 0x91e4, 0xffffffff, 0x00090008,
- 0x9204, 0xffffffff, 0x00090008,
- 0x3f94, 0xffff0000, 0xff000000,
- 0x914c, 0xffff0000, 0xff000000,
- 0x929c, 0xffffffff, 0x00000001,
- 0x8a18, 0xffffffff, 0x00000100,
- 0x8b28, 0xffffffff, 0x00000100,
- 0x9144, 0xffffffff, 0x00000100,
- 0x5644, 0xffffffff, 0x00000100,
- 0x9b7c, 0xffffffff, 0x00000000,
- 0x8030, 0xffffffff, 0x0000100a,
- 0x8a14, 0xffffffff, 0x00000007,
- 0x8b24, 0xffffffff, 0x00ff0fff,
- 0x8b10, 0xffffffff, 0x00000000,
- 0x28a4c, 0x06000000, 0x06000000,
- 0x4d8, 0xffffffff, 0x00000100,
- 0x913c, 0xffff000f, 0x0100000a,
- 0x960c, 0xffffffff, 0x54763210,
- 0x88c4, 0xffffffff, 0x000000c2,
- 0x88d4, 0xffffffff, 0x00000010,
- 0x8974, 0xffffffff, 0x00000000,
- 0xc78, 0x00000080, 0x00000080,
- 0x5e78, 0xffffffff, 0x001000f0,
- 0xd02c, 0xffffffff, 0x08421000,
- 0xa008, 0xffffffff, 0x00010000,
- 0x8d00, 0xffffffff, 0x100e4848,
- 0x8d04, 0xffffffff, 0x00164745,
- 0x8c00, 0xffffffff, 0xe4000003,
- 0x8cf0, 0x1fffffff, 0x08e00620,
- 0x28350, 0xffffffff, 0x00000000,
- 0x9508, 0xffffffff, 0x00000002
-};
-
-static const u32 sumo_golden_registers[] =
-{
- 0x900c, 0x00ffffff, 0x0017071f,
- 0x8c18, 0xffffffff, 0x10101060,
- 0x8c1c, 0xffffffff, 0x00001010,
- 0x8c30, 0x0000000f, 0x00000005,
- 0x9688, 0x0000000f, 0x00000007
-};
-
-static const u32 wrestler_golden_registers[] =
-{
- 0x5eb4, 0xffffffff, 0x00000002,
- 0x5c4, 0xffffffff, 0x00000001,
- 0x7030, 0xffffffff, 0x00000011,
- 0x7c30, 0xffffffff, 0x00000011,
- 0x6104, 0x01000300, 0x00000000,
- 0x5bc0, 0x00300000, 0x00000000,
- 0x918c, 0xffffffff, 0x00010006,
- 0x91a8, 0xffffffff, 0x00010006,
- 0x9150, 0xffffffff, 0x6e944040,
- 0x917c, 0xffffffff, 0x00030002,
- 0x9198, 0xffffffff, 0x00030002,
- 0x915c, 0xffffffff, 0x00010000,
- 0x3f90, 0xffff0000, 0xff000000,
- 0x9178, 0xffffffff, 0x00070000,
- 0x9194, 0xffffffff, 0x00070000,
- 0x9148, 0xffff0000, 0xff000000,
- 0x9190, 0xffffffff, 0x00090008,
- 0x91ac, 0xffffffff, 0x00090008,
- 0x3f94, 0xffff0000, 0xff000000,
- 0x914c, 0xffff0000, 0xff000000,
- 0x929c, 0xffffffff, 0x00000001,
- 0x8a18, 0xffffffff, 0x00000100,
- 0x8b28, 0xffffffff, 0x00000100,
- 0x9144, 0xffffffff, 0x00000100,
- 0x9b7c, 0xffffffff, 0x00000000,
- 0x8030, 0xffffffff, 0x0000100a,
- 0x8a14, 0xffffffff, 0x00000001,
- 0x8b24, 0xffffffff, 0x00ff0fff,
- 0x8b10, 0xffffffff, 0x00000000,
- 0x28a4c, 0x06000000, 0x06000000,
- 0x4d8, 0xffffffff, 0x00000100,
- 0x913c, 0xffff000f, 0x0100000a,
- 0x960c, 0xffffffff, 0x54763210,
- 0x88c4, 0xffffffff, 0x000000c2,
- 0x88d4, 0xffffffff, 0x00000010,
- 0x8974, 0xffffffff, 0x00000000,
- 0xc78, 0x00000080, 0x00000080,
- 0x5e78, 0xffffffff, 0x001000f0,
- 0xd02c, 0xffffffff, 0x08421000,
- 0xa008, 0xffffffff, 0x00010000,
- 0x8d00, 0xffffffff, 0x100e4848,
- 0x8d04, 0xffffffff, 0x00164745,
- 0x8c00, 0xffffffff, 0xe4000003,
- 0x8cf0, 0x1fffffff, 0x08e00410,
- 0x28350, 0xffffffff, 0x00000000,
- 0x9508, 0xffffffff, 0x00000002,
- 0x900c, 0xffffffff, 0x0017071f,
- 0x8c18, 0xffffffff, 0x10101060,
- 0x8c1c, 0xffffffff, 0x00001010
-};
-
-static const u32 barts_golden_registers[] =
-{
- 0x5eb4, 0xffffffff, 0x00000002,
- 0x5e78, 0x8f311ff1, 0x001000f0,
- 0x3f90, 0xffff0000, 0xff000000,
- 0x9148, 0xffff0000, 0xff000000,
- 0x3f94, 0xffff0000, 0xff000000,
- 0x914c, 0xffff0000, 0xff000000,
- 0xc78, 0x00000080, 0x00000080,
- 0xbd4, 0x70073777, 0x00010001,
- 0xd02c, 0xbfffff1f, 0x08421000,
- 0xd0b8, 0x03773777, 0x02011003,
- 0x5bc0, 0x00200000, 0x50100000,
- 0x98f8, 0x33773777, 0x02011003,
- 0x98fc, 0xffffffff, 0x76543210,
- 0x7030, 0x31000311, 0x00000011,
- 0x2f48, 0x00000007, 0x02011003,
- 0x6b28, 0x00000010, 0x00000012,
- 0x7728, 0x00000010, 0x00000012,
- 0x10328, 0x00000010, 0x00000012,
- 0x10f28, 0x00000010, 0x00000012,
- 0x11b28, 0x00000010, 0x00000012,
- 0x12728, 0x00000010, 0x00000012,
- 0x240c, 0x000007ff, 0x00000380,
- 0x8a14, 0xf000001f, 0x00000007,
- 0x8b24, 0x3fff3fff, 0x00ff0fff,
- 0x8b10, 0x0000ff0f, 0x00000000,
- 0x28a4c, 0x07ffffff, 0x06000000,
- 0x10c, 0x00000001, 0x00010003,
- 0xa02c, 0xffffffff, 0x0000009b,
- 0x913c, 0x0000000f, 0x0100000a,
- 0x8d00, 0xffff7f7f, 0x100e4848,
- 0x8d04, 0x00ffffff, 0x00164745,
- 0x8c00, 0xfffc0003, 0xe4000003,
- 0x8c04, 0xf8ff00ff, 0x40600060,
- 0x8c08, 0x00ff00ff, 0x001c001c,
- 0x8cf0, 0x1fff1fff, 0x08e00620,
- 0x8c20, 0x0fff0fff, 0x00800080,
- 0x8c24, 0x0fff0fff, 0x00800080,
- 0x8c18, 0xffffffff, 0x20202078,
- 0x8c1c, 0x0000ffff, 0x00001010,
- 0x28350, 0x00000f01, 0x00000000,
- 0x9508, 0x3700001f, 0x00000002,
- 0x960c, 0xffffffff, 0x54763210,
- 0x88c4, 0x001f3ae3, 0x000000c2,
- 0x88d4, 0x0000001f, 0x00000010,
- 0x8974, 0xffffffff, 0x00000000
-};
-
-static const u32 turks_golden_registers[] =
-{
- 0x5eb4, 0xffffffff, 0x00000002,
- 0x5e78, 0x8f311ff1, 0x001000f0,
- 0x8c8, 0x00003000, 0x00001070,
- 0x8cc, 0x000fffff, 0x00040035,
- 0x3f90, 0xffff0000, 0xfff00000,
- 0x9148, 0xffff0000, 0xfff00000,
- 0x3f94, 0xffff0000, 0xfff00000,
- 0x914c, 0xffff0000, 0xfff00000,
- 0xc78, 0x00000080, 0x00000080,
- 0xbd4, 0x00073007, 0x00010002,
- 0xd02c, 0xbfffff1f, 0x08421000,
- 0xd0b8, 0x03773777, 0x02010002,
- 0x5bc0, 0x00200000, 0x50100000,
- 0x98f8, 0x33773777, 0x00010002,
- 0x98fc, 0xffffffff, 0x33221100,
- 0x7030, 0x31000311, 0x00000011,
- 0x2f48, 0x33773777, 0x00010002,
- 0x6b28, 0x00000010, 0x00000012,
- 0x7728, 0x00000010, 0x00000012,
- 0x10328, 0x00000010, 0x00000012,
- 0x10f28, 0x00000010, 0x00000012,
- 0x11b28, 0x00000010, 0x00000012,
- 0x12728, 0x00000010, 0x00000012,
- 0x240c, 0x000007ff, 0x00000380,
- 0x8a14, 0xf000001f, 0x00000007,
- 0x8b24, 0x3fff3fff, 0x00ff0fff,
- 0x8b10, 0x0000ff0f, 0x00000000,
- 0x28a4c, 0x07ffffff, 0x06000000,
- 0x10c, 0x00000001, 0x00010003,
- 0xa02c, 0xffffffff, 0x0000009b,
- 0x913c, 0x0000000f, 0x0100000a,
- 0x8d00, 0xffff7f7f, 0x100e4848,
- 0x8d04, 0x00ffffff, 0x00164745,
- 0x8c00, 0xfffc0003, 0xe4000003,
- 0x8c04, 0xf8ff00ff, 0x40600060,
- 0x8c08, 0x00ff00ff, 0x001c001c,
- 0x8cf0, 0x1fff1fff, 0x08e00410,
- 0x8c20, 0x0fff0fff, 0x00800080,
- 0x8c24, 0x0fff0fff, 0x00800080,
- 0x8c18, 0xffffffff, 0x20202078,
- 0x8c1c, 0x0000ffff, 0x00001010,
- 0x28350, 0x00000f01, 0x00000000,
- 0x9508, 0x3700001f, 0x00000002,
- 0x960c, 0xffffffff, 0x54763210,
- 0x88c4, 0x001f3ae3, 0x000000c2,
- 0x88d4, 0x0000001f, 0x00000010,
- 0x8974, 0xffffffff, 0x00000000
-};
-
-static const u32 caicos_golden_registers[] =
-{
- 0x5eb4, 0xffffffff, 0x00000002,
- 0x5e78, 0x8f311ff1, 0x001000f0,
- 0x8c8, 0x00003420, 0x00001450,
- 0x8cc, 0x000fffff, 0x00040035,
- 0x3f90, 0xffff0000, 0xfffc0000,
- 0x9148, 0xffff0000, 0xfffc0000,
- 0x3f94, 0xffff0000, 0xfffc0000,
- 0x914c, 0xffff0000, 0xfffc0000,
- 0xc78, 0x00000080, 0x00000080,
- 0xbd4, 0x00073007, 0x00010001,
- 0xd02c, 0xbfffff1f, 0x08421000,
- 0xd0b8, 0x03773777, 0x02010001,
- 0x5bc0, 0x00200000, 0x50100000,
- 0x98f8, 0x33773777, 0x02010001,
- 0x98fc, 0xffffffff, 0x33221100,
- 0x7030, 0x31000311, 0x00000011,
- 0x2f48, 0x33773777, 0x02010001,
- 0x6b28, 0x00000010, 0x00000012,
- 0x7728, 0x00000010, 0x00000012,
- 0x10328, 0x00000010, 0x00000012,
- 0x10f28, 0x00000010, 0x00000012,
- 0x11b28, 0x00000010, 0x00000012,
- 0x12728, 0x00000010, 0x00000012,
- 0x240c, 0x000007ff, 0x00000380,
- 0x8a14, 0xf000001f, 0x00000001,
- 0x8b24, 0x3fff3fff, 0x00ff0fff,
- 0x8b10, 0x0000ff0f, 0x00000000,
- 0x28a4c, 0x07ffffff, 0x06000000,
- 0x10c, 0x00000001, 0x00010003,
- 0xa02c, 0xffffffff, 0x0000009b,
- 0x913c, 0x0000000f, 0x0100000a,
- 0x8d00, 0xffff7f7f, 0x100e4848,
- 0x8d04, 0x00ffffff, 0x00164745,
- 0x8c00, 0xfffc0003, 0xe4000003,
- 0x8c04, 0xf8ff00ff, 0x40600060,
- 0x8c08, 0x00ff00ff, 0x001c001c,
- 0x8cf0, 0x1fff1fff, 0x08e00410,
- 0x8c20, 0x0fff0fff, 0x00800080,
- 0x8c24, 0x0fff0fff, 0x00800080,
- 0x8c18, 0xffffffff, 0x20202078,
- 0x8c1c, 0x0000ffff, 0x00001010,
- 0x28350, 0x00000f01, 0x00000000,
- 0x9508, 0x3700001f, 0x00000002,
- 0x960c, 0xffffffff, 0x54763210,
- 0x88c4, 0x001f3ae3, 0x000000c2,
- 0x88d4, 0x0000001f, 0x00000010,
- 0x8974, 0xffffffff, 0x00000000
-};
-
-static void evergreen_init_golden_registers(struct radeon_device *rdev)
-{
- switch (rdev->family) {
- case CHIP_CYPRESS:
- case CHIP_HEMLOCK:
- radeon_program_register_sequence(rdev,
- evergreen_golden_registers,
- (const u32)ARRAY_SIZE(evergreen_golden_registers));
- radeon_program_register_sequence(rdev,
- evergreen_golden_registers2,
- (const u32)ARRAY_SIZE(evergreen_golden_registers2));
- radeon_program_register_sequence(rdev,
- cypress_mgcg_init,
- (const u32)ARRAY_SIZE(cypress_mgcg_init));
- break;
- case CHIP_JUNIPER:
- radeon_program_register_sequence(rdev,
- evergreen_golden_registers,
- (const u32)ARRAY_SIZE(evergreen_golden_registers));
- radeon_program_register_sequence(rdev,
- evergreen_golden_registers2,
- (const u32)ARRAY_SIZE(evergreen_golden_registers2));
- radeon_program_register_sequence(rdev,
- juniper_mgcg_init,
- (const u32)ARRAY_SIZE(juniper_mgcg_init));
- break;
- case CHIP_REDWOOD:
- radeon_program_register_sequence(rdev,
- evergreen_golden_registers,
- (const u32)ARRAY_SIZE(evergreen_golden_registers));
- radeon_program_register_sequence(rdev,
- evergreen_golden_registers2,
- (const u32)ARRAY_SIZE(evergreen_golden_registers2));
- radeon_program_register_sequence(rdev,
- redwood_mgcg_init,
- (const u32)ARRAY_SIZE(redwood_mgcg_init));
- break;
- case CHIP_CEDAR:
- radeon_program_register_sequence(rdev,
- cedar_golden_registers,
- (const u32)ARRAY_SIZE(cedar_golden_registers));
- radeon_program_register_sequence(rdev,
- evergreen_golden_registers2,
- (const u32)ARRAY_SIZE(evergreen_golden_registers2));
- radeon_program_register_sequence(rdev,
- cedar_mgcg_init,
- (const u32)ARRAY_SIZE(cedar_mgcg_init));
- break;
- case CHIP_PALM:
- radeon_program_register_sequence(rdev,
- wrestler_golden_registers,
- (const u32)ARRAY_SIZE(wrestler_golden_registers));
- break;
- case CHIP_SUMO:
- radeon_program_register_sequence(rdev,
- supersumo_golden_registers,
- (const u32)ARRAY_SIZE(supersumo_golden_registers));
- break;
- case CHIP_SUMO2:
- radeon_program_register_sequence(rdev,
- supersumo_golden_registers,
- (const u32)ARRAY_SIZE(supersumo_golden_registers));
- radeon_program_register_sequence(rdev,
- sumo_golden_registers,
- (const u32)ARRAY_SIZE(sumo_golden_registers));
- break;
- case CHIP_BARTS:
- radeon_program_register_sequence(rdev,
- barts_golden_registers,
- (const u32)ARRAY_SIZE(barts_golden_registers));
- break;
- case CHIP_TURKS:
- radeon_program_register_sequence(rdev,
- turks_golden_registers,
- (const u32)ARRAY_SIZE(turks_golden_registers));
- break;
- case CHIP_CAICOS:
- radeon_program_register_sequence(rdev,
- caicos_golden_registers,
- (const u32)ARRAY_SIZE(caicos_golden_registers));
- break;
- default:
- break;
- }
-}
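
Each golden-register table above is a flat array of (offset, and_mask,
or_mask) triplets, three u32s per entry, which is why ARRAY_SIZE is passed
through unchanged rather than divided by three. A minimal sketch of how a
helper like radeon_program_register_sequence() consumes such a table,
assuming the RREG32/WREG32 accessors used throughout this file; the
full-mask fast path is an assumption of the sketch:

static void program_register_sequence(struct radeon_device *rdev,
				      const u32 *registers,
				      const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];	/* bits this entry owns */
		or_mask = registers[i + 2];	/* new value for those bits */

		if (and_mask == 0xffffffff) {
			tmp = or_mask;		/* whole-register write */
		} else {
			tmp = RREG32(reg);	/* read-modify-write */
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}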
-
-/**
- * evergreen_get_allowed_info_register - fetch the register for the info ioctl
- *
- * @rdev: radeon_device pointer
- * @reg: register offset in bytes
- * @val: register value
- *
- * Returns 0 for success or -EINVAL for an invalid register
- *
- */
-int evergreen_get_allowed_info_register(struct radeon_device *rdev,
- u32 reg, u32 *val)
-{
- switch (reg) {
- case GRBM_STATUS:
- case GRBM_STATUS_SE0:
- case GRBM_STATUS_SE1:
- case SRBM_STATUS:
- case SRBM_STATUS2:
- case DMA_STATUS_REG:
- case UVD_STATUS:
- *val = RREG32(reg);
- return 0;
- default:
- return -EINVAL;
- }
-}
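
This whitelist backs the driver's read-register info query: userspace may
sample only the status registers cased above, and everything else is refused
with -EINVAL. A hedged usage sketch:

	u32 val;

	if (evergreen_get_allowed_info_register(rdev, GRBM_STATUS, &val) == 0)
		DRM_DEBUG("GRBM_STATUS = 0x%08x\n", val);
	else
		DRM_DEBUG("register is not readable through the info query\n");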
void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
unsigned *bankh, unsigned *mtaspect,
@@ -1132,157 +82,6 @@ void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
}
}
-static int sumo_set_uvd_clock(struct radeon_device *rdev, u32 clock,
- u32 cntl_reg, u32 status_reg)
-{
- int r, i;
- struct atom_clock_dividers dividers;
-
- r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
- clock, false, &dividers);
- if (r)
- return r;
-
- WREG32_P(cntl_reg, dividers.post_div, ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK));
-
- for (i = 0; i < 100; i++) {
- if (RREG32(status_reg) & DCLK_STATUS)
- break;
- mdelay(10);
- }
- if (i == 100)
- return -ETIMEDOUT;
-
- return 0;
-}
-
-int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
-{
- int r = 0;
- u32 cg_scratch = RREG32(CG_SCRATCH1);
-
- r = sumo_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
- if (r)
- goto done;
- cg_scratch &= 0xffff0000;
- cg_scratch |= vclk / 100; /* Mhz */
-
- r = sumo_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
- if (r)
- goto done;
- cg_scratch &= 0x0000ffff;
- cg_scratch |= (dclk / 100) << 16; /* Mhz */
-
-done:
- WREG32(CG_SCRATCH1, cg_scratch);
-
- return r;
-}
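
Clock arguments in this driver are carried in 10 kHz units, so the /100
above converts to MHz before the two rates are cached side by side in
CG_SCRATCH1. A worked example, assuming vclk = 54000 (540 MHz) and
dclk = 40000 (400 MHz):

	u32 cg_scratch = 0;

	cg_scratch |= 54000 / 100;		/* low word:  540 = 0x021c */
	cg_scratch |= (40000 / 100) << 16;	/* high word: 400 = 0x0190 */
	/* cg_scratch == 0x0190021c */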
-
-int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
-{
- /* start off with something large */
- unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
- int r;
-
- /* bypass vclk and dclk with bclk */
- WREG32_P(CG_UPLL_FUNC_CNTL_2,
- VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
- ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
-
- /* put PLL in bypass mode */
- WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
-
- if (!vclk || !dclk) {
- /* keep the Bypass mode, put PLL to sleep */
- WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
- return 0;
- }
-
- r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
- 16384, 0x03FFFFFF, 0, 128, 5,
- &fb_div, &vclk_div, &dclk_div);
- if (r)
- return r;
-
- /* set VCO_MODE to 1 */
- WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
-
- /* toggle UPLL_SLEEP to 1 then back to 0 */
- WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
- WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
-
- /* deassert UPLL_RESET */
- WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
-
- mdelay(1);
-
- r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
- if (r)
- return r;
-
- /* assert UPLL_RESET again */
- WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
-
- /* disable spread spectrum. */
- WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
-
- /* set feedback divider */
- WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
-
- /* set ref divider to 0 */
- WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
-
- if (fb_div < 307200)
- WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
- else
- WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
-
- /* set PDIV_A and PDIV_B */
- WREG32_P(CG_UPLL_FUNC_CNTL_2,
- UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
- ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
-
- /* give the PLL some time to settle */
- mdelay(15);
-
- /* deassert PLL_RESET */
- WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
-
- mdelay(15);
-
- /* switch from bypass mode to normal mode */
- WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
-
- r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
- if (r)
- return r;
-
- /* switch VCLK and DCLK selection */
- WREG32_P(CG_UPLL_FUNC_CNTL_2,
- VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
- ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
-
- mdelay(100);
-
- return 0;
-}
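
Stripped of per-field detail, evergreen_set_uvd_clocks() follows the usual
safe PLL-retune shape: park the consumers on the bypass clock, retune while
nothing depends on the PLL output, then switch back. A condensed sketch
built only from the steps shown above:

static int uvd_upll_retune_sketch(struct radeon_device *rdev,
				  u32 vclk, u32 dclk)
{
	unsigned fb_div, vclk_div, dclk_div;
	int r;

	/* park VCLK/DCLK on the bypass source before touching the PLL */
	WREG32_P(CG_UPLL_FUNC_CNTL_2, VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);

	/* pick dividers that keep the VCO inside its legal window */
	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
					  16384, 0x03FFFFFF, 0, 128, 5,
					  &fb_div, &vclk_div, &dclk_div);
	if (r)
		return r;

	/* ... program fb/post dividers, pulse UPLL_RESET, wait for lock ... */

	/* only then hand VCLK/DCLK back to the PLL */
	WREG32_P(CG_UPLL_FUNC_CNTL_2, VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
	return 0;
}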
-
-#ifdef __linux__
-void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
-{
- int readrq;
- u16 v;
-
- readrq = pcie_get_readrq(rdev->pdev);
- v = ffs(readrq) - 8;
- /* if bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
-	 * to avoid hangs or performance issues
- */
- if ((v == 0) || (v == 6) || (v == 7))
- pcie_set_readrq(rdev->pdev, 512);
-}
-#else
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
pcireg_t ctl, v;
@@ -1305,63 +104,6 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
pci_conf_write(rdev->pc, rdev->pa_tag, off + PCI_PCIE_DCSR, ctl);
}
}
-#endif
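
Both variants above police the PCIe Max_Read_Request_Size field, which
encodes the size as bytes = 128 << v; the Linux side recovers v as
ffs(bytes) - 8. Worked values for the checks shown:

	/* readrq = 4096 -> ffs(4096) = 13 -> v = 5  (legal)               */
	/* readrq =  512 -> ffs(512)  = 10 -> v = 2  (the forced fixup)    */
	/* readrq =  128 -> ffs(128)  =  8 -> v = 0  (rejected: too small) */
	/* v == 6 or v == 7 is reserved by the PCIe spec, also forced back */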
-
-void dce4_program_fmt(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- int bpc = 0;
- u32 tmp = 0;
- enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
-
- if (connector) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- bpc = radeon_get_monitor_bpc(connector);
- dither = radeon_connector->dither;
- }
-
- /* LVDS/eDP FMT is set up by atom */
- if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
- return;
-
- /* not needed for analog */
- if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
- (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
- return;
-
- if (bpc == 0)
- return;
-
- switch (bpc) {
- case 6:
- if (dither == RADEON_FMT_DITHER_ENABLE)
- /* XXX sort out optimal dither settings */
- tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
- FMT_SPATIAL_DITHER_EN);
- else
- tmp |= FMT_TRUNCATE_EN;
- break;
- case 8:
- if (dither == RADEON_FMT_DITHER_ENABLE)
- /* XXX sort out optimal dither settings */
- tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
- FMT_RGB_RANDOM_ENABLE |
- FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
- else
- tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
- break;
- case 10:
- default:
- /* not needed */
- break;
- }
-
- WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
-}
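
In short, dce4_program_fmt() matches the pipe's output precision to the
sink: a 6 or 8 bpc monitor gets either spatial/random dithering or plain
truncation of the low bits, while 10 bpc needs no conversion. The two
possible 6 bpc outcomes, spelled out:

	/* dither requested: hide the 8->6 bit quantization in noise */
	tmp = FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
	      FMT_SPATIAL_DITHER_EN;
	/* no dithering: simply drop the low two bits */
	tmp = FMT_TRUNCATE_EN;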
static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
{
@@ -1421,43 +163,83 @@ void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
}
/**
- * evergreen_page_flip - pageflip callback.
+ * evergreen_pre_page_flip - pre-pageflip callback.
*
* @rdev: radeon_device pointer
- * @crtc_id: crtc to cleanup pageflip on
- * @crtc_base: new address of the crtc (GPU MC address)
+ * @crtc: crtc to prepare for pageflip on
*
- * Triggers the actual pageflip by updating the primary
- * surface base address (evergreen+).
+ * Pre-pageflip callback (evergreen+).
+ * Enables the pageflip irq (vblank irq).
*/
-void evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
{
- struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ /* enable the pflip int */
+ radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
- /* update the scanout addresses */
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
- upper_32_bits(crtc_base));
- WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
- (u32)crtc_base);
- /* post the write */
- RREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset);
+/**
+ * evergreen_post_page_flip - post-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to cleanup pageflip on
+ *
+ * Post-pageflip callback (evergreen+).
+ * Disables the pageflip irq (vblank irq).
+ */
+void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+ /* disable the pflip int */
+ radeon_irq_kms_pflip_irq_put(rdev, crtc);
}
/**
- * evergreen_page_flip_pending - check if page flip is still pending
+ * evergreen_page_flip - pageflip callback.
*
* @rdev: radeon_device pointer
- * @crtc_id: crtc to check
+ * @crtc_id: crtc to cleanup pageflip on
+ * @crtc_base: new address of the crtc (GPU MC address)
*
+ * Does the actual pageflip (evergreen+).
+ * During vblank we take the crtc lock and wait for the update_pending
+ * bit to go high; when it does, we release the lock and allow the
+ * double-buffered update to take place.
* Returns the current update pending status.
*/
-bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc_id)
+u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
+
+ /* Lock the graphics update lock */
+ tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+ /* update the scanout addresses */
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(crtc_base));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(crtc_base));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)crtc_base);
+
+ /* Wait for update_pending to go high. */
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
+ DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+ /* Unlock the lock, so double-buffering can take place inside vblank */
+ tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
/* Return current update_pending status: */
- return !!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) &
- EVERGREEN_GRPH_SURFACE_UPDATE_PENDING);
+ return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
}
/* get temperature in millidegrees */
@@ -1641,8 +423,8 @@ void evergreen_pm_misc(struct radeon_device *rdev)
struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
if (voltage->type == VOLTAGE_SW) {
- /* 0xff0x are flags rather then an actual voltage */
- if ((voltage->voltage & 0xff00) == 0xff00)
+	/* 0xff01 is a flag rather than an actual voltage */
+ if (voltage->voltage == 0xff01)
return;
if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
@@ -1662,8 +444,8 @@ void evergreen_pm_misc(struct radeon_device *rdev)
voltage = &rdev->pm.power_state[req_ps_idx].
clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].voltage;
- /* 0xff0x are flags rather then an actual voltage */
- if ((voltage->vddci & 0xff00) == 0xff00)
+	/* 0xff01 is a flag rather than an actual voltage */
+ if (voltage->vddci == 0xff01)
return;
if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
@@ -1760,7 +542,7 @@ bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
case RADEON_HPD_6:
if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
connected = true;
- break;
+ break;
default:
break;
}
@@ -2277,8 +1059,7 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
u32 lb_size, u32 num_heads)
{
struct drm_display_mode *mode = &radeon_crtc->base.mode;
- struct evergreen_wm_params wm_low, wm_high;
- u32 dram_channels;
+ struct evergreen_wm_params wm;
u32 pixel_period;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
@@ -2294,81 +1075,39 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
priority_a_cnt = 0;
priority_b_cnt = 0;
- dram_channels = evergreen_get_number_of_dram_channels(rdev);
-
- /* watermark for high clocks */
- if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
- wm_high.yclk =
- radeon_dpm_get_mclk(rdev, false) * 10;
- wm_high.sclk =
- radeon_dpm_get_sclk(rdev, false) * 10;
- } else {
- wm_high.yclk = rdev->pm.current_mclk * 10;
- wm_high.sclk = rdev->pm.current_sclk * 10;
- }
-
- wm_high.disp_clk = mode->clock;
- wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
- wm_high.blank_time = line_time - wm_high.active_time;
- wm_high.interlaced = false;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- wm_high.interlaced = true;
- wm_high.vsc = radeon_crtc->vsc;
- wm_high.vtaps = 1;
- if (radeon_crtc->rmx_type != RMX_OFF)
- wm_high.vtaps = 2;
- wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
- wm_high.lb_size = lb_size;
- wm_high.dram_channels = dram_channels;
- wm_high.num_heads = num_heads;
-
- /* watermark for low clocks */
- if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
- wm_low.yclk =
- radeon_dpm_get_mclk(rdev, true) * 10;
- wm_low.sclk =
- radeon_dpm_get_sclk(rdev, true) * 10;
- } else {
- wm_low.yclk = rdev->pm.current_mclk * 10;
- wm_low.sclk = rdev->pm.current_sclk * 10;
- }
- wm_low.disp_clk = mode->clock;
- wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
- wm_low.blank_time = line_time - wm_low.active_time;
- wm_low.interlaced = false;
+ wm.yclk = rdev->pm.current_mclk * 10;
+ wm.sclk = rdev->pm.current_sclk * 10;
+ wm.disp_clk = mode->clock;
+ wm.src_width = mode->crtc_hdisplay;
+ wm.active_time = mode->crtc_hdisplay * pixel_period;
+ wm.blank_time = line_time - wm.active_time;
+ wm.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- wm_low.interlaced = true;
- wm_low.vsc = radeon_crtc->vsc;
- wm_low.vtaps = 1;
+ wm.interlaced = true;
+ wm.vsc = radeon_crtc->vsc;
+ wm.vtaps = 1;
if (radeon_crtc->rmx_type != RMX_OFF)
- wm_low.vtaps = 2;
- wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
- wm_low.lb_size = lb_size;
- wm_low.dram_channels = dram_channels;
- wm_low.num_heads = num_heads;
+ wm.vtaps = 2;
+ wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
+ wm.lb_size = lb_size;
+ wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
+ wm.num_heads = num_heads;
/* set for high clocks */
- latency_watermark_a = min(evergreen_latency_watermark(&wm_high), (u32)65535);
+ latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
/* set for low clocks */
- latency_watermark_b = min(evergreen_latency_watermark(&wm_low), (u32)65535);
+ /* wm.yclk = low clk; wm.sclk = low clk */
+ latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);
/* possibly force display priority to high */
/* should really do this at mode validation time... */
- if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
- !evergreen_average_bandwidth_vs_available_bandwidth(&wm_high) ||
- !evergreen_check_latency_hiding(&wm_high) ||
+ if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
+ !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
+ !evergreen_check_latency_hiding(&wm) ||
(rdev->disp_priority == 2)) {
- DRM_DEBUG_KMS("force priority a to high\n");
+ DRM_DEBUG_KMS("force priority to high\n");
priority_a_cnt |= PRIORITY_ALWAYS_ON;
- }
- if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
- !evergreen_average_bandwidth_vs_available_bandwidth(&wm_low) ||
- !evergreen_check_latency_hiding(&wm_low) ||
- (rdev->disp_priority == 2)) {
- DRM_DEBUG_KMS("force priority b to high\n");
priority_b_cnt |= PRIORITY_ALWAYS_ON;
}
@@ -2395,9 +1134,6 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
c.full = dfixed_div(c, a);
priority_b_mark = dfixed_trunc(c);
priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
-
- /* Save number of lines the linebuffer leads before the scanout */
- radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
}
/* select wm A */
@@ -2424,10 +1160,6 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
- /* save values for DPM */
- radeon_crtc->line_time = line_time;
- radeon_crtc->wm_high = latency_watermark_a;
- radeon_crtc->wm_low = latency_watermark_b;
}
/**
@@ -2445,9 +1177,6 @@ void evergreen_bandwidth_update(struct radeon_device *rdev)
u32 num_heads = 0, lb_size;
int i;
- if (!rdev->mode_info.mode_config_initialized)
- return;
-
radeon_update_display_priority(rdev);
for (i = 0; i < rdev->num_crtc; i++) {
@@ -2526,6 +1255,7 @@ static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
+ radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
@@ -2631,160 +1361,16 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT1_CNTL, 0);
}
-static const unsigned ni_dig_offsets[] =
-{
- NI_DIG0_REGISTER_OFFSET,
- NI_DIG1_REGISTER_OFFSET,
- NI_DIG2_REGISTER_OFFSET,
- NI_DIG3_REGISTER_OFFSET,
- NI_DIG4_REGISTER_OFFSET,
- NI_DIG5_REGISTER_OFFSET
-};
-
-static const unsigned ni_tx_offsets[] =
-{
- NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
- NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
- NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
- NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
- NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
- NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
-};
-
-static const unsigned evergreen_dp_offsets[] =
-{
- EVERGREEN_DP0_REGISTER_OFFSET,
- EVERGREEN_DP1_REGISTER_OFFSET,
- EVERGREEN_DP2_REGISTER_OFFSET,
- EVERGREEN_DP3_REGISTER_OFFSET,
- EVERGREEN_DP4_REGISTER_OFFSET,
- EVERGREEN_DP5_REGISTER_OFFSET
-};
-
-
-/*
- * Assumption is that EVERGREEN_CRTC_MASTER_EN is enabled for the requested crtc.
- * We go from crtc to connector, which is not reliable since it
- * should be the opposite direction. If the crtc is enabled, then
- * find the dig_fe which selects this crtc and ensure that it is enabled.
- * If such a dig_fe is found, then find the dig_be which selects the found
- * dig_fe and ensure that it is enabled and in DP_SST mode.
- * If UNIPHY_PLL_CONTROL1.enable is set then we should disconnect timing
- * from the dp symbol clocks.
- */
-static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
- unsigned crtc_id, unsigned *ret_dig_fe)
-{
- unsigned i;
- unsigned dig_fe;
- unsigned dig_be;
- unsigned dig_en_be;
- unsigned uniphy_pll;
- unsigned digs_fe_selected;
- unsigned dig_be_mode;
- unsigned dig_fe_mask;
- bool is_enabled = false;
- bool found_crtc = false;
-
- /* loop through all running dig_fe to find selected crtc */
- for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
- dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
- if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
- crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
- /* found running pipe */
- found_crtc = true;
- dig_fe_mask = 1 << i;
- dig_fe = i;
- break;
- }
- }
-
- if (found_crtc) {
- /* loop through all running dig_be to find selected dig_fe */
- for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
- dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
- /* if dig_fe_selected by dig_be? */
- digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
- dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
- if (dig_fe_mask & digs_fe_selected &&
- /* if dig_be in sst mode? */
- dig_be_mode == NI_DIG_BE_DPSST) {
- dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
- ni_dig_offsets[i]);
- uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
- ni_tx_offsets[i]);
- /* dig_be enable and tx is running */
- if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
- dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
- uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
- is_enabled = true;
- *ret_dig_fe = dig_fe;
- break;
- }
- }
- }
- }
-
- return is_enabled;
-}
-
-/*
- * Blank dig when in dp sst mode
- * Dig ignores crtc timing
- */
-static void evergreen_blank_dp_output(struct radeon_device *rdev,
- unsigned dig_fe)
-{
- unsigned stream_ctrl;
- unsigned fifo_ctrl;
- unsigned counter = 0;
-
- if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
- DRM_ERROR("invalid dig_fe %d\n", dig_fe);
- return;
- }
-
- stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
- evergreen_dp_offsets[dig_fe]);
- if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
- DRM_ERROR("dig %d , should be enable\n", dig_fe);
- return;
- }
-
- stream_ctrl &=~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
- WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
- evergreen_dp_offsets[dig_fe], stream_ctrl);
-
- stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
- evergreen_dp_offsets[dig_fe]);
- while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
- drm_msleep(1);
- counter++;
- stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
- evergreen_dp_offsets[dig_fe]);
- }
- if (counter >= 32 )
- DRM_ERROR("counter exceeds %d\n", counter);
-
- fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
- fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
- WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
-
-}
-
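These two helpers are used as a pair during MC teardown, as the call site
removed from evergreen_mc_stop() further below shows: if CRTC i turns out to
feed a DIG front end running in DP single-stream mode, the video stream is
blanked and the steering FIFO reset before the controller is shut off:

	unsigned dig_fe;

	if (ASIC_IS_DCE5(rdev) &&
	    evergreen_is_dp_sst_stream_enabled(rdev, i, &dig_fe))
		evergreen_blank_dp_output(rdev, dig_fe);
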
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
u32 crtc_enabled, tmp, frame_count, blackout;
int i, j;
- unsigned dig_fe;
- if (!ASIC_IS_NODCE(rdev)) {
- save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
- save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+ save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+ save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
- /* disable VGA render */
- WREG32(VGA_RENDER_CONTROL, 0);
- }
+ /* disable VGA render */
+ WREG32(VGA_RENDER_CONTROL, 0);
/* blank the display controllers */
for (i = 0; i < rdev->num_crtc; i++) {
crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
@@ -2797,7 +1383,6 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
@@ -2816,17 +1401,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
break;
udelay(1);
}
- /*we should disable dig if it drives dp sst*/
- /*but we are in radeon_device_init and the topology is unknown*/
- /*and it is available after radeon_modeset_init*/
- /*the following method radeon_atom_encoder_dpms_dig*/
- /*does the job if we initialize it properly*/
- /*for now we do it this manually*/
- /**/
- if (ASIC_IS_DCE5(rdev) &&
- evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe))
- evergreen_blank_dp_output(rdev, dig_fe);
- /*we could remove 6 lines below*/
+
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
@@ -2886,19 +1461,15 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)rdev->mc.vram_start);
}
-
- if (!ASIC_IS_NODCE(rdev)) {
- WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
- WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
- }
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
/* unlock regs and wait for update */
for (i = 0; i < rdev->num_crtc; i++) {
if (save->crtc_enabled[i]) {
tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
- if ((tmp & 0x7) != 3) {
- tmp &= ~0x7;
- tmp |= 0x3;
+ if ((tmp & 0x3) != 0) {
+ tmp &= ~0x3;
WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
}
tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
@@ -2931,7 +1502,7 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
if (save->crtc_enabled[i]) {
if (ASIC_IS_DCE6(rdev)) {
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
- tmp &= ~EVERGREEN_CRTC_BLANK_DATA_EN;
+ tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
@@ -2951,12 +1522,10 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
}
}
}
- if (!ASIC_IS_NODCE(rdev)) {
- /* Unlock vga access */
- WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
- mdelay(1);
- WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
- }
+ /* Unlock vga access */
+ WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+ mdelay(1);
+ WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}
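
evergreen_mc_stop()/evergreen_mc_resume() bracket any window in which the
memory controller's apertures move: save state and blank scanout,
reprogram, then restore. A condensed sketch of the pattern that
evergreen_mc_program() below relies on:

	struct evergreen_mc_save save;

	evergreen_mc_stop(rdev, &save);		/* blank CRTCs + VGA, save state */
	/* ... reprogram the MC_VM apertures while nothing scans out ... */
	evergreen_mc_resume(rdev, &save);	/* restore scanout addresses */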
void evergreen_mc_program(struct radeon_device *rdev)
@@ -3089,13 +1658,13 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
#endif
RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
- fw_data = (const __be32 *)rdev->pfp_fw->data;
+ fw_data = (const __be32 *)rdev->pfp_fw;
WREG32(CP_PFP_UCODE_ADDR, 0);
for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
WREG32(CP_PFP_UCODE_ADDR, 0);
- fw_data = (const __be32 *)rdev->me_fw->data;
+ fw_data = (const __be32 *)rdev->me_fw;
WREG32(CP_ME_RAM_WADDR, 0);
for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
@@ -3124,7 +1693,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
- radeon_ring_unlock_commit(rdev, ring, false);
+ radeon_ring_unlock_commit(rdev, ring);
cp_me = 0xff;
WREG32(CP_ME_CNTL, cp_me);
@@ -3167,7 +1736,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(ring, 0x00000010); /* */
- radeon_ring_unlock_commit(rdev, ring, false);
+ radeon_ring_unlock_commit(rdev, ring);
return 0;
}
@@ -3192,8 +1761,8 @@ static int evergreen_cp_resume(struct radeon_device *rdev)
RREG32(GRBM_SOFT_RESET);
/* Set ring buffer size */
- rb_bufsz = order_base_2(ring->ring_size / 8);
- tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = drm_order(ring->ring_size / 8);
+ tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -3229,6 +1798,8 @@ static int evergreen_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
+ ring->rptr = RREG32(CP_RB_RPTR);
+
evergreen_cp_start(rdev);
ring->ready = true;
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
@@ -3261,7 +1832,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
u32 vgt_cache_invalidation;
u32 hdp_host_path_cntl, tmp;
u32 disabled_rb_mask;
- int i, j, ps_thread_count;
+ int i, j, num_shader_engines, ps_thread_count;
switch (rdev->family) {
case CHIP_CYPRESS:
@@ -3504,8 +2075,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
}
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
- WREG32(SRBM_INT_CNTL, 0x1);
- WREG32(SRBM_INT_ACK, 0x1);
evergreen_fix_pci_max_read_req_size(rdev);
@@ -3561,12 +2130,16 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.tile_config |=
((gb_addr_config & 0x30000000) >> 28) << 12;
+	num_shader_engines = ((gb_addr_config & NUM_SHADER_ENGINES(3)) >> 12) + 1;
+
if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
u32 efuse_straps_4;
u32 efuse_straps_3;
- efuse_straps_4 = RREG32_RCU(0x204);
- efuse_straps_3 = RREG32_RCU(0x203);
+ WREG32(RCU_IND_INDEX, 0x204);
+ efuse_straps_4 = RREG32(RCU_IND_DATA);
+ WREG32(RCU_IND_INDEX, 0x203);
+ efuse_straps_3 = RREG32(RCU_IND_DATA);
tmp = (((efuse_straps_4 & 0xf) << 4) |
((efuse_straps_3 & 0xf0000000) >> 28));
} else {
@@ -3583,26 +2156,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
}
/* enabled rb are just the one not disabled :) */
disabled_rb_mask = tmp;
- tmp = 0;
- for (i = 0; i < rdev->config.evergreen.max_backends; i++)
- tmp |= (1 << i);
- /* if all the backends are disabled, fix it up here */
- if ((disabled_rb_mask & tmp) == tmp) {
- for (i = 0; i < rdev->config.evergreen.max_backends; i++)
- disabled_rb_mask &= ~(1 << i);
- }
-
- for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
- u32 simd_disable_bitmap;
-
- WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
- WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
- simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
- simd_disable_bitmap |= 0xffffffff << rdev->config.evergreen.max_simds;
- tmp <<= 16;
- tmp |= simd_disable_bitmap;
- }
- rdev->config.evergreen.active_simds = hweight32(~tmp);
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
@@ -3611,9 +2164,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
WREG32(DMA_TILING_CONFIG, gb_addr_config);
- WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
- WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
- WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
if ((rdev->config.evergreen.max_backends == 1) &&
(rdev->flags & RADEON_IS_IGP)) {
@@ -3877,8 +2427,32 @@ int evergreen_mc_init(struct radeon_device *rdev)
return 0;
}
-void evergreen_print_gpu_status_regs(struct radeon_device *rdev)
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 srbm_status;
+ u32 grbm_status;
+ u32 grbm_status_se0, grbm_status_se1;
+
+ srbm_status = RREG32(SRBM_STATUS);
+ grbm_status = RREG32(GRBM_STATUS);
+ grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
+ grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
+ if (!(grbm_status & GUI_ACTIVE)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force CP activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
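
This predicate is polled from the fence-timeout path; a true return is what
escalates to a full GPU reset. A hedged sketch of such a caller, where
radeon_gpu_reset() is assumed as the driver's usual reset entry point:

	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

	if (evergreen_gpu_is_lockup(rdev, ring)) {
		dev_warn(rdev->dev, "GPU lockup, attempting reset\n");
		radeon_gpu_reset(rdev);		/* assumed entry point */
	}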
+
+static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
{
+ u32 grbm_reset = 0;
+
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ return;
+
dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
RREG32(GRBM_STATUS));
dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
@@ -3887,8 +2461,6 @@ void evergreen_print_gpu_status_regs(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
RREG32(SRBM_STATUS));
- dev_info(rdev->dev, " SRBM_STATUS2 = 0x%08X\n",
- RREG32(SRBM_STATUS2));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
@@ -3897,658 +2469,112 @@ void evergreen_print_gpu_status_regs(struct radeon_device *rdev)
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
- dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
- if (rdev->family >= CHIP_CAYMAN) {
- dev_info(rdev->dev, " R_00D834_DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG + 0x800));
- }
-}
-bool evergreen_is_display_hung(struct radeon_device *rdev)
-{
- u32 crtc_hung = 0;
- u32 crtc_status[6];
- u32 i, j, tmp;
-
- for (i = 0; i < rdev->num_crtc; i++) {
- if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN) {
- crtc_status[i] = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
- crtc_hung |= (1 << i);
- }
- }
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
- for (j = 0; j < 10; j++) {
- for (i = 0; i < rdev->num_crtc; i++) {
- if (crtc_hung & (1 << i)) {
- tmp = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
- if (tmp != crtc_status[i])
- crtc_hung &= ~(1 << i);
- }
- }
- if (crtc_hung == 0)
- return false;
- udelay(100);
- }
+ /* reset all the gfx blocks */
+ grbm_reset = (SOFT_RESET_CP |
+ SOFT_RESET_CB |
+ SOFT_RESET_DB |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SH |
+ SOFT_RESET_SX |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VC |
+ SOFT_RESET_VGT);
+
+ dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
+ WREG32(GRBM_SOFT_RESET, grbm_reset);
+ (void)RREG32(GRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(GRBM_SOFT_RESET, 0);
+ (void)RREG32(GRBM_SOFT_RESET);
- return true;
+ dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
+ RREG32(GRBM_STATUS));
+ dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
+ RREG32(GRBM_STATUS_SE0));
+ dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
+ RREG32(GRBM_STATUS_SE1));
+ dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
+ RREG32(SRBM_STATUS));
+ dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT1));
+ dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT2));
+ dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
+ RREG32(CP_BUSY_STAT));
+ dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
+ RREG32(CP_STAT));
}
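
The GRBM reset above uses a pulse idiom worth calling out: write the reset
bits, read the register back so the posted write actually reaches the chip,
wait, then clear the bits and read back again. A generic sketch of the same
sequence, assuming a self-holding soft-reset register; the helper name is
illustrative:

	/* Pulse a soft-reset register: set the mask, post it with a read,
	 * give the blocks time to enter reset, then release. Sketch only. */
	static void soft_reset_pulse(struct radeon_device *rdev, u32 reg, u32 mask)
	{
		WREG32(reg, mask);
		(void)RREG32(reg);	/* posting read flushes the write */
		udelay(50);		/* let the blocks settle in reset */
		WREG32(reg, 0);
		(void)RREG32(reg);	/* post the release as well */
	}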
-u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
+static void evergreen_gpu_soft_reset_dma(struct radeon_device *rdev)
{
- u32 reset_mask = 0;
u32 tmp;
- /* GRBM_STATUS */
- tmp = RREG32(GRBM_STATUS);
- if (tmp & (PA_BUSY | SC_BUSY |
- SH_BUSY | SX_BUSY |
- TA_BUSY | VGT_BUSY |
- DB_BUSY | CB_BUSY |
- SPI_BUSY | VGT_BUSY_NO_DMA))
- reset_mask |= RADEON_RESET_GFX;
-
- if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
- CP_BUSY | CP_COHERENCY_BUSY))
- reset_mask |= RADEON_RESET_CP;
-
- if (tmp & GRBM_EE_BUSY)
- reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
-
- /* DMA_STATUS_REG */
- tmp = RREG32(DMA_STATUS_REG);
- if (!(tmp & DMA_IDLE))
- reset_mask |= RADEON_RESET_DMA;
-
- /* SRBM_STATUS2 */
- tmp = RREG32(SRBM_STATUS2);
- if (tmp & DMA_BUSY)
- reset_mask |= RADEON_RESET_DMA;
-
- /* SRBM_STATUS */
- tmp = RREG32(SRBM_STATUS);
- if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
- reset_mask |= RADEON_RESET_RLC;
-
- if (tmp & IH_BUSY)
- reset_mask |= RADEON_RESET_IH;
-
- if (tmp & SEM_BUSY)
- reset_mask |= RADEON_RESET_SEM;
-
- if (tmp & GRBM_RQ_PENDING)
- reset_mask |= RADEON_RESET_GRBM;
-
- if (tmp & VMC_BUSY)
- reset_mask |= RADEON_RESET_VMC;
-
- if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
- MCC_BUSY | MCD_BUSY))
- reset_mask |= RADEON_RESET_MC;
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ return;
- if (evergreen_is_display_hung(rdev))
- reset_mask |= RADEON_RESET_DISPLAY;
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
- /* VM_L2_STATUS */
- tmp = RREG32(VM_L2_STATUS);
- if (tmp & L2_BUSY)
- reset_mask |= RADEON_RESET_VMC;
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
- /* Skip MC reset as it's most likely not hung, just busy */
- if (reset_mask & RADEON_RESET_MC) {
- DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
- reset_mask &= ~RADEON_RESET_MC;
- }
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
- return reset_mask;
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
}
-static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
struct evergreen_mc_save save;
- u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
- u32 tmp;
- if (reset_mask == 0)
- return;
-
- dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
-
- evergreen_print_gpu_status_regs(rdev);
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
- /* Disable CP parsing/prefetching */
- WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ reset_mask &= ~RADEON_RESET_DMA;
- if (reset_mask & RADEON_RESET_DMA) {
- /* Disable DMA */
- tmp = RREG32(DMA_RB_CNTL);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL, tmp);
- }
+ if (reset_mask == 0)
+ return 0;
- udelay(50);
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
evergreen_mc_stop(rdev, &save);
if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
- if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
- grbm_soft_reset |= SOFT_RESET_DB |
- SOFT_RESET_CB |
- SOFT_RESET_PA |
- SOFT_RESET_SC |
- SOFT_RESET_SPI |
- SOFT_RESET_SX |
- SOFT_RESET_SH |
- SOFT_RESET_TC |
- SOFT_RESET_TA |
- SOFT_RESET_VC |
- SOFT_RESET_VGT;
- }
-
- if (reset_mask & RADEON_RESET_CP) {
- grbm_soft_reset |= SOFT_RESET_CP |
- SOFT_RESET_VGT;
-
- srbm_soft_reset |= SOFT_RESET_GRBM;
- }
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+ evergreen_gpu_soft_reset_gfx(rdev);
if (reset_mask & RADEON_RESET_DMA)
- srbm_soft_reset |= SOFT_RESET_DMA;
-
- if (reset_mask & RADEON_RESET_DISPLAY)
- srbm_soft_reset |= SOFT_RESET_DC;
-
- if (reset_mask & RADEON_RESET_RLC)
- srbm_soft_reset |= SOFT_RESET_RLC;
-
- if (reset_mask & RADEON_RESET_SEM)
- srbm_soft_reset |= SOFT_RESET_SEM;
-
- if (reset_mask & RADEON_RESET_IH)
- srbm_soft_reset |= SOFT_RESET_IH;
-
- if (reset_mask & RADEON_RESET_GRBM)
- srbm_soft_reset |= SOFT_RESET_GRBM;
-
- if (reset_mask & RADEON_RESET_VMC)
- srbm_soft_reset |= SOFT_RESET_VMC;
-
- if (!(rdev->flags & RADEON_IS_IGP)) {
- if (reset_mask & RADEON_RESET_MC)
- srbm_soft_reset |= SOFT_RESET_MC;
- }
-
- if (grbm_soft_reset) {
- tmp = RREG32(GRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(GRBM_SOFT_RESET, tmp);
- tmp = RREG32(GRBM_SOFT_RESET);
-
- udelay(50);
-
- tmp &= ~grbm_soft_reset;
- WREG32(GRBM_SOFT_RESET, tmp);
- tmp = RREG32(GRBM_SOFT_RESET);
- }
-
- if (srbm_soft_reset) {
- tmp = RREG32(SRBM_SOFT_RESET);
- tmp |= srbm_soft_reset;
- dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
-
- udelay(50);
-
- tmp &= ~srbm_soft_reset;
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
- }
+ evergreen_gpu_soft_reset_dma(rdev);
/* Wait a little for things to settle down */
udelay(50);
evergreen_mc_resume(rdev, &save);
- udelay(50);
-
- evergreen_print_gpu_status_regs(rdev);
-}
-
-void evergreen_gpu_pci_config_reset(struct radeon_device *rdev)
-{
- struct evergreen_mc_save save;
- u32 tmp, i;
-
- dev_info(rdev->dev, "GPU pci config reset\n");
-
- /* disable dpm? */
-
- /* Disable CP parsing/prefetching */
- WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
- udelay(50);
- /* Disable DMA */
- tmp = RREG32(DMA_RB_CNTL);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL, tmp);
- /* XXX other engines? */
-
- /* halt the rlc */
- r600_rlc_stop(rdev);
-
- udelay(50);
-
- /* set mclk/sclk to bypass */
- rv770_set_clk_bypass_mode(rdev);
- /* disable BM */
- pci_clear_master(rdev->pdev);
- /* disable mem access */
- evergreen_mc_stop(rdev, &save);
- if (evergreen_mc_wait_for_idle(rdev)) {
- dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
- }
- /* reset */
- radeon_pci_config_reset(rdev);
- /* wait for asic to come out of reset */
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
- break;
- udelay(1);
- }
-}
-
-int evergreen_asic_reset(struct radeon_device *rdev)
-{
- u32 reset_mask;
-
- reset_mask = evergreen_gpu_check_soft_reset(rdev);
-
- if (reset_mask)
- r600_set_bios_scratch_engine_hung(rdev, true);
-
- /* try soft reset */
- evergreen_gpu_soft_reset(rdev, reset_mask);
-
- reset_mask = evergreen_gpu_check_soft_reset(rdev);
-
- /* try pci config reset */
- if (reset_mask && radeon_hard_reset)
- evergreen_gpu_pci_config_reset(rdev);
-
- reset_mask = evergreen_gpu_check_soft_reset(rdev);
-
- if (!reset_mask)
- r600_set_bios_scratch_engine_hung(rdev, false);
-
- return 0;
-}
-
-/**
- * evergreen_gfx_is_lockup - Check if the GFX engine is locked up
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Check if the GFX engine is locked up.
- * Returns true if the engine appears to be locked up, false if not.
- */
-bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
-
- if (!(reset_mask & (RADEON_RESET_GFX |
- RADEON_RESET_COMPUTE |
- RADEON_RESET_CP))) {
- radeon_ring_lockup_update(rdev, ring);
- return false;
- }
- return radeon_ring_test_lockup(rdev, ring);
-}
-
-/*
- * RLC
- */
-#define RLC_SAVE_RESTORE_LIST_END_MARKER 0x00000000
-#define RLC_CLEAR_STATE_END_MARKER 0x00000001
-
-void sumo_rlc_fini(struct radeon_device *rdev)
-{
- int r;
-
- /* save restore block */
- if (rdev->rlc.save_restore_obj) {
- r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
- if (unlikely(r != 0))
- dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r);
- radeon_bo_unpin(rdev->rlc.save_restore_obj);
- radeon_bo_unreserve(rdev->rlc.save_restore_obj);
-
- radeon_bo_unref(&rdev->rlc.save_restore_obj);
- rdev->rlc.save_restore_obj = NULL;
- }
-
- /* clear state block */
- if (rdev->rlc.clear_state_obj) {
- r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
- if (unlikely(r != 0))
- dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r);
- radeon_bo_unpin(rdev->rlc.clear_state_obj);
- radeon_bo_unreserve(rdev->rlc.clear_state_obj);
-
- radeon_bo_unref(&rdev->rlc.clear_state_obj);
- rdev->rlc.clear_state_obj = NULL;
- }
-
- /* clear state block */
- if (rdev->rlc.cp_table_obj) {
- r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
- if (unlikely(r != 0))
- dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
- radeon_bo_unpin(rdev->rlc.cp_table_obj);
- radeon_bo_unreserve(rdev->rlc.cp_table_obj);
-
- radeon_bo_unref(&rdev->rlc.cp_table_obj);
- rdev->rlc.cp_table_obj = NULL;
- }
-}
-
-#define CP_ME_TABLE_SIZE 96
-
-int sumo_rlc_init(struct radeon_device *rdev)
-{
- const u32 *src_ptr;
- volatile u32 *dst_ptr;
- u32 dws, data, i, j, k, reg_num;
- u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index = 0;
- u64 reg_list_mc_addr;
- const struct cs_section_def *cs_data;
- int r;
-
- src_ptr = rdev->rlc.reg_list;
- dws = rdev->rlc.reg_list_size;
- if (rdev->family >= CHIP_BONAIRE) {
- dws += (5 * 16) + 48 + 48 + 64;
- }
- cs_data = rdev->rlc.cs_data;
-
- if (src_ptr) {
- /* save restore block */
- if (rdev->rlc.save_restore_obj == NULL) {
- r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, 0, NULL,
- NULL, &rdev->rlc.save_restore_obj);
- if (r) {
- dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
- return r;
- }
- }
-
- r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
- if (unlikely(r != 0)) {
- sumo_rlc_fini(rdev);
- return r;
- }
- r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->rlc.save_restore_gpu_addr);
- if (r) {
- radeon_bo_unreserve(rdev->rlc.save_restore_obj);
- dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
- sumo_rlc_fini(rdev);
- return r;
- }
-
- r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
- if (r) {
- dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
- sumo_rlc_fini(rdev);
- return r;
- }
- /* write the sr buffer */
- dst_ptr = rdev->rlc.sr_ptr;
- if (rdev->family >= CHIP_TAHITI) {
- /* SI */
- for (i = 0; i < rdev->rlc.reg_list_size; i++)
- dst_ptr[i] = cpu_to_le32(src_ptr[i]);
- } else {
- /* ON/LN/TN */
- /* format:
- * dw0: (reg2 << 16) | reg1
- * dw1: reg1 save space
- * dw2: reg2 save space
- */
- for (i = 0; i < dws; i++) {
- data = src_ptr[i] >> 2;
- i++;
- if (i < dws)
- data |= (src_ptr[i] >> 2) << 16;
- j = (((i - 1) * 3) / 2);
- dst_ptr[j] = cpu_to_le32(data);
- }
- j = ((i * 3) / 2);
- dst_ptr[j] = cpu_to_le32(RLC_SAVE_RESTORE_LIST_END_MARKER);
- }
- radeon_bo_kunmap(rdev->rlc.save_restore_obj);
- radeon_bo_unreserve(rdev->rlc.save_restore_obj);
- }
-
- if (cs_data) {
- /* clear state block */
- if (rdev->family >= CHIP_BONAIRE) {
- rdev->rlc.clear_state_size = dws = cik_get_csb_size(rdev);
- } else if (rdev->family >= CHIP_TAHITI) {
- rdev->rlc.clear_state_size = si_get_csb_size(rdev);
- dws = rdev->rlc.clear_state_size + (256 / 4);
- } else {
- reg_list_num = 0;
- dws = 0;
- for (i = 0; cs_data[i].section != NULL; i++) {
- for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
- reg_list_num++;
- dws += cs_data[i].section[j].reg_count;
- }
- }
- reg_list_blk_index = (3 * reg_list_num + 2);
- dws += reg_list_blk_index;
- rdev->rlc.clear_state_size = dws;
- }
-
- if (rdev->rlc.clear_state_obj == NULL) {
- r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, 0, NULL,
- NULL, &rdev->rlc.clear_state_obj);
- if (r) {
- dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
- sumo_rlc_fini(rdev);
- return r;
- }
- }
- r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
- if (unlikely(r != 0)) {
- sumo_rlc_fini(rdev);
- return r;
- }
- r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->rlc.clear_state_gpu_addr);
- if (r) {
- radeon_bo_unreserve(rdev->rlc.clear_state_obj);
- dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
- sumo_rlc_fini(rdev);
- return r;
- }
-
- r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
- if (r) {
- dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
- sumo_rlc_fini(rdev);
- return r;
- }
- /* set up the cs buffer */
- dst_ptr = rdev->rlc.cs_ptr;
- if (rdev->family >= CHIP_BONAIRE) {
- cik_get_csb_buffer(rdev, dst_ptr);
- } else if (rdev->family >= CHIP_TAHITI) {
- reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + 256;
- dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr));
- dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr));
- dst_ptr[2] = cpu_to_le32(rdev->rlc.clear_state_size);
- si_get_csb_buffer(rdev, &dst_ptr[(256/4)]);
- } else {
- reg_list_hdr_blk_index = 0;
- reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
- data = upper_32_bits(reg_list_mc_addr);
- dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
- reg_list_hdr_blk_index++;
- for (i = 0; cs_data[i].section != NULL; i++) {
- for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
- reg_num = cs_data[i].section[j].reg_count;
- data = reg_list_mc_addr & 0xffffffff;
- dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
- reg_list_hdr_blk_index++;
-
- data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
- dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
- reg_list_hdr_blk_index++;
-
- data = 0x08000000 | (reg_num * 4);
- dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(data);
- reg_list_hdr_blk_index++;
-
- for (k = 0; k < reg_num; k++) {
- data = cs_data[i].section[j].extent[k];
- dst_ptr[reg_list_blk_index + k] = cpu_to_le32(data);
- }
- reg_list_mc_addr += reg_num * 4;
- reg_list_blk_index += reg_num;
- }
- }
- dst_ptr[reg_list_hdr_blk_index] = cpu_to_le32(RLC_CLEAR_STATE_END_MARKER);
- }
- radeon_bo_kunmap(rdev->rlc.clear_state_obj);
- radeon_bo_unreserve(rdev->rlc.clear_state_obj);
- }
-
- if (rdev->rlc.cp_table_size) {
- if (rdev->rlc.cp_table_obj == NULL) {
- r = radeon_bo_create(rdev, rdev->rlc.cp_table_size,
- PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, 0, NULL,
- NULL, &rdev->rlc.cp_table_obj);
- if (r) {
- dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
- sumo_rlc_fini(rdev);
- return r;
- }
- }
-
- r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
- if (unlikely(r != 0)) {
- dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
- sumo_rlc_fini(rdev);
- return r;
- }
- r = radeon_bo_pin(rdev->rlc.cp_table_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->rlc.cp_table_gpu_addr);
- if (r) {
- radeon_bo_unreserve(rdev->rlc.cp_table_obj);
- dev_warn(rdev->dev, "(%d) pin RLC cp_table bo failed\n", r);
- sumo_rlc_fini(rdev);
- return r;
- }
- r = radeon_bo_kmap(rdev->rlc.cp_table_obj, (void **)&rdev->rlc.cp_table_ptr);
- if (r) {
- dev_warn(rdev->dev, "(%d) map RLC cp table bo failed\n", r);
- sumo_rlc_fini(rdev);
- return r;
- }
-
- cik_init_cp_pg_table(rdev);
-
- radeon_bo_kunmap(rdev->rlc.cp_table_obj);
- radeon_bo_unreserve(rdev->rlc.cp_table_obj);
-
- }
-
return 0;
}
-static void evergreen_rlc_start(struct radeon_device *rdev)
-{
- u32 mask = RLC_ENABLE;
-
- if (rdev->flags & RADEON_IS_IGP) {
- mask |= GFX_POWER_GATING_ENABLE | GFX_POWER_GATING_SRC;
- }
-
- WREG32(RLC_CNTL, mask);
-}
-
-int evergreen_rlc_resume(struct radeon_device *rdev)
+int evergreen_asic_reset(struct radeon_device *rdev)
{
- u32 i;
- const __be32 *fw_data;
-
- if (!rdev->rlc_fw)
- return -EINVAL;
-
- r600_rlc_stop(rdev);
-
- WREG32(RLC_HB_CNTL, 0);
-
- if (rdev->flags & RADEON_IS_IGP) {
- if (rdev->family == CHIP_ARUBA) {
- u32 always_on_bitmap =
- 3 | (3 << (16 * rdev->config.cayman.max_shader_engines));
- /* find out the number of active simds */
- u32 tmp = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
- tmp |= 0xffffffff << rdev->config.cayman.max_simds_per_se;
- tmp = hweight32(~tmp);
- if (tmp == rdev->config.cayman.max_simds_per_se) {
- WREG32(TN_RLC_LB_ALWAYS_ACTIVE_SIMD_MASK, always_on_bitmap);
- WREG32(TN_RLC_LB_PARAMS, 0x00601004);
- WREG32(TN_RLC_LB_INIT_SIMD_MASK, 0xffffffff);
- WREG32(TN_RLC_LB_CNTR_INIT, 0x00000000);
- WREG32(TN_RLC_LB_CNTR_MAX, 0x00002000);
- }
- } else {
- WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
- WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
- }
- WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
- WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
- } else {
- WREG32(RLC_HB_BASE, 0);
- WREG32(RLC_HB_RPTR, 0);
- WREG32(RLC_HB_WPTR, 0);
- WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
- WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
- }
- WREG32(RLC_MC_CNTL, 0);
- WREG32(RLC_UCODE_CNTL, 0);
-
- fw_data = (const __be32 *)rdev->rlc_fw->data;
- if (rdev->family >= CHIP_ARUBA) {
- for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
- WREG32(RLC_UCODE_ADDR, i);
- WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
- }
- } else if (rdev->family >= CHIP_CAYMAN) {
- for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
- WREG32(RLC_UCODE_ADDR, i);
- WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
- }
- } else {
- for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
- WREG32(RLC_UCODE_ADDR, i);
- WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
- }
- }
- WREG32(RLC_UCODE_ADDR, 0);
-
- evergreen_rlc_start(rdev);
-
- return 0;
+ return evergreen_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_DMA));
}
/* Interrupts */
@@ -4577,7 +2603,6 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
WREG32(DMA_CNTL, tmp);
WREG32(GRBM_INT_CNTL, 0);
- WREG32(SRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
if (rdev->num_crtc >= 4) {
@@ -4600,8 +2625,8 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
- /* only one DAC on DCE5 */
- if (!ASIC_IS_DCE5(rdev))
+ /* only one DAC on DCE6 */
+ if (!ASIC_IS_DCE6(rdev))
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
@@ -4627,9 +2652,9 @@ int evergreen_irq_set(struct radeon_device *rdev)
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
+ u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
u32 dma_cntl, dma_cntl1 = 0;
- u32 thermal_int = 0;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -4643,18 +2668,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
return 0;
}
- hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
- hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
- hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
- hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
- hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
- hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
- if (rdev->family == CHIP_ARUBA)
- thermal_int = RREG32(TN_CG_THERMAL_INT_CTRL) &
- ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
- else
- thermal_int = RREG32(CG_THERMAL_INT) &
- ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+ hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
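
Note the pattern in this function: every interrupt-control register is read
once into a local shadow with its enable bits masked off, the shadows are
updated per requested source, and only then is everything written back. A
stripped-down sketch of that read-modify-write shape, assuming a register
with a single enable bit; the helper is illustrative, not a radeon function:

	/* Shadowed RMW update of an interrupt-control register. Sketch only. */
	static void irq_ctl_update(struct radeon_device *rdev, u32 reg,
				   u32 enable_bit, bool enable)
	{
		u32 shadow = RREG32(reg) & ~enable_bit;	/* read, mask off */

		if (enable)
			shadow |= enable_bit;		/* set per request */
		WREG32(reg, shadow);			/* commit once */
	}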
@@ -4700,11 +2719,6 @@ int evergreen_irq_set(struct radeon_device *rdev)
}
}
- if (rdev->irq.dpm_thermal) {
- DRM_DEBUG("dpm thermal\n");
- thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
- }
-
if (rdev->irq.crtc_vblank_int[0] ||
atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -4737,27 +2751,27 @@ int evergreen_irq_set(struct radeon_device *rdev)
}
if (rdev->irq.hpd[0]) {
DRM_DEBUG("evergreen_irq_set: hpd 1\n");
- hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
+ hpd1 |= DC_HPDx_INT_EN;
}
if (rdev->irq.hpd[1]) {
DRM_DEBUG("evergreen_irq_set: hpd 2\n");
- hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
+ hpd2 |= DC_HPDx_INT_EN;
}
if (rdev->irq.hpd[2]) {
DRM_DEBUG("evergreen_irq_set: hpd 3\n");
- hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
+ hpd3 |= DC_HPDx_INT_EN;
}
if (rdev->irq.hpd[3]) {
DRM_DEBUG("evergreen_irq_set: hpd 4\n");
- hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
+ hpd4 |= DC_HPDx_INT_EN;
}
if (rdev->irq.hpd[4]) {
DRM_DEBUG("evergreen_irq_set: hpd 5\n");
- hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
+ hpd5 |= DC_HPDx_INT_EN;
}
if (rdev->irq.hpd[5]) {
DRM_DEBUG("evergreen_irq_set: hpd 6\n");
- hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
+ hpd6 |= DC_HPDx_INT_EN;
}
if (rdev->irq.afmt[0]) {
DRM_DEBUG("evergreen_irq_set: hdmi 0\n");
@@ -4809,21 +2823,15 @@ int evergreen_irq_set(struct radeon_device *rdev)
WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
}
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
- GRPH_PFLIP_INT_MASK);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
- GRPH_PFLIP_INT_MASK);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
if (rdev->num_crtc >= 4) {
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
- GRPH_PFLIP_INT_MASK);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
- GRPH_PFLIP_INT_MASK);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
}
if (rdev->num_crtc >= 6) {
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
- GRPH_PFLIP_INT_MASK);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
- GRPH_PFLIP_INT_MASK);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
}
WREG32(DC_HPD1_INT_CONTROL, hpd1);
@@ -4832,10 +2840,6 @@ int evergreen_irq_set(struct radeon_device *rdev)
WREG32(DC_HPD4_INT_CONTROL, hpd4);
WREG32(DC_HPD5_INT_CONTROL, hpd5);
WREG32(DC_HPD6_INT_CONTROL, hpd6);
- if (rdev->family == CHIP_ARUBA)
- WREG32(TN_CG_THERMAL_INT_CTRL, thermal_int);
- else
- WREG32(CG_THERMAL_INT, thermal_int);
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1);
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2);
@@ -4844,9 +2848,6 @@ int evergreen_irq_set(struct radeon_device *rdev)
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
- /* posting read */
- RREG32(SRBM_STATUS);
-
return 0;
}
@@ -4947,42 +2948,10 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- tmp = RREG32(DC_HPD6_INT_CONTROL);
- tmp |= DC_HPDx_INT_ACK;
- WREG32(DC_HPD6_INT_CONTROL, tmp);
- }
-
- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
- tmp = RREG32(DC_HPD1_INT_CONTROL);
- tmp |= DC_HPDx_RX_INT_ACK;
- WREG32(DC_HPD1_INT_CONTROL, tmp);
- }
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
- tmp = RREG32(DC_HPD2_INT_CONTROL);
- tmp |= DC_HPDx_RX_INT_ACK;
- WREG32(DC_HPD2_INT_CONTROL, tmp);
- }
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
- tmp = RREG32(DC_HPD3_INT_CONTROL);
- tmp |= DC_HPDx_RX_INT_ACK;
- WREG32(DC_HPD3_INT_CONTROL, tmp);
- }
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
- tmp = RREG32(DC_HPD4_INT_CONTROL);
- tmp |= DC_HPDx_RX_INT_ACK;
- WREG32(DC_HPD4_INT_CONTROL, tmp);
- }
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
tmp = RREG32(DC_HPD5_INT_CONTROL);
- tmp |= DC_HPDx_RX_INT_ACK;
- WREG32(DC_HPD5_INT_CONTROL, tmp);
- }
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- tmp = RREG32(DC_HPD6_INT_CONTROL);
- tmp |= DC_HPDx_RX_INT_ACK;
+ tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
-
if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
@@ -5040,13 +3009,12 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
wptr = RREG32(IH_RB_WPTR);
if (wptr & RB_OVERFLOW) {
- wptr &= ~RB_OVERFLOW;
/* When a ring buffer overflow happens, start parsing interrupts
* from the last vector that was not overwritten (wptr + 16).
* Hopefully this allows us to catch up.
*/
- dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
- wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+ wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
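
The masking arithmetic works because the IH ring size is a power of two:
ptr_mask is size - 1, so the AND is a cheap modulo that wraps the byte offset
back into the ring. A worked sketch under that assumption:

	/* Wrap a byte offset into a power-of-two ring. With a 64 KiB ring,
	 * the mask is 0xffff, so e.g. an offset of 0x10010 wraps to 0x0010.
	 * Sketch only; assumes ring_size is a power of two. */
	static inline u32 ih_ring_wrap(u32 offset, u32 ring_size)
	{
		return offset & (ring_size - 1);
	}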
@@ -5063,22 +3031,21 @@ int evergreen_irq_process(struct radeon_device *rdev)
u32 ring_index;
bool queue_hotplug = false;
bool queue_hdmi = false;
- bool queue_dp = false;
- bool queue_thermal = false;
- u32 status, addr;
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
wptr = evergreen_get_ih_wptr(rdev);
+ if (wptr == rdev->ih.rptr)
+ return IRQ_NONE;
restart_ih:
/* is somebody else already processing irqs? */
if (atomic_xchg(&rdev->ih.lock, 1))
return IRQ_NONE;
rptr = rdev->ih.rptr;
- DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+ DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
/* Order reading of wptr vs. reading of IH ring data */
rmb();
@@ -5096,27 +3063,23 @@ restart_ih:
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
- DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
-
- if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_flip(rdev, 0);
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
}
- if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_vblank(rdev, 0);
- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D1 vblank\n");
-
break;
case 1: /* D1 vline */
- if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
- DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D1 vline\n");
-
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+ }
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5126,27 +3089,23 @@ restart_ih:
case 2: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
- DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
-
- if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_flip(rdev, 1);
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
}
- if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_vblank(rdev, 1);
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D2 vblank\n");
-
break;
case 1: /* D2 vline */
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
- DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D2 vline\n");
-
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+ }
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5156,27 +3115,23 @@ restart_ih:
case 3: /* D3 vblank/vline */
switch (src_data) {
case 0: /* D3 vblank */
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
- DRM_DEBUG("IH: D3 vblank - IH event w/o asserted irq bit?\n");
-
- if (rdev->irq.crtc_vblank_int[2]) {
- drm_handle_vblank(rdev->ddev, 2);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[2]) {
+ drm_handle_vblank(rdev->ddev, 2);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[2]))
+ radeon_crtc_handle_flip(rdev, 2);
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D3 vblank\n");
}
- if (atomic_read(&rdev->irq.pflip[2]))
- radeon_crtc_handle_vblank(rdev, 2);
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D3 vblank\n");
-
break;
case 1: /* D3 vline */
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
- DRM_DEBUG("IH: D3 vline - IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D3 vline\n");
-
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D3 vline\n");
+ }
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5186,27 +3141,23 @@ restart_ih:
case 4: /* D4 vblank/vline */
switch (src_data) {
case 0: /* D4 vblank */
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
- DRM_DEBUG("IH: D4 vblank - IH event w/o asserted irq bit?\n");
-
- if (rdev->irq.crtc_vblank_int[3]) {
- drm_handle_vblank(rdev->ddev, 3);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[3]) {
+ drm_handle_vblank(rdev->ddev, 3);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[3]))
+ radeon_crtc_handle_flip(rdev, 3);
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D4 vblank\n");
}
- if (atomic_read(&rdev->irq.pflip[3]))
- radeon_crtc_handle_vblank(rdev, 3);
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D4 vblank\n");
-
break;
case 1: /* D4 vline */
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
- DRM_DEBUG("IH: D4 vline - IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D4 vline\n");
-
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D4 vline\n");
+ }
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5216,27 +3167,23 @@ restart_ih:
case 5: /* D5 vblank/vline */
switch (src_data) {
case 0: /* D5 vblank */
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
- DRM_DEBUG("IH: D5 vblank - IH event w/o asserted irq bit?\n");
-
- if (rdev->irq.crtc_vblank_int[4]) {
- drm_handle_vblank(rdev->ddev, 4);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[4]) {
+ drm_handle_vblank(rdev->ddev, 4);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[4]))
+ radeon_crtc_handle_flip(rdev, 4);
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D5 vblank\n");
}
- if (atomic_read(&rdev->irq.pflip[4]))
- radeon_crtc_handle_vblank(rdev, 4);
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D5 vblank\n");
-
break;
case 1: /* D5 vline */
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
- DRM_DEBUG("IH: D5 vline - IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D5 vline\n");
-
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D5 vline\n");
+ }
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5246,140 +3193,72 @@ restart_ih:
case 6: /* D6 vblank/vline */
switch (src_data) {
case 0: /* D6 vblank */
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
- DRM_DEBUG("IH: D6 vblank - IH event w/o asserted irq bit?\n");
-
- if (rdev->irq.crtc_vblank_int[5]) {
- drm_handle_vblank(rdev->ddev, 5);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[5]) {
+ drm_handle_vblank(rdev->ddev, 5);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[5]))
+ radeon_crtc_handle_flip(rdev, 5);
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D6 vblank\n");
}
- if (atomic_read(&rdev->irq.pflip[5]))
- radeon_crtc_handle_vblank(rdev, 5);
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D6 vblank\n");
-
break;
case 1: /* D6 vline */
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
- DRM_DEBUG("IH: D6 vline - IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D6 vline\n");
-
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D6 vline\n");
+ }
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
- case 8: /* D1 page flip */
- case 10: /* D2 page flip */
- case 12: /* D3 page flip */
- case 14: /* D4 page flip */
- case 16: /* D5 page flip */
- case 18: /* D6 page flip */
- DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
- if (radeon_use_pflipirq > 0)
- radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
- break;
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
- if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD1\n");
+ if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
+ }
break;
case 1:
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD2\n");
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
+ }
break;
case 2:
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD3\n");
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
+ }
break;
case 3:
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD4\n");
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
+ }
break;
case 4:
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD5\n");
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
+ }
break;
case 5:
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD6\n");
- break;
- case 6:
- if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 1\n");
- break;
- case 7:
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 2\n");
- break;
- case 8:
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 3\n");
- break;
- case 9:
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 4\n");
- break;
- case 10:
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 5\n");
- break;
- case 11:
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 6\n");
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+ }
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5389,79 +3268,61 @@ restart_ih:
case 44: /* hdmi */
switch (src_data) {
case 0:
- if (!(rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI0\n");
+ if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI0\n");
+ }
break;
case 1:
- if (!(rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI1\n");
+ if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI1\n");
+ }
break;
case 2:
- if (!(rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI2\n");
+ if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI2\n");
+ }
break;
case 3:
- if (!(rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI3\n");
+ if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI3\n");
+ }
break;
case 4:
- if (!(rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI4\n");
+ if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI4\n");
+ }
break;
case 5:
- if (!(rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI5\n");
+ if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI5\n");
+ }
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
- case 96:
- DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
- WREG32(SRBM_INT_ACK, 0x1);
- break;
- case 124: /* UVD */
- DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
- radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
break;
case 146:
case 147:
- addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
- status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
- /* reset addr and status */
- WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
- if (addr == 0x0 && status == 0x0)
- break;
dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
- addr);
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
- status);
- cayman_vm_decode_fault(rdev, status, addr);
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
@@ -5490,16 +3351,6 @@ restart_ih:
DRM_DEBUG("IH: DMA trap\n");
radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
break;
- case 230: /* thermal low to high */
- DRM_DEBUG("IH: thermal low to high\n");
- rdev->pm.dpm.thermal.high_to_low = false;
- queue_thermal = true;
- break;
- case 231: /* thermal high to low */
- DRM_DEBUG("IH: thermal high to low\n");
- rdev->pm.dpm.thermal.high_to_low = true;
- queue_thermal = true;
- break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
break;
@@ -5517,17 +3368,13 @@ restart_ih:
/* wptr/rptr are in bytes! */
rptr += 16;
rptr &= rdev->ih.ptr_mask;
- WREG32(IH_RB_RPTR, rptr);
}
- if (queue_dp)
- schedule_work(&rdev->dp_work);
if (queue_hotplug)
- schedule_delayed_work(&rdev->hotplug_work, 0);
+ task_add(systq, &rdev->hotplug_task);
if (queue_hdmi)
- schedule_work(&rdev->audio_work);
- if (queue_thermal && rdev->pm.dpm_enabled)
- schedule_work(&rdev->pm.dpm.thermal.work);
+ task_add(systq, &rdev->audio_task);
rdev->ih.rptr = rptr;
+ WREG32(IH_RB_RPTR, rdev->ih.rptr);
atomic_set(&rdev->ih.lock, 0);
/* make sure wptr hasn't changed while processing */
@@ -5538,31 +3385,180 @@ restart_ih:
return IRQ_HANDLED;
}
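
For reference, rptr advances in bytes because each IH vector occupies four
little-endian DWs (16 bytes), with the source id in dw0 and the source data
in dw1. A sketch of one vector fetch under that r600-family layout; the
helper itself is illustrative:

	/* Fetch one IH vector at byte offset rptr. Sketch only; the 0xff
	 * and 0xfffffff field masks follow the r600-family IH layout. */
	static void ih_fetch_vector(struct radeon_device *rdev, u32 rptr,
				    u32 *src_id, u32 *src_data)
	{
		u32 ring_index = rptr / 4;	/* byte offset -> DW index */

		*src_id   = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		*src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
	}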
+/**
+ * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write the fence
+ * sequence number, followed by a DMA trap packet to generate
+ * an interrupt if needed (evergreen-SI).
+ */
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+ /* write the fence */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+ radeon_ring_write(ring, fence->seq);
+ /* generate an interrupt */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+ /* flush HDP */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+}
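
The packet stream above writes fence->seq to the fence GPU address, raises a
trap interrupt, and flushes HDP so the write is visible to the CPU. On the
consumer side the driver only has to compare the last value the DMA engine
wrote against a target sequence. A minimal sketch of that check, ignoring
sequence wraparound; the helper is an illustrative assumption:

	/* Has the DMA engine written a fence value >= target_seq yet?
	 * Sketch only; real code must handle 32-bit seq wraparound. */
	static bool dma_fence_signaled(volatile u32 *fence_cpu_addr, u32 target_seq)
	{
		return *fence_cpu_addr >= target_seq;
	}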
+
+/**
+ * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (evergreen).
+ */
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
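
The (ring->wptr & 7) != 5 padding looks arbitrary until you note that the
INDIRECT_BUFFER packet emitted next is exactly three DWs: starting it at
offset 5 (mod 8) makes it end on the required 8 DW boundary (5 + 3 = 8).
The same padding in closed form, as a sketch; the helper is illustrative:

	/* Number of NOP DWs needed before a 3-DW packet so that it ends on
	 * an 8 DW boundary, i.e. so the packet starts at offset 5 (mod 8).
	 * Equivalent to the while loop above. Sketch only. */
	static inline u32 dma_ib_pad_count(u32 wptr)
	{
		return (5 - (wptr & 7)) & 7;
	}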
+
+/**
+ * evergreen_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (evergreen-cayman).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int evergreen_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFFF)
+ cur_size_in_dw = 0xFFFFF;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
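
The copy is chunked because a single DMA_PACKET_COPY header encodes at most
0xFFFFF DWs; DIV_ROUND_UP sizes the loop and each pass clamps what remains.
The chunking arithmetic in isolation, as a sketch — the emit() callback
stands in for the five ring writes and is an illustrative assumption:

	/* Split a DW count into at-most-0xFFFFF-DW pieces, mirroring the
	 * loop in evergreen_copy_dma(). Sketch only. */
	static void dma_copy_chunks(u32 size_in_dw, void (*emit)(u32 cur_dw))
	{
		while (size_in_dw) {
			u32 cur = min_t(u32, size_in_dw, 0xFFFFF);

			emit(cur);
			size_in_dw -= cur;
		}
	}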
+
static int evergreen_startup(struct radeon_device *rdev)
{
- struct radeon_ring *ring;
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* enable pcie gen2 link */
evergreen_pcie_gen2_enable(rdev);
- /* enable aspm */
- evergreen_program_aspm(rdev);
-
- /* scratch needs to be initialized before MC */
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
evergreen_mc_program(rdev);
- if (ASIC_IS_DCE5(rdev) && !rdev->pm.dpm_enabled) {
+ if (ASIC_IS_DCE5(rdev)) {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+ r = ni_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
r = ni_mc_load_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load MC firmware!\n");
return r;
}
+ } else {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
}
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
if (rdev->flags & RADEON_IS_AGP) {
evergreen_agp_enable(rdev);
} else {
@@ -5572,17 +3568,11 @@ static int evergreen_startup(struct radeon_device *rdev)
}
evergreen_gpu_init(rdev);
- /* allocate rlc buffers */
- if (rdev->flags & RADEON_IS_IGP) {
- rdev->rlc.reg_list = sumo_rlc_save_restore_register_list;
- rdev->rlc.reg_list_size =
- (u32)ARRAY_SIZE(sumo_rlc_save_restore_register_list);
- rdev->rlc.cs_data = evergreen_cs_data;
- r = sumo_rlc_init(rdev);
- if (r) {
- DRM_ERROR("Failed to init rlc BOs!\n");
- return r;
- }
+ r = evergreen_blit_init(rdev);
+ if (r) {
+ r600_blit_fini(rdev);
+ rdev->asic->copy.copy = NULL;
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
/* allocate wb buffer */
@@ -5602,17 +3592,6 @@ static int evergreen_startup(struct radeon_device *rdev)
return r;
}
- r = uvd_v2_2_resume(rdev);
- if (!r) {
- r = radeon_fence_driver_start_ring(rdev,
- R600_RING_TYPE_UVD_INDEX);
- if (r)
- dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
- }
-
- if (r)
- rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
-
/* Enable IRQ */
if (!rdev->irq.installed) {
r = radeon_irq_kms_init(rdev);
@@ -5628,15 +3607,16 @@ static int evergreen_startup(struct radeon_device *rdev)
}
evergreen_irq_set(rdev);
- ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- RADEON_CP_PACKET2);
+ R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
- DMA_PACKET(DMA_PACKET_NOP, 0, 0));
+ DMA_RB_RPTR, DMA_RB_WPTR,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
@@ -5650,24 +3630,13 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
- ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- RADEON_CP_PACKET2);
- if (!r)
- r = uvd_v1_0_init(rdev);
-
- if (r)
- DRM_ERROR("radeon: error initializing UVD (%d).\n", r);
- }
-
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}
- r = radeon_audio_init(rdev);
+ r = r600_audio_init(rdev);
if (r) {
DRM_ERROR("radeon: audio init failed\n");
return r;
@@ -5692,12 +3661,6 @@ int evergreen_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
- /* init golden registers */
- evergreen_init_golden_registers(rdev);
-
- if (rdev->pm.pm_method == PM_METHOD_DPM)
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = evergreen_startup(rdev);
if (r) {
@@ -5712,10 +3675,7 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
- radeon_pm_suspend(rdev);
- radeon_audio_fini(rdev);
- uvd_v1_0_fini(rdev);
- radeon_uvd_suspend(rdev);
+ r600_audio_fini(rdev);
r700_cp_stop(rdev);
r600_dma_stop(rdev);
evergreen_irq_suspend(rdev);
@@ -5762,8 +3722,6 @@ int evergreen_init(struct radeon_device *rdev)
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
- /* init golden registers */
- evergreen_init_golden_registers(rdev);
/* Initialize scratch registers */
r600_scratch_init(rdev);
/* Initialize surface registers */
@@ -5789,40 +3747,12 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
- if (ASIC_IS_DCE5(rdev)) {
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
- r = ni_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
- } else {
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
- r = r600_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
- }
-
- /* Initialize power management */
- radeon_pm_init(rdev);
-
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
- r = radeon_uvd_init(rdev);
- if (!r) {
- rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
- r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
- 4096);
- }
-
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -5837,8 +3767,6 @@ int evergreen_init(struct radeon_device *rdev)
r700_cp_fini(rdev);
r600_dma_fini(rdev);
r600_irq_fini(rdev);
- if (rdev->flags & RADEON_IS_IGP)
- sumo_rlc_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
@@ -5862,18 +3790,14 @@ int evergreen_init(struct radeon_device *rdev)
void evergreen_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
- radeon_audio_fini(rdev);
+ r600_audio_fini(rdev);
+ r600_blit_fini(rdev);
r700_cp_fini(rdev);
r600_dma_fini(rdev);
r600_irq_fini(rdev);
- if (rdev->flags & RADEON_IS_IGP)
- sumo_rlc_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
- uvd_v1_0_fini(rdev);
- radeon_uvd_fini(rdev);
evergreen_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
@@ -5887,8 +3811,8 @@ void evergreen_fini(struct radeon_device *rdev)
void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
- u32 link_width_cntl, speed_cntl;
- u32 mask;
+ u32 link_width_cntl, speed_cntl, mask;
+ int ret;
if (radeon_pcie_gen2 == 0)
return;
@@ -5902,14 +3826,15 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
/* x2 cards have a special sequence */
if (ASIC_IS_X2(rdev))
return;
-
- if (drm_pcie_get_speed_cap_mask(rdev->ddev, &mask))
+
+ ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
+ if (ret != 0)
return;
- if (!(mask & (DRM_PCIE_SPEED_50|DRM_PCIE_SPEED_80)))
+ if (!(mask & DRM_PCIE_SPEED_50))
return;
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
if (speed_cntl & LC_CURRENT_DATA_RATE) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
@@ -5920,183 +3845,33 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
(speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
- link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
- WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
- WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
- WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_GEN2_EN_STRAP;
- WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
} else {
- link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
if (1)
link_width_cntl |= LC_UPCONFIGURE_DIS;
else
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
- }
-}
-
-void evergreen_program_aspm(struct radeon_device *rdev)
-{
- u32 data, orig;
- u32 pcie_lc_cntl, pcie_lc_cntl_old;
- bool disable_l0s, disable_l1 = false, disable_plloff_in_l1 = false;
- /* fusion_platform = true
- * if the system is a fusion system
- * (APU or DGPU in a fusion system).
- * todo: check if the system is a fusion platform.
- */
- bool fusion_platform = false;
-
- if (radeon_aspm == 0)
- return;
-
- if (!(rdev->flags & RADEON_IS_PCIE))
- return;
-
- switch (rdev->family) {
- case CHIP_CYPRESS:
- case CHIP_HEMLOCK:
- case CHIP_JUNIPER:
- case CHIP_REDWOOD:
- case CHIP_CEDAR:
- case CHIP_SUMO:
- case CHIP_SUMO2:
- case CHIP_PALM:
- case CHIP_ARUBA:
- disable_l0s = true;
- break;
- default:
- disable_l0s = false;
- break;
- }
-
- if (rdev->flags & RADEON_IS_IGP)
- fusion_platform = true; /* XXX also dGPUs in a fusion system */
-
- data = orig = RREG32_PIF_PHY0(PB0_PIF_PAIRING);
- if (fusion_platform)
- data &= ~MULTI_PIF;
- else
- data |= MULTI_PIF;
- if (data != orig)
- WREG32_PIF_PHY0(PB0_PIF_PAIRING, data);
-
- data = orig = RREG32_PIF_PHY1(PB1_PIF_PAIRING);
- if (fusion_platform)
- data &= ~MULTI_PIF;
- else
- data |= MULTI_PIF;
- if (data != orig)
- WREG32_PIF_PHY1(PB1_PIF_PAIRING, data);
-
- pcie_lc_cntl = pcie_lc_cntl_old = RREG32_PCIE_PORT(PCIE_LC_CNTL);
- pcie_lc_cntl &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
- if (!disable_l0s) {
- if (rdev->family >= CHIP_BARTS)
- pcie_lc_cntl |= LC_L0S_INACTIVITY(7);
- else
- pcie_lc_cntl |= LC_L0S_INACTIVITY(3);
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
-
- if (!disable_l1) {
- if (rdev->family >= CHIP_BARTS)
- pcie_lc_cntl |= LC_L1_INACTIVITY(7);
- else
- pcie_lc_cntl |= LC_L1_INACTIVITY(8);
-
- if (!disable_plloff_in_l1) {
- data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
- data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
- data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
- if (data != orig)
- WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
-
- data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
- data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
- data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
- if (data != orig)
- WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
-
- data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
- data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
- data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
- if (data != orig)
- WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
-
- data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
- data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
- data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
- if (data != orig)
- WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
-
- if (rdev->family >= CHIP_BARTS) {
- data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
- data &= ~PLL_RAMP_UP_TIME_0_MASK;
- data |= PLL_RAMP_UP_TIME_0(4);
- if (data != orig)
- WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
-
- data = orig = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
- data &= ~PLL_RAMP_UP_TIME_1_MASK;
- data |= PLL_RAMP_UP_TIME_1(4);
- if (data != orig)
- WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
-
- data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
- data &= ~PLL_RAMP_UP_TIME_0_MASK;
- data |= PLL_RAMP_UP_TIME_0(4);
- if (data != orig)
- WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
-
- data = orig = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
- data &= ~PLL_RAMP_UP_TIME_1_MASK;
- data |= PLL_RAMP_UP_TIME_1(4);
- if (data != orig)
- WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
- }
-
- data = orig = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
- data &= ~LC_DYN_LANES_PWR_STATE_MASK;
- data |= LC_DYN_LANES_PWR_STATE(3);
- if (data != orig)
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
-
- if (rdev->family >= CHIP_BARTS) {
- data = orig = RREG32_PIF_PHY0(PB0_PIF_CNTL);
- data &= ~LS2_EXIT_TIME_MASK;
- data |= LS2_EXIT_TIME(1);
- if (data != orig)
- WREG32_PIF_PHY0(PB0_PIF_CNTL, data);
-
- data = orig = RREG32_PIF_PHY1(PB1_PIF_CNTL);
- data &= ~LS2_EXIT_TIME_MASK;
- data |= LS2_EXIT_TIME(1);
- if (data != orig)
- WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
- }
- }
- }
-
- /* evergreen parts only */
- if (rdev->family < CHIP_BARTS)
- pcie_lc_cntl |= LC_PMI_TO_L1_DIS;
-
- if (pcie_lc_cntl != pcie_lc_cntl_old)
- WREG32_PCIE_PORT(PCIE_LC_CNTL, pcie_lc_cntl);
}
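
The gen2 enable path above is one long chain of read-modify-write cycles on PCIE_LC_SPEED_CNTL through the RREG32_PCIE_P/WREG32_PCIE_P accessors. A minimal sketch of the idiom, using a hypothetical helper that is not part of the driver:

/* Hypothetical helper showing the read-modify-write idiom used on the
 * PCIe port registers; rdev and the accessors come from the driver. */
static inline void
pcie_port_rmw(struct radeon_device *rdev, u32 reg, u32 clr, u32 set)
{
	u32 v;

	v = RREG32_PCIE_P(reg);   /* read the current value */
	v &= ~clr;                /* drop the bits to clear */
	v |= set;                 /* merge the bits to set */
	WREG32_PCIE_P(reg, v);    /* write it back */
}

/* With it, the gen2 strap sequence above collapses to:
 *   pcie_port_rmw(rdev, PCIE_LC_SPEED_CNTL, LC_TARGET_LINK_SPEED_OVERRIDE_EN, 0);
 *   pcie_port_rmw(rdev, PCIE_LC_SPEED_CNTL, 0, LC_CLR_FAILED_SPD_CHANGE_CNT);
 *   pcie_port_rmw(rdev, PCIE_LC_SPEED_CNTL, LC_CLR_FAILED_SPD_CHANGE_CNT, 0);
 *   pcie_port_rmw(rdev, PCIE_LC_SPEED_CNTL, 0, LC_GEN2_EN_STRAP);
 */
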
diff --git a/sys/dev/pci/drm/radeon/evergreen_blit_shaders.c b/sys/dev/pci/drm/radeon/evergreen_blit_shaders.c
index cf426879fbb..85a58332e15 100644
--- a/sys/dev/pci/drm/radeon/evergreen_blit_shaders.c
+++ b/sys/dev/pci/drm/radeon/evergreen_blit_shaders.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: evergreen_blit_shaders.c,v 1.3 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2010 Advanced Micro Devices, Inc.
*
@@ -24,13 +25,15 @@
* Alex Deucher <alexander.deucher@amd.com>
*/
-#include <dev/pci/drm/drm_linux.h>
+#include <sys/types.h>
+
+#include <dev/pci/drm/drmP.h>
/*
* evergreen cards need to use the 3D engine to blit data which requires
* quite a bit of hw state setup. Rather than pull the whole 3D driver
* (which normally generates the 3D state) into the DRM, we opt to use
- * statically generated state tables. The register state and shaders
+ * statically generated state tables. The register state and shaders
* were hand generated to support blitting functionality. See the 3D
* driver or documentation for descriptions of the registers and
* shader instructions.
@@ -298,4 +301,58 @@ const u32 evergreen_default_state[] =
0x00000010, /* */
};
+const u32 evergreen_vs[] =
+{
+ 0x00000004,
+ 0x80800400,
+ 0x0000a03c,
+ 0x95000688,
+ 0x00004000,
+ 0x15200688,
+ 0x00000000,
+ 0x00000000,
+ 0x3c000000,
+ 0x67961001,
+#ifdef __BIG_ENDIAN
+ 0x000a0000,
+#else
+ 0x00080000,
+#endif
+ 0x00000000,
+ 0x1c000000,
+ 0x67961000,
+#ifdef __BIG_ENDIAN
+ 0x00020008,
+#else
+ 0x00000008,
+#endif
+ 0x00000000,
+};
+
+const u32 evergreen_ps[] =
+{
+ 0x00000003,
+ 0xa00c0000,
+ 0x00000008,
+ 0x80400000,
+ 0x00000000,
+ 0x95200688,
+ 0x00380400,
+ 0x00146b10,
+ 0x00380000,
+ 0x20146b10,
+ 0x00380400,
+ 0x40146b00,
+ 0x80380000,
+ 0x60146b00,
+ 0x00000000,
+ 0x00000000,
+ 0x00000010,
+ 0x000d1000,
+ 0xb0800000,
+ 0x00000000,
+};
+
+const u32 evergreen_ps_size = ARRAY_SIZE(evergreen_ps);
+const u32 evergreen_vs_size = ARRAY_SIZE(evergreen_vs);
const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state);
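
The evergreen_vs/evergreen_ps tables restored above are raw shader dwords; the __BIG_ENDIAN branches differ only in byte-swap-related fields of the fetch instructions. A minimal sketch of how blit setup code might copy such a table into a CPU-mapped buffer object, assuming a hypothetical mapping pointer ptr (the real copy lives in the r600 blit init path):

/* Sketch: copy a shader dword table into a mapped BO.  "ptr" is a
 * hypothetical CPU mapping of the destination; the GPU consumes
 * little-endian dwords, hence cpu_to_le32(). */
u32 *dst = (u32 *)ptr;
unsigned i;

for (i = 0; i < evergreen_vs_size; i++)
	dst[i] = cpu_to_le32(evergreen_vs[i]);
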
diff --git a/sys/dev/pci/drm/radeon/evergreen_blit_shaders.h b/sys/dev/pci/drm/radeon/evergreen_blit_shaders.h
index bb8d6c75159..32d549cfb57 100644
--- a/sys/dev/pci/drm/radeon/evergreen_blit_shaders.h
+++ b/sys/dev/pci/drm/radeon/evergreen_blit_shaders.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: evergreen_blit_shaders.h,v 1.3 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2009 Advanced Micro Devices, Inc.
*
diff --git a/sys/dev/pci/drm/radeon/evergreen_cs.c b/sys/dev/pci/drm/radeon/evergreen_cs.c
index a48d9bd90aa..285f681d960 100644
--- a/sys/dev/pci/drm/radeon/evergreen_cs.c
+++ b/sys/dev/pci/drm/radeon/evergreen_cs.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: evergreen_cs.c,v 1.8 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2010 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -34,10 +35,11 @@
#define MAX(a,b) (((a)>(b))?(a):(b))
#define MIN(a,b) (((a)<(b))?(a):(b))
-#define REG_SAFE_BM_SIZE ARRAY_SIZE(evergreen_reg_safe_bm)
-
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
- struct radeon_bo_list **cs_reloc);
+ struct radeon_cs_reloc **cs_reloc);
+static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc);
+
struct evergreen_cs_track {
u32 group_size;
u32 nbanks;
@@ -85,8 +87,6 @@ struct evergreen_cs_track {
u32 htile_offset;
u32 htile_surface;
struct radeon_bo *htile_bo;
- unsigned long indirect_draw_buffer_size;
- const unsigned *reg_safe_bm;
};
static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
@@ -447,7 +447,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
* command stream.
*/
if (!surf.mode) {
- uint32_t *ib = p->ib.ptr;
+ volatile u32 *ib = p->ib.ptr;
unsigned long tmp, nby, bsize, size, min = 0;
/* find the height the ddx wants */
@@ -822,7 +822,7 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
/* align height */
evergreen_surface_check(p, &surf, NULL);
- surf.nby = roundup2(surf.nby, surf.halign);
+ surf.nby = roundup(surf.nby, surf.halign);
r = evergreen_surface_check(p, &surf, "texture");
if (r) {
@@ -838,7 +838,7 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
__func__, __LINE__, toffset, surf.base_align);
return -EINVAL;
}
- if (surf.nsamples <= 1 && moffset & (surf.base_align - 1)) {
+ if (moffset & (surf.base_align - 1)) {
dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
__func__, __LINE__, moffset, surf.base_align);
return -EINVAL;
@@ -895,8 +895,8 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
__func__, __LINE__, surf.mode);
return -EINVAL;
}
- surf.nbx = roundup2(surf.nbx, surf.palign);
- surf.nby = roundup2(surf.nby, surf.halign);
+ surf.nbx = roundup(surf.nbx, surf.palign);
+ surf.nby = roundup(surf.nby, surf.halign);
r = evergreen_surface_check(p, &surf, "mipmap");
if (r) {
@@ -1013,35 +1013,223 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
}
/**
- * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
+ * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
+ * @parser: parser structure holding parsing context.
+ * @pkt: where to store packet information
+ *
+ * Assumes that chunk_ib_index is properly set. Will return -EINVAL
+ * if the packet is bigger than the remaining ib size, or if the packet type is unknown.
+ **/
+static int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx)
+{
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ uint32_t header;
+
+ if (idx >= ib_chunk->length_dw) {
+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+ idx, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ header = radeon_get_ib_value(p, idx);
+ pkt->idx = idx;
+ pkt->type = CP_PACKET_GET_TYPE(header);
+ pkt->count = CP_PACKET_GET_COUNT(header);
+ pkt->one_reg_wr = 0;
+ switch (pkt->type) {
+ case PACKET_TYPE0:
+ pkt->reg = CP_PACKET0_GET_REG(header);
+ break;
+ case PACKET_TYPE3:
+ pkt->opcode = CP_PACKET3_GET_OPCODE(header);
+ break;
+ case PACKET_TYPE2:
+ pkt->count = -1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
+ return -EINVAL;
+ }
+ if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
+ DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
+ pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ return 0;
+}
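
A note on the decode above: CP_PACKET_GET_TYPE, CP_PACKET_GET_COUNT and CP_PACKET0_GET_REG slice a single header dword. The sketch below spells out the conventional radeon layout (type in bits 31:30, count-minus-one in bits 29:16, type-0 register dword offset in the low bits); treat the exact masks as assumptions to verify against the evergreend.h macros.

/* Assumed CP header layout; verify against evergreend.h. */
#define PKT_TYPE(h)   (((h) >> 30) & 0x3)     /* 0 = reg writes, 2 = filler, 3 = opcode */
#define PKT_COUNT(h)  (((h) >> 16) & 0x3fff)  /* payload dwords minus one */
#define PKT0_REG(h)   (((h) & 0xffff) << 2)   /* byte address of the first register */

/* Example: a type-0 header writing two dwords starting at register 0x8040: */
u32 header = (0u << 30) | (1u << 16) | (0x8040 >> 2);
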
+
+/**
+ * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3
+ * @parser: parser structure holding parsing context.
+ * @data: pointer to relocation data
+ * @offset_start: starting offset
+ * @offset_mask: offset mask (to align start offset on)
+ * @reloc: reloc information
+ *
+ * Check that the next packet is a relocation packet3, do bo validation and compute
+ * the GPU offset using the provided start.
+ **/
+static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc)
+{
+ struct radeon_cs_chunk *relocs_chunk;
+ struct radeon_cs_packet p3reloc;
+ unsigned idx;
+ int r;
+
+ if (p->chunk_relocs_idx == -1) {
+ DRM_ERROR("No relocation chunk !\n");
+ return -EINVAL;
+ }
+ *cs_reloc = NULL;
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r) {
+ return r;
+ }
+ p->idx += p3reloc.count + 2;
+ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+ DRM_ERROR("No packet3 for relocation for packet at %d.\n",
+ p3reloc.idx);
+ return -EINVAL;
+ }
+ idx = radeon_get_ib_value(p, p3reloc.idx + 1);
+ if (idx >= relocs_chunk->length_dw) {
+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+ idx, relocs_chunk->length_dw);
+ return -EINVAL;
+ }
+ /* FIXME: we assume reloc size is 4 dwords */
+ *cs_reloc = p->relocs_ptr[(idx / 4)];
+ return 0;
+}
+
+/**
+ * evergreen_cs_packet_next_is_pkt3_nop() - test if the next packet is NOP
+ * @p: structure holding the parser context.
+ *
+ * Check if the next packet is a relocation packet3.
+ **/
+static bool evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet p3reloc;
+ int r;
+
+ r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r) {
+ return false;
+ }
+ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+ return false;
+ }
+ return true;
+}
+
+/**
+ * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
*
- * This is an Evergreen(+)-specific function for parsing VLINE packets.
- * Real work is done by r600_cs_common_vline_parse function.
- * Here we just set up ASIC-specific register table and call
- * the common implementation function.
+ * Userspace sends a special sequence for VLINE waits.
+ * PACKET0 - VLINE_START_END + value
+ * PACKET3 - WAIT_REG_MEM poll vline status reg
+ * RELOC (P3) - crtc_id in reloc.
+ *
+ * This function parses this and relocates the VLINE START END
+ * and WAIT_REG_MEM packets to the correct crtc.
+ * It also detects a switched off crtc and nulls out the
+ * wait in that case.
*/
static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ struct radeon_cs_packet p3reloc, wait_reg_mem;
+ int crtc_id;
+ int r;
+ uint32_t header, h_idx, reg, wait_reg_mem_info;
+ volatile uint32_t *ib;
+
+ ib = p->ib.ptr;
+
+ /* parse the WAIT_REG_MEM */
+ r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
+ if (r)
+ return r;
+
+ /* check it's a WAIT_REG_MEM */
+ if (wait_reg_mem.type != PACKET_TYPE3 ||
+ wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
+ DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
+ return -EINVAL;
+ }
+
+ wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
+ /* bit 4 is reg (0) or mem (1) */
+ if (wait_reg_mem_info & 0x10) {
+ DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
+ return -EINVAL;
+ }
+ /* waiting for value to be equal */
+ if ((wait_reg_mem_info & 0x7) != 0x3) {
+ DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
+ return -EINVAL;
+ }
+ if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
+ DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
+ return -EINVAL;
+ }
- static uint32_t vline_start_end[6] = {
- EVERGREEN_VLINE_START_END + EVERGREEN_CRTC0_REGISTER_OFFSET,
- EVERGREEN_VLINE_START_END + EVERGREEN_CRTC1_REGISTER_OFFSET,
- EVERGREEN_VLINE_START_END + EVERGREEN_CRTC2_REGISTER_OFFSET,
- EVERGREEN_VLINE_START_END + EVERGREEN_CRTC3_REGISTER_OFFSET,
- EVERGREEN_VLINE_START_END + EVERGREEN_CRTC4_REGISTER_OFFSET,
- EVERGREEN_VLINE_START_END + EVERGREEN_CRTC5_REGISTER_OFFSET
- };
- static uint32_t vline_status[6] = {
- EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
- EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
- EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
- EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
- EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
- EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET
- };
-
- return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
+ if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
+ DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
+ return -EINVAL;
+ }
+
+ /* jump over the NOP */
+ r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
+ if (r)
+ return r;
+
+ h_idx = p->idx - 2;
+ p->idx += wait_reg_mem.count + 2;
+ p->idx += p3reloc.count + 2;
+
+ header = radeon_get_ib_value(p, h_idx);
+ crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
+ reg = CP_PACKET0_GET_REG(header);
+ obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_ERROR("cannot find crtc %d\n", crtc_id);
+ return -EINVAL;
+ }
+ crtc = obj_to_crtc(obj);
+ radeon_crtc = to_radeon_crtc(crtc);
+ crtc_id = radeon_crtc->crtc_id;
+
+ if (!crtc->enabled) {
+ /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
+ ib[h_idx + 2] = PACKET2(0);
+ ib[h_idx + 3] = PACKET2(0);
+ ib[h_idx + 4] = PACKET2(0);
+ ib[h_idx + 5] = PACKET2(0);
+ ib[h_idx + 6] = PACKET2(0);
+ ib[h_idx + 7] = PACKET2(0);
+ ib[h_idx + 8] = PACKET2(0);
+ } else {
+ switch (reg) {
+ case EVERGREEN_VLINE_START_END:
+ header &= ~R600_CP_PACKET0_REG_MASK;
+ header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
+ ib[h_idx] = header;
+ ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
+ break;
+ default:
+ DRM_ERROR("unknown crtc reloc\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
}
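
The magic offsets in the function above (h_idx, wait_reg_mem.idx = h_idx + 2, crtc_id at h_idx + 2 + 7 + 1) assume the fixed dword sequence userspace emits for a vline wait. Reconstructed from the checks in the function, and with the individual WAIT_REG_MEM payload roles treated as assumptions:

/*
 * ib[h_idx + 0]  PACKET0 header, VLINE_START_END (relocated to the crtc)
 * ib[h_idx + 1]  start/end scanline value
 * ib[h_idx + 2]  PACKET3 WAIT_REG_MEM header (count 5: six payload dwords)
 * ib[h_idx + 3]  wait info: REG space, function "equal"
 * ib[h_idx + 4]  register to poll (VLINE_STATUS, relocated to the crtc)
 * ib[h_idx + 5]  address high / unused for register waits (assumption)
 * ib[h_idx + 6]  reference value
 * ib[h_idx + 7]  mask (VLINE_STAT bit)
 * ib[h_idx + 8]  poll interval
 * ib[h_idx + 9]  PACKET3 NOP header carrying the relocation
 * ib[h_idx + 10] relocation index, read back here as crtc_id
 */
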
static int evergreen_packet0_check(struct radeon_cs_parser *p,
@@ -1086,18 +1274,41 @@ static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
}
/**
- * evergreen_cs_handle_reg() - process registers that need special handling.
+ * evergreen_cs_check_reg() - check if register is authorized or not
* @parser: parser structure holding parsing context
* @reg: register we are testing
* @idx: index into the cs buffer
+ *
+ * This function will test against evergreen_reg_safe_bm and return 0
+ * if the register is safe. If the register is not flagged as safe, this function
+ * will test it against a list of registers needing special handling.
*/
-static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
- struct radeon_bo_list *reloc;
- u32 tmp, *ib;
+ struct radeon_cs_reloc *reloc;
+ u32 last_reg;
+ u32 m, i, tmp, *ib;
int r;
+ if (p->rdev->family >= CHIP_CAYMAN)
+ last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
+ else
+ last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
+
+ i = (reg >> 7);
+ if (i >= last_reg) {
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return -EINVAL;
+ }
+ m = 1 << ((reg >> 2) & 31);
+ if (p->rdev->family >= CHIP_CAYMAN) {
+ if (!(cayman_reg_safe_bm[i] & m))
+ return 0;
+ } else {
+ if (!(evergreen_reg_safe_bm[i] & m))
+ return 0;
+ }
ib = p->ib.ptr;
switch (reg) {
/* force following reg to 0 in an attempt to disable out buffer
@@ -1140,13 +1351,13 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_LSTMP_RING_BASE:
case SQ_PSTMP_RING_BASE:
case SQ_VSTMP_RING_BASE:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
case DB_DEPTH_CONTROL:
track->db_depth_control = radeon_get_ib_value(p, idx);
@@ -1169,7 +1380,7 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_Z_INFO:
track->db_z_info = radeon_get_ib_value(p, idx);
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1177,12 +1388,12 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
ib[idx] &= ~Z_ARRAY_MODE(0xf);
track->db_z_info &= ~Z_ARRAY_MODE(0xf);
- ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
- track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
- if (reloc->tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
unsigned bankw, bankh, mtaspect, tile_split;
- evergreen_tiling_fields(reloc->tiling_flags,
+ evergreen_tiling_fields(reloc->lobj.tiling_flags,
&bankw, &bankh, &mtaspect,
&tile_split);
ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
@@ -1211,50 +1422,50 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_Z_READ_BASE:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
track->db_z_read_offset = radeon_get_ib_value(p, idx);
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->db_z_read_bo = reloc->robj;
track->db_dirty = true;
break;
case DB_Z_WRITE_BASE:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
track->db_z_write_offset = radeon_get_ib_value(p, idx);
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->db_z_write_bo = reloc->robj;
track->db_dirty = true;
break;
case DB_STENCIL_READ_BASE:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
track->db_s_read_offset = radeon_get_ib_value(p, idx);
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->db_s_read_bo = reloc->robj;
track->db_dirty = true;
break;
case DB_STENCIL_WRITE_BASE:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
track->db_s_write_offset = radeon_get_ib_value(p, idx);
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->db_s_write_bo = reloc->robj;
track->db_dirty = true;
break;
@@ -1270,7 +1481,7 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case VGT_STRMOUT_BUFFER_BASE_1:
case VGT_STRMOUT_BUFFER_BASE_2:
case VGT_STRMOUT_BUFFER_BASE_3:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1278,7 +1489,7 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->vgt_strmout_bo[tmp] = reloc->robj;
track->streamout_dirty = true;
break;
@@ -1292,13 +1503,13 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->streamout_dirty = true;
break;
case CP_COHER_BASE:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
"0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
case CB_TARGET_MASK:
track->cb_target_mask = radeon_get_ib_value(p, idx);
track->cb_dirty = true;
@@ -1356,14 +1567,14 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
tmp = (reg - CB_COLOR0_INFO) / 0x3c;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
+ ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
}
track->cb_dirty = true;
break;
@@ -1374,14 +1585,14 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
+ ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
}
track->cb_dirty = true;
break;
@@ -1435,17 +1646,17 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR5_ATTRIB:
case CB_COLOR6_ATTRIB:
case CB_COLOR7_ATTRIB:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->tiling_flags & RADEON_TILING_MACRO) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
unsigned bankw, bankh, mtaspect, tile_split;
- evergreen_tiling_fields(reloc->tiling_flags,
+ evergreen_tiling_fields(reloc->lobj.tiling_flags,
&bankw, &bankh, &mtaspect,
&tile_split);
ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
@@ -1463,17 +1674,17 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR9_ATTRIB:
case CB_COLOR10_ATTRIB:
case CB_COLOR11_ATTRIB:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->tiling_flags & RADEON_TILING_MACRO) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
unsigned bankw, bankh, mtaspect, tile_split;
- evergreen_tiling_fields(reloc->tiling_flags,
+ evergreen_tiling_fields(reloc->lobj.tiling_flags,
&bankw, &bankh, &mtaspect,
&tile_split);
ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
@@ -1496,12 +1707,12 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR6_FMASK:
case CB_COLOR7_FMASK:
tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->cb_color_fmask_bo[tmp] = reloc->robj;
break;
case CB_COLOR0_CMASK:
@@ -1513,12 +1724,12 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR6_CMASK:
case CB_COLOR7_CMASK:
tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->cb_color_cmask_bo[tmp] = reloc->robj;
break;
case CB_COLOR0_FMASK_SLICE:
@@ -1551,7 +1762,7 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR5_BASE:
case CB_COLOR6_BASE:
case CB_COLOR7_BASE:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1559,7 +1770,7 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
tmp = (reg - CB_COLOR0_BASE) / 0x3c;
track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->cb_color_bo[tmp] = reloc->robj;
track->cb_dirty = true;
break;
@@ -1567,7 +1778,7 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR9_BASE:
case CB_COLOR10_BASE:
case CB_COLOR11_BASE:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1575,19 +1786,19 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->cb_color_bo[tmp] = reloc->robj;
track->cb_dirty = true;
break;
case DB_HTILE_DATA_BASE:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
track->htile_offset = radeon_get_ib_value(p, idx);
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->htile_bo = reloc->robj;
track->db_dirty = true;
break;
@@ -1698,13 +1909,13 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_ALU_CONST_CACHE_LS_13:
case SQ_ALU_CONST_CACHE_LS_14:
case SQ_ALU_CONST_CACHE_LS_15:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
case SX_MEMORY_EXPORT_BASE:
if (p->rdev->family >= CHIP_CAYMAN) {
@@ -1712,13 +1923,13 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONFIG_REG "
"0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
case CAYMAN_SX_SCATTER_EXPORT_BASE:
if (p->rdev->family < CHIP_CAYMAN) {
@@ -1726,13 +1937,13 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
"0x%04X\n", reg);
return -EINVAL;
}
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
case SX_MISC:
track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
@@ -1744,36 +1955,38 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
return 0;
}
-/**
- * evergreen_is_safe_reg() - check if register is authorized or not
- * @parser: parser structure holding parsing context
- * @reg: register we are testing
- *
- * This function will test against reg_safe_bm and return true
- * if register is safe or false otherwise.
- */
-static inline bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg)
+static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
- struct evergreen_cs_track *track = p->track;
- u32 m, i;
+ u32 last_reg, m, i;
+
+ if (p->rdev->family >= CHIP_CAYMAN)
+ last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
+ else
+ last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
i = (reg >> 7);
- if (unlikely(i >= REG_SAFE_BM_SIZE)) {
+ if (i >= last_reg) {
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return false;
}
m = 1 << ((reg >> 2) & 31);
- if (!(track->reg_safe_bm[i] & m))
- return true;
-
+ if (p->rdev->family >= CHIP_CAYMAN) {
+ if (!(cayman_reg_safe_bm[i] & m))
+ return true;
+ } else {
+ if (!(evergreen_reg_safe_bm[i] & m))
+ return true;
+ }
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return false;
}
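
Both evergreen_is_safe_reg() and the identical prologue added to evergreen_cs_check_reg() above use the same bitmap arithmetic: registers are 4 bytes apart, so reg >> 2 is the register index, and each u32 of the *_reg_safe_bm tables covers 32 registers (128 bytes of register space). A worked sketch:

/* Sketch: locate the safe-bitmap bit for a byte register address.
 * The register value is made up for illustration. */
u32 reg  = 0x28350;                   /* hypothetical register address */
u32 word = reg >> 7;                  /* (reg >> 2) / 32 = bitmap word */
u32 bit  = 1u << ((reg >> 2) & 31);   /* bit within that word */

/* A clear bit means "safe, pass through"; a set bit means the register
 * needs the special handling in evergreen_cs_check_reg(). */
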
static int evergreen_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
- struct radeon_bo_list *reloc;
+ struct radeon_cs_reloc *reloc;
struct evergreen_cs_track *track;
- uint32_t *ib;
+ volatile u32 *ib;
unsigned idx;
unsigned i;
unsigned start_reg, end_reg, reg;
@@ -1809,13 +2022,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad SET PREDICATION\n");
return -EINVAL;
}
- offset = reloc->gpu_offset +
+ offset = reloc->lobj.gpu_offset +
(idx_value & 0xfffffff0) +
((u64)(tmp & 0xff) << 32);
@@ -1855,13 +2068,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad INDEX_BASE\n");
return -EINVAL;
}
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad INDEX_BASE\n");
return -EINVAL;
}
- offset = reloc->gpu_offset +
+ offset = reloc->lobj.gpu_offset +
idx_value +
((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
@@ -1875,14 +2088,6 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
break;
}
- case PACKET3_INDEX_BUFFER_SIZE:
- {
- if (pkt->count != 0) {
- DRM_ERROR("bad INDEX_BUFFER_SIZE\n");
- return -EINVAL;
- }
- break;
- }
case PACKET3_DRAW_INDEX:
{
uint64_t offset;
@@ -1890,13 +2095,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
}
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
}
- offset = reloc->gpu_offset +
+ offset = reloc->lobj.gpu_offset +
idx_value +
((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
@@ -1918,13 +2123,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX_2\n");
return -EINVAL;
}
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad DRAW_INDEX_2\n");
return -EINVAL;
}
- offset = reloc->gpu_offset +
+ offset = reloc->lobj.gpu_offset +
radeon_get_ib_value(p, idx+1) +
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
@@ -1993,67 +2198,6 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return r;
}
break;
- case PACKET3_SET_BASE:
- {
- /*
- DW 1 HEADER Header of the packet. Shader_Type in bit 1 of the Header will correspond to the shader type of the Load, see Type-3 Packet.
- 2 BASE_INDEX Bits [3:0] BASE_INDEX - Base Index specifies which base address is specified in the last two DWs.
- 0001: DX11 Draw_Index_Indirect Patch Table Base: Base address for Draw_Index_Indirect data.
- 3 ADDRESS_LO Bits [31:3] - Lower bits of QWORD-Aligned Address. Bits [2:0] - Reserved
- 4 ADDRESS_HI Bits [31:8] - Reserved. Bits [7:0] - Upper bits of Address [47:32]
- */
- if (pkt->count != 2) {
- DRM_ERROR("bad SET_BASE\n");
- return -EINVAL;
- }
-
- /* currently only supporting setting indirect draw buffer base address */
- if (idx_value != 1) {
- DRM_ERROR("bad SET_BASE\n");
- return -EINVAL;
- }
-
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
- if (r) {
- DRM_ERROR("bad SET_BASE\n");
- return -EINVAL;
- }
-
- track->indirect_draw_buffer_size = radeon_bo_size(reloc->robj);
-
- ib[idx+1] = reloc->gpu_offset;
- ib[idx+2] = upper_32_bits(reloc->gpu_offset) & 0xff;
-
- break;
- }
- case PACKET3_DRAW_INDIRECT:
- case PACKET3_DRAW_INDEX_INDIRECT:
- {
- u64 size = pkt->opcode == PACKET3_DRAW_INDIRECT ? 16 : 20;
-
- /*
- DW 1 HEADER
- 2 DATA_OFFSET Bits [31:0] + byte aligned offset where the required data structure starts. Bits 1:0 are zero
- 3 DRAW_INITIATOR Draw Initiator Register. Written to the VGT_DRAW_INITIATOR register for the assigned context
- */
- if (pkt->count != 1) {
- DRM_ERROR("bad DRAW_INDIRECT\n");
- return -EINVAL;
- }
-
- if (idx_value + size > track->indirect_draw_buffer_size) {
- dev_warn(p->dev, "DRAW_INDIRECT buffer too small %u + %llu > %lu\n",
- idx_value, size, track->indirect_draw_buffer_size);
- return -EINVAL;
- }
-
- r = evergreen_cs_track_check(p);
- if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
- return r;
- }
- break;
- }
case PACKET3_DISPATCH_DIRECT:
if (pkt->count != 3) {
DRM_ERROR("bad DISPATCH_DIRECT\n");
@@ -2070,12 +2214,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DISPATCH_INDIRECT\n");
return -EINVAL;
}
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad DISPATCH_INDIRECT\n");
return -EINVAL;
}
- ib[idx+0] = idx_value + (u32)(reloc->gpu_offset & 0xffffffff);
+ ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
r = evergreen_cs_track_check(p);
if (r) {
dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
@@ -2091,21 +2235,18 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x10) {
uint64_t offset;
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad WAIT_REG_MEM\n");
return -EINVAL;
}
- offset = reloc->gpu_offset +
+ offset = reloc->lobj.gpu_offset +
(radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
ib[idx+2] = upper_32_bits(offset) & 0xff;
- } else if (idx_value & 0x100) {
- DRM_ERROR("cannot use PFP on REG wait\n");
- return -EINVAL;
}
break;
case PACKET3_CP_DMA:
@@ -2145,7 +2286,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
/* src address space is memory */
if (((info & 0x60000000) >> 29) == 0) {
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad CP DMA SRC\n");
return -EINVAL;
@@ -2154,7 +2295,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
tmp = radeon_get_ib_value(p, idx) +
((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
- offset = reloc->gpu_offset + tmp;
+ offset = reloc->lobj.gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
@@ -2183,7 +2324,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
if (((info & 0x00300000) >> 20) == 0) {
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad CP DMA DST\n");
return -EINVAL;
@@ -2192,7 +2333,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
tmp = radeon_get_ib_value(p, idx+2) +
((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
- offset = reloc->gpu_offset + tmp;
+ offset = reloc->lobj.gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
@@ -2217,12 +2358,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* 0xffffffff/0x0 is flush all cache flag */
if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
radeon_get_ib_value(p, idx + 2) != 0) {
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad SURFACE_SYNC\n");
return -EINVAL;
}
- ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
}
break;
case PACKET3_EVENT_WRITE:
@@ -2233,12 +2374,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if (pkt->count) {
uint64_t offset;
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
}
- offset = reloc->gpu_offset +
+ offset = reloc->lobj.gpu_offset +
(radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
@@ -2254,13 +2395,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE_EOP\n");
return -EINVAL;
}
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad EVENT_WRITE_EOP\n");
return -EINVAL;
}
- offset = reloc->gpu_offset +
+ offset = reloc->lobj.gpu_offset +
(radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
@@ -2276,13 +2417,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE_EOS\n");
return -EINVAL;
}
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad EVENT_WRITE_EOS\n");
return -EINVAL;
}
- offset = reloc->gpu_offset +
+ offset = reloc->lobj.gpu_offset +
(radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
@@ -2299,10 +2440,9 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
return -EINVAL;
}
- for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
- if (evergreen_is_safe_reg(p, reg))
- continue;
- r = evergreen_cs_handle_reg(p, reg, idx);
+ for (i = 0; i < pkt->count; i++) {
+ reg = start_reg + (4 * i);
+ r = evergreen_cs_check_reg(p, reg, idx+1+i);
if (r)
return r;
}
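
This SET_CONFIG_REG hunk and the SET_CONTEXT_REG hunk below swap the register-stepping loop for an index-based one over pkt->count dwords. The start_reg/end_reg values they rely on are computed earlier in the function from the first payload dword; a sketch of that computation, assuming the conventional PACKET3_SET_CONFIG_REG_START base from evergreend.h:

/* Sketch: recover the register range written by a SET_CONFIG_REG packet.
 * idx_value is the dword right after the packet header. */
start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
end_reg = 4 * pkt->count + start_reg - 4;   /* last register touched */
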
@@ -2316,10 +2456,9 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
return -EINVAL;
}
- for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
- if (evergreen_is_safe_reg(p, reg))
- continue;
- r = evergreen_cs_handle_reg(p, reg, idx);
+ for (i = 0; i < pkt->count; i++) {
+ reg = start_reg + (4 * i);
+ r = evergreen_cs_check_reg(p, reg, idx+1+i);
if (r)
return r;
}
@@ -2345,18 +2484,18 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
case SQ_TEX_VTX_VALID_TEXTURE:
/* tex base */
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad SET_RESOURCE (tex)\n");
return -EINVAL;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
ib[idx+1+(i*8)+1] |=
- TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
- if (reloc->tiling_flags & RADEON_TILING_MACRO) {
+ TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
unsigned bankw, bankh, mtaspect, tile_split;
- evergreen_tiling_fields(reloc->tiling_flags,
+ evergreen_tiling_fields(reloc->lobj.tiling_flags,
&bankw, &bankh, &mtaspect,
&tile_split);
ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split);
@@ -2368,7 +2507,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
}
texture = reloc->robj;
- toffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
/* tex mip base */
tex_dim = ib[idx+1+(i*8)+0] & 0x7;
@@ -2376,18 +2515,18 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((tex_dim == SQ_TEX_DIM_2D_MSAA || tex_dim == SQ_TEX_DIM_2D_ARRAY_MSAA) &&
!mip_address &&
- !radeon_cs_packet_next_is_pkt3_nop(p)) {
+ !evergreen_cs_packet_next_is_pkt3_nop(p)) {
/* MIP_ADDRESS should point to FMASK for an MSAA texture.
* It should be 0 if FMASK is disabled. */
moffset = 0;
mipmap = NULL;
} else {
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad SET_RESOURCE (tex)\n");
return -EINVAL;
}
- moffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
mipmap = reloc->robj;
}
@@ -2401,7 +2540,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
{
uint64_t offset64;
/* vtx base */
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad SET_RESOURCE (vtx)\n");
return -EINVAL;
@@ -2414,7 +2553,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
}
- offset64 = reloc->gpu_offset + offset;
+ offset64 = reloc->lobj.gpu_offset + offset;
ib[idx+1+(i*8)+0] = offset64;
ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
(upper_32_bits(offset64) & 0xff);
@@ -2483,7 +2622,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* Updating memory at DST_ADDRESS. */
if (idx_value & 0x1) {
u64 offset;
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
return -EINVAL;
@@ -2495,14 +2634,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
- offset += reloc->gpu_offset;
+ offset += reloc->lobj.gpu_offset;
ib[idx+1] = offset;
ib[idx+2] = upper_32_bits(offset) & 0xff;
}
/* Reading data from SRC_ADDRESS. */
if (((idx_value >> 1) & 0x3) == 2) {
u64 offset;
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
return -EINVAL;
@@ -2514,7 +2653,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
- offset += reloc->gpu_offset;
+ offset += reloc->lobj.gpu_offset;
ib[idx+3] = offset;
ib[idx+4] = upper_32_bits(offset) & 0xff;
}
@@ -2527,7 +2666,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad MEM_WRITE (invalid count)\n");
return -EINVAL;
}
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
return -EINVAL;
@@ -2543,7 +2682,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
offset + 8, radeon_bo_size(reloc->robj));
return -EINVAL;
}
- offset += reloc->gpu_offset;
+ offset += reloc->lobj.gpu_offset;
ib[idx+0] = offset;
ib[idx+1] = upper_32_bits(offset) & 0xff;
break;
@@ -2556,7 +2695,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x1) {
u64 offset;
/* SRC is memory. */
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad COPY_DW (missing src reloc)\n");
return -EINVAL;
@@ -2568,22 +2707,19 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
- offset += reloc->gpu_offset;
+ offset += reloc->lobj.gpu_offset;
ib[idx+1] = offset;
ib[idx+2] = upper_32_bits(offset) & 0xff;
} else {
/* SRC is a reg. */
reg = radeon_get_ib_value(p, idx+1) << 2;
- if (!evergreen_is_safe_reg(p, reg)) {
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
- reg, idx + 1);
+ if (!evergreen_is_safe_reg(p, reg, idx+1))
return -EINVAL;
- }
}
if (idx_value & 0x2) {
u64 offset;
/* DST is memory. */
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
return -EINVAL;
@@ -2595,17 +2731,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
- offset += reloc->gpu_offset;
+ offset += reloc->lobj.gpu_offset;
ib[idx+3] = offset;
ib[idx+4] = upper_32_bits(offset) & 0xff;
} else {
/* DST is a reg. */
reg = radeon_get_ib_value(p, idx+3) << 2;
- if (!evergreen_is_safe_reg(p, reg)) {
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
- reg, idx + 3);
+ if (!evergreen_is_safe_reg(p, reg, idx+3))
return -EINVAL;
- }
}
break;
case PACKET3_NOP:
@@ -2630,15 +2763,11 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
if (track == NULL)
return -ENOMEM;
evergreen_cs_track_init(track);
- if (p->rdev->family >= CHIP_CAYMAN) {
+ if (p->rdev->family >= CHIP_CAYMAN)
tmp = p->rdev->config.cayman.tile_config;
- track->reg_safe_bm = cayman_reg_safe_bm;
- } else {
+ else
tmp = p->rdev->config.evergreen.tile_config;
- track->reg_safe_bm = evergreen_reg_safe_bm;
- }
- BUILD_BUG_ON(ARRAY_SIZE(cayman_reg_safe_bm) != REG_SAFE_BM_SIZE);
- BUILD_BUG_ON(ARRAY_SIZE(evergreen_reg_safe_bm) != REG_SAFE_BM_SIZE);
+
switch (tmp & 0xf) {
case 0:
track->npipes = 1;
@@ -2694,7 +2823,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
p->track = track;
}
do {
- r = radeon_cs_packet_parse(p, &pkt, p->idx);
+ r = evergreen_cs_packet_parse(p, &pkt, p->idx);
if (r) {
kfree(p->track);
p->track = NULL;
@@ -2702,12 +2831,12 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
}
p->idx += pkt.count + 2;
switch (pkt.type) {
- case RADEON_PACKET_TYPE0:
+ case PACKET_TYPE0:
r = evergreen_cs_parse_packet0(p, &pkt);
break;
- case RADEON_PACKET_TYPE2:
+ case PACKET_TYPE2:
break;
- case RADEON_PACKET_TYPE3:
+ case PACKET_TYPE3:
r = evergreen_packet3_check(p, &pkt);
break;
default:
@@ -2721,7 +2850,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
p->track = NULL;
return r;
}
- } while (p->idx < p->chunk_ib->length_dw);
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
for (r = 0; r < p->ib.length_dw; r++) {
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
@@ -2733,6 +2862,16 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
return 0;
}
+/*
+ * DMA
+ */
+
+#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
+#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
+#define GET_DMA_NEW(h) (((h) & 0x04000000) >> 26)
+#define GET_DMA_MISC(h) (((h) & 0x0700000) >> 20)
+
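
The GET_DMA_* macros added above slice one DMA IB header dword: opcode in bits 31:28, a 20-bit dword count in bits 19:0, the tiled flag at bit 23, the "new" flag at bit 26 and a 3-bit misc field at bits 22:20. A minimal decode sketch:

u32 header = radeon_get_ib_value(p, idx);  /* as in the parser below */
u32 cmd    = GET_DMA_CMD(header);          /* DMA_PACKET_* opcode */
u32 count  = GET_DMA_COUNT(header);        /* payload size in dwords */
u32 tiled  = GET_DMA_T(header);            /* 1 = tiled addressing */
u32 misc   = GET_DMA_MISC(header);         /* sub-op selector for copies */
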
/**
* evergreen_dma_cs_parse() - parse the DMA IB
* @p: parser structure holding parsing context.
@@ -2744,11 +2883,11 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
**/
int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
{
- struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
- struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc;
- u32 header, cmd, count, sub_cmd;
- uint32_t *ib = p->ib.ptr;
- u32 idx;
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
+ u32 header, cmd, count, tiled, new_cmd, misc;
+ volatile u32 *ib = p->ib.ptr;
+ u32 idx, idx_value;
u64 src_offset, dst_offset, dst2_offset;
int r;
@@ -2762,7 +2901,9 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, idx);
cmd = GET_DMA_CMD(header);
count = GET_DMA_COUNT(header);
- sub_cmd = GET_DMA_SUB_CMD(header);
+ tiled = GET_DMA_T(header);
+ new_cmd = GET_DMA_NEW(header);
+ misc = GET_DMA_MISC(header);
switch (cmd) {
case DMA_PACKET_WRITE:
@@ -2771,27 +2912,19 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("bad DMA_PACKET_WRITE\n");
return -EINVAL;
}
- switch (sub_cmd) {
- /* tiled */
- case 8:
+ if (tiled) {
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset <<= 8;
- ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
p->idx += count + 7;
- break;
- /* linear */
- case 0:
+ } else {
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
- ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
- ib[idx+2] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
p->idx += count + 3;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header);
- return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
@@ -2810,330 +2943,338 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("bad DMA_PACKET_COPY\n");
return -EINVAL;
}
- switch (sub_cmd) {
- /* Copy L2L, DW aligned */
- case 0x00:
- /* L2L, dw */
- src_offset = radeon_get_ib_value(p, idx+2);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
- ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
- ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
- ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
- p->idx += 5;
- break;
- /* Copy L2T/T2L */
- case 0x08:
- /* detile bit */
- if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
- /* tiled src, linear dst */
- src_offset = radeon_get_ib_value(p, idx+1);
- src_offset <<= 8;
- ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
-
- dst_offset = radeon_get_ib_value(p, idx + 7);
- dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
- ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
- } else {
- /* linear src, tiled dst */
- src_offset = radeon_get_ib_value(p, idx+7);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
- ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
-
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset <<= 8;
- ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
- }
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- p->idx += 9;
- break;
- /* Copy L2L, byte aligned */
- case 0x40:
- /* L2L, byte */
- src_offset = radeon_get_ib_value(p, idx+2);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
- if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
- src_offset + count, radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
- dst_offset + count, radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xffffffff);
- ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xffffffff);
- ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
- ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
- p->idx += 5;
- break;
- /* Copy L2L, partial */
- case 0x41:
- /* L2L, partial */
- if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2L Partial is cayman only !\n");
- return -EINVAL;
- }
- ib[idx+1] += (u32)(src_reloc->gpu_offset & 0xffffffff);
- ib[idx+2] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
- ib[idx+4] += (u32)(dst_reloc->gpu_offset & 0xffffffff);
- ib[idx+5] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
-
- p->idx += 9;
- break;
- /* Copy L2L, DW aligned, broadcast */
- case 0x44:
- /* L2L, dw, broadcast */
- r = r600_dma_cs_next_reloc(p, &dst2_reloc);
- if (r) {
- DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
- dst2_offset = radeon_get_ib_value(p, idx+2);
- dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
- src_offset = radeon_get_ib_value(p, idx+3);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
- ib[idx+2] += (u32)(dst2_reloc->gpu_offset & 0xfffffffc);
- ib[idx+3] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
- ib[idx+4] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
- ib[idx+5] += upper_32_bits(dst2_reloc->gpu_offset) & 0xff;
- ib[idx+6] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
- p->idx += 7;
- break;
- /* Copy L2T Frame to Field */
- case 0x48:
- if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
- DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- r = r600_dma_cs_next_reloc(p, &dst2_reloc);
- if (r) {
- DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset <<= 8;
- dst2_offset = radeon_get_ib_value(p, idx+2);
- dst2_offset <<= 8;
- src_offset = radeon_get_ib_value(p, idx+8);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
- return -EINVAL;
- }
- ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
- ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
- ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
- ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
- p->idx += 10;
- break;
- /* Copy L2T/T2L, partial */
- case 0x49:
- /* L2T, T2L partial */
- if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2T, T2L Partial is cayman only !\n");
- return -EINVAL;
- }
- /* detile bit */
- if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
- /* tiled src, linear dst */
- ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
-
- ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+ if (tiled) {
+ idx_value = radeon_get_ib_value(p, idx + 2);
+ if (new_cmd) {
+ switch (misc) {
+ case 0:
+ /* L2T, frame to fields */
+ if (idx_value & (1 << 31)) {
+ DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset <<= 8;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ case 1:
+ /* L2T, T2L partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ /* detile bit */
+ if (idx_value & (1 << 31)) {
+ /* tiled src, linear dst */
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ p->idx += 12;
+ break;
+ case 3:
+ /* L2T, broadcast */
+ if (idx_value & (1 << 31)) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset <<= 8;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ case 4:
+ /* L2T, T2L */
+ /* detile bit */
+ if (idx_value & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = radeon_get_ib_value(p, idx+1);
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = radeon_get_ib_value(p, idx+7);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ src_offset = radeon_get_ib_value(p, idx+7);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ p->idx += 9;
+ break;
+ case 5:
+ /* T2T partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+ ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ p->idx += 13;
+ break;
+ case 7:
+ /* L2T, broadcast */
+ if (idx_value & (1 << 31)) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset <<= 8;
+ src_offset = radeon_get_ib_value(p, idx+8);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+ ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 10;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
} else {
- /* linear src, tiled dst */
- ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
-
- ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
- }
- p->idx += 12;
- break;
- /* Copy L2T broadcast */
- case 0x4b:
- /* L2T, broadcast */
- if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- r = r600_dma_cs_next_reloc(p, &dst2_reloc);
- if (r) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset <<= 8;
- dst2_offset = radeon_get_ib_value(p, idx+2);
- dst2_offset <<= 8;
- src_offset = radeon_get_ib_value(p, idx+8);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
- return -EINVAL;
+ switch (misc) {
+ case 0:
+ /* detile bit */
+ if (idx_value & (1 << 31)) {
+ /* tiled src, linear dst */
+ src_offset = radeon_get_ib_value(p, idx+1);
+ src_offset <<= 8;
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+ dst_offset = radeon_get_ib_value(p, idx+7);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ } else {
+ /* linear src, tiled dst */
+ src_offset = radeon_get_ib_value(p, idx+7);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+ ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset <<= 8;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+ }
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ p->idx += 9;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
}
- ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
- ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
- ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
- ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
- p->idx += 10;
- break;
- /* Copy L2T/T2L (tile units) */
- case 0x4c:
- /* L2T, T2L */
- /* detile bit */
- if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
- /* tiled src, linear dst */
- src_offset = radeon_get_ib_value(p, idx+1);
- src_offset <<= 8;
- ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
-
- dst_offset = radeon_get_ib_value(p, idx+7);
- dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
- ib[idx+7] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+ } else {
+ if (new_cmd) {
+ switch (misc) {
+ case 0:
+ /* L2L, byte */
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+ if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
+ src_offset + count, radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
+ dst_offset + count, radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
+ break;
+ case 1:
+ /* L2L, partial */
+ if (p->family < CHIP_CAYMAN) {
+ DRM_ERROR("L2L Partial is cayman only !\n");
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+
+ p->idx += 9;
+ break;
+ case 4:
+ /* L2L, dw, broadcast */
+ r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+ if (r) {
+ DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
+ return -EINVAL;
+ }
+ dst_offset = radeon_get_ib_value(p, idx+1);
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+ dst2_offset = radeon_get_ib_value(p, idx+2);
+ dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
+ src_offset = radeon_get_ib_value(p, idx+3);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 7;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
} else {
- /* linear src, tiled dst */
- src_offset = radeon_get_ib_value(p, idx+7);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
- ib[idx+7] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
- ib[idx+8] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
-
+ /* L2L, dw */
+ src_offset = radeon_get_ib_value(p, idx+2);
+ src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset <<= 8;
- ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
- }
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- p->idx += 9;
- break;
- /* Copy T2T, partial (tile units) */
- case 0x4d:
- /* T2T partial */
- if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2T, T2L Partial is cayman only !\n");
- return -EINVAL;
- }
- ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
- ib[idx+4] += (u32)(dst_reloc->gpu_offset >> 8);
- p->idx += 13;
- break;
- /* Copy L2T broadcast (tile units) */
- case 0x4f:
- /* L2T, broadcast */
- if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- r = r600_dma_cs_next_reloc(p, &dst2_reloc);
- if (r) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
- return -EINVAL;
- }
- dst_offset = radeon_get_ib_value(p, idx+1);
- dst_offset <<= 8;
- dst2_offset = radeon_get_ib_value(p, idx+2);
- dst2_offset <<= 8;
- src_offset = radeon_get_ib_value(p, idx+8);
- src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
- if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
- return -EINVAL;
- }
- if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
- return -EINVAL;
- }
- if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
- return -EINVAL;
+ dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+ if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ return -EINVAL;
+ }
+ if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+ dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ p->idx += 5;
}
- ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
- ib[idx+2] += (u32)(dst2_reloc->gpu_offset >> 8);
- ib[idx+8] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
- ib[idx+9] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
- p->idx += 10;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header);
- return -EINVAL;
}
break;
case DMA_PACKET_CONSTANT_FILL:
@@ -3149,8 +3290,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset, radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
- ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
- ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) << 16) & 0x00ff0000;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
p->idx += 4;
break;
case DMA_PACKET_NOP:
@@ -3160,7 +3301,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
return -EINVAL;
}
- } while (p->idx < p->chunk_ib->length_dw);
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
for (r = 0; r < p->ib->length_dw; r++) {
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
@@ -3303,13 +3444,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
switch (pkt->opcode) {
case PACKET3_NOP:
- break;
case PACKET3_SET_BASE:
- if (idx_value != 1) {
- DRM_ERROR("bad SET_BASE");
- return -EINVAL;
- }
- break;
case PACKET3_CLEAR_STATE:
case PACKET3_INDEX_BUFFER_SIZE:
case PACKET3_DISPATCH_DIRECT:
@@ -3452,19 +3587,19 @@ int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
do {
pkt.idx = idx;
- pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
- pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
+ pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
+ pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
pkt.one_reg_wr = 0;
switch (pkt.type) {
- case RADEON_PACKET_TYPE0:
+ case PACKET_TYPE0:
dev_err(rdev->dev, "Packet0 not allowed!\n");
ret = -EINVAL;
break;
- case RADEON_PACKET_TYPE2:
+ case PACKET_TYPE2:
idx += 1;
break;
- case RADEON_PACKET_TYPE3:
- pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+ case PACKET_TYPE3:
+ pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
idx += pkt.count + 2;
break;
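
For readers following the walk above: a hedged sketch of the CP packet header fields it decodes. The helper names here are illustrative stand-ins; the real CP_PACKET_* macros come from evergreend.h.

#include <stdint.h>

/* Radeon CP packet header, as consumed by the loop above: type lives
 * in bits 30-31, the payload count in bits 16-29 (a type-3 packet
 * occupies count + 2 dwords including its header, hence
 * idx += pkt.count + 2 above), and the type-3 opcode in bits 8-15.
 * Type-2 packets are single-dword fillers, so the parser steps over
 * them with idx += 1.
 */
static inline unsigned cp_type(uint32_t header)   { return (header >> 30) & 0x3; }
static inline unsigned cp_count(uint32_t header)  { return (header >> 16) & 0x3fff; }
static inline unsigned cp_opcode(uint32_t header) { return (header >> 8) & 0xff; }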
@@ -3492,79 +3627,88 @@ int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
u32 idx = 0;
- u32 header, cmd, count, sub_cmd;
+ u32 header, cmd, count, tiled, new_cmd, misc;
do {
header = ib->ptr[idx];
cmd = GET_DMA_CMD(header);
count = GET_DMA_COUNT(header);
- sub_cmd = GET_DMA_SUB_CMD(header);
+ tiled = GET_DMA_T(header);
+ new_cmd = GET_DMA_NEW(header);
+ misc = GET_DMA_MISC(header);
switch (cmd) {
case DMA_PACKET_WRITE:
- switch (sub_cmd) {
- /* tiled */
- case 8:
+ if (tiled)
idx += count + 7;
- break;
- /* linear */
- case 0:
+ else
idx += count + 3;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, ib->ptr[idx]);
- return -EINVAL;
- }
break;
case DMA_PACKET_COPY:
- switch (sub_cmd) {
- /* Copy L2L, DW aligned */
- case 0x00:
- idx += 5;
- break;
- /* Copy L2T/T2L */
- case 0x08:
- idx += 9;
- break;
- /* Copy L2L, byte aligned */
- case 0x40:
- idx += 5;
- break;
- /* Copy L2L, partial */
- case 0x41:
- idx += 9;
- break;
- /* Copy L2L, DW aligned, broadcast */
- case 0x44:
- idx += 7;
- break;
- /* Copy L2T Frame to Field */
- case 0x48:
- idx += 10;
- break;
- /* Copy L2T/T2L, partial */
- case 0x49:
- idx += 12;
- break;
- /* Copy L2T broadcast */
- case 0x4b:
- idx += 10;
- break;
- /* Copy L2T/T2L (tile units) */
- case 0x4c:
- idx += 9;
- break;
- /* Copy T2T, partial (tile units) */
- case 0x4d:
- idx += 13;
- break;
- /* Copy L2T broadcast (tile units) */
- case 0x4f:
- idx += 10;
- break;
- default:
- DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib->ptr[idx]);
- return -EINVAL;
+ if (tiled) {
+ if (new_cmd) {
+ switch (misc) {
+ case 0:
+ /* L2T, frame to fields */
+ idx += 10;
+ break;
+ case 1:
+ /* L2T, T2L partial */
+ idx += 12;
+ break;
+ case 3:
+ /* L2T, broadcast */
+ idx += 10;
+ break;
+ case 4:
+ /* L2T, T2L */
+ idx += 9;
+ break;
+ case 5:
+ /* T2T partial */
+ idx += 13;
+ break;
+ case 7:
+ /* L2T, broadcast */
+ idx += 10;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ } else {
+ switch (misc) {
+ case 0:
+ idx += 9;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ }
+ } else {
+ if (new_cmd) {
+ switch (misc) {
+ case 0:
+ /* L2L, byte */
+ idx += 5;
+ break;
+ case 1:
+ /* L2L, partial */
+ idx += 9;
+ break;
+ case 4:
+ /* L2L, dw, broadcast */
+ idx += 7;
+ break;
+ default:
+ DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
+ return -EINVAL;
+ }
+ } else {
+ /* L2L, dw */
+ idx += 5;
+ }
}
break;
case DMA_PACKET_CONSTANT_FILL:
diff --git a/sys/dev/pci/drm/radeon/evergreen_hdmi.c b/sys/dev/pci/drm/radeon/evergreen_hdmi.c
index 025c30122af..4ff587b075d 100644
--- a/sys/dev/pci/drm/radeon/evergreen_hdmi.c
+++ b/sys/dev/pci/drm/radeon/evergreen_hdmi.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: evergreen_hdmi.c,v 1.4 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -24,191 +25,116 @@
* Authors: Christian König
* Rafał Miłecki
*/
-#include <dev/pci/drm/linux_hdmi.h>
#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
-#include "radeon_audio.h"
#include "evergreend.h"
#include "atom.h"
-/* enable the audio stream */
-void dce4_audio_enable(struct radeon_device *rdev,
- struct r600_audio_pin *pin,
- u8 enable_mask)
-{
- u32 tmp = RREG32(AZ_HOT_PLUG_CONTROL);
-
- if (!pin)
- return;
-
- if (enable_mask) {
- tmp |= AUDIO_ENABLED;
- if (enable_mask & 1)
- tmp |= PIN0_AUDIO_ENABLED;
- if (enable_mask & 2)
- tmp |= PIN1_AUDIO_ENABLED;
- if (enable_mask & 4)
- tmp |= PIN2_AUDIO_ENABLED;
- if (enable_mask & 8)
- tmp |= PIN3_AUDIO_ENABLED;
- } else {
- tmp &= ~(AUDIO_ENABLED |
- PIN0_AUDIO_ENABLED |
- PIN1_AUDIO_ENABLED |
- PIN2_AUDIO_ENABLED |
- PIN3_AUDIO_ENABLED);
- }
-
- WREG32(AZ_HOT_PLUG_CONTROL, tmp);
-}
-
-void evergreen_hdmi_update_acr(struct drm_encoder *encoder, long offset,
- const struct radeon_hdmi_acr *acr)
+/*
+ * update the N and CTS parameters for a given pixel clock rate
+ */
+static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
- int bpc = 8;
-
- if (encoder->crtc) {
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- bpc = radeon_crtc->bpc;
- }
-
- if (bpc > 8)
- WREG32(HDMI_ACR_PACKET_CONTROL + offset,
- HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
- else
- WREG32(HDMI_ACR_PACKET_CONTROL + offset,
- HDMI_ACR_SOURCE | /* select SW CTS value */
- HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
-
- WREG32(HDMI_ACR_32_0 + offset, HDMI_ACR_CTS_32(acr->cts_32khz));
- WREG32(HDMI_ACR_32_1 + offset, acr->n_32khz);
-
- WREG32(HDMI_ACR_44_0 + offset, HDMI_ACR_CTS_44(acr->cts_44_1khz));
- WREG32(HDMI_ACR_44_1 + offset, acr->n_44_1khz);
-
- WREG32(HDMI_ACR_48_0 + offset, HDMI_ACR_CTS_48(acr->cts_48khz));
- WREG32(HDMI_ACR_48_1 + offset, acr->n_48khz);
-}
-
-void dce4_afmt_write_latency_fields(struct drm_encoder *encoder,
- struct drm_connector *connector, struct drm_display_mode *mode)
-{
- struct radeon_device *rdev = encoder->dev->dev_private;
- u32 tmp = 0;
-
- if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
- if (connector->latency_present[1])
- tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
- AUDIO_LIPSYNC(connector->audio_latency[1]);
- else
- tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
- } else {
- if (connector->latency_present[0])
- tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
- AUDIO_LIPSYNC(connector->audio_latency[0]);
- else
- tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
- }
- WREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC, tmp);
-}
-
-void dce4_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
- u8 *sadb, int sad_count)
-{
- struct radeon_device *rdev = encoder->dev->dev_private;
- u32 tmp;
+ struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
- /* program the speaker allocation */
- tmp = RREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
- tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
- /* set HDMI mode */
- tmp |= HDMI_CONNECTION;
- if (sad_count)
- tmp |= SPEAKER_ALLOCATION(sadb[0]);
- else
- tmp |= SPEAKER_ALLOCATION(5); /* stereo */
- WREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
-}
+ WREG32(HDMI_ACR_32_0 + offset, HDMI_ACR_CTS_32(acr.cts_32khz));
+ WREG32(HDMI_ACR_32_1 + offset, acr.n_32khz);
-void dce4_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
- u8 *sadb, int sad_count)
-{
- struct radeon_device *rdev = encoder->dev->dev_private;
- u32 tmp;
+ WREG32(HDMI_ACR_44_0 + offset, HDMI_ACR_CTS_44(acr.cts_44_1khz));
+ WREG32(HDMI_ACR_44_1 + offset, acr.n_44_1khz);
- /* program the speaker allocation */
- tmp = RREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
- tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
- /* set DP mode */
- tmp |= DP_CONNECTION;
- if (sad_count)
- tmp |= SPEAKER_ALLOCATION(sadb[0]);
- else
- tmp |= SPEAKER_ALLOCATION(5); /* stereo */
- WREG32_ENDPOINT(0, AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
+ WREG32(HDMI_ACR_48_0 + offset, HDMI_ACR_CTS_48(acr.cts_48khz));
+ WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
}
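
r600_hdmi_acr() supplies N and CTS from a per-clock table; whatever the source, the pair must satisfy the HDMI audio clock regeneration relationship. A minimal sketch of that relationship, assuming the spec's recommended N values (4096, 6272 and 6144 for 32, 44.1 and 48 kHz):

#include <stdint.h>

/* HDMI ACR: the sink rebuilds the audio clock from
 * 128 * fs = pixel_clock * N / CTS, so given N and the pixel clock
 * (in kHz, like mode->clock) the source programs CTS as below.
 */
static uint32_t hdmi_cts(uint32_t clock_khz, uint32_t n, uint32_t fs_hz)
{
	return (uint64_t)clock_khz * 1000 * n / (128 * fs_hz);
}

/* e.g. hdmi_cts(148500, 6144, 48000) == 148500 for 1080p60 at 48 kHz */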
-void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder,
- struct cea_sad *sads, int sad_count)
+/*
+ * calculate the checksum for a given info frame
+ */
+static void evergreen_hdmi_infoframe_checksum(uint8_t packetType,
+ uint8_t versionNumber,
+ uint8_t length,
+ uint8_t *frame)
{
int i;
- struct radeon_device *rdev = encoder->dev->dev_private;
- static const u16 eld_reg_to_type[][2] = {
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
- { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
- };
-
- for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
- u32 value = 0;
- u8 stereo_freqs = 0;
- int max_channels = -1;
- int j;
-
- for (j = 0; j < sad_count; j++) {
- struct cea_sad *sad = &sads[j];
-
- if (sad->format == eld_reg_to_type[i][1]) {
- if (sad->channels > max_channels) {
- value = MAX_CHANNELS(sad->channels) |
- DESCRIPTOR_BYTE_2(sad->byte2) |
- SUPPORTED_FREQUENCIES(sad->freq);
- max_channels = sad->channels;
- }
-
- if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
- stereo_freqs |= sad->freq;
- else
- break;
- }
- }
-
- value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
-
- WREG32_ENDPOINT(0, eld_reg_to_type[i][0], value);
- }
+ frame[0] = packetType + versionNumber + length;
+ for (i = 1; i <= length; i++)
+ frame[0] += frame[i];
+ frame[0] = 0x100 - frame[0];
}
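
The computation makes the infoframe header and payload bytes sum to zero modulo 256. The same arithmetic as a stand-alone helper, with the payload indexed from 1 to match the frame[] layout above:

#include <stdint.h>

/* Mirror of evergreen_hdmi_infoframe_checksum() above: sum the type,
 * version, length and payload bytes, then store the two's complement
 * so the whole infoframe wraps to 0 mod 256.
 */
static uint8_t infoframe_checksum(uint8_t type, uint8_t version,
    uint8_t length, const uint8_t *frame)
{
	unsigned int sum = type + version + length;
	int i;

	for (i = 1; i <= length; i++)
		sum += frame[i];
	return (uint8_t)(0x100 - (sum & 0xff));
}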
/*
- * build a AVI Info Frame
+ * build a HDMI Video Info Frame
*/
-void evergreen_set_avi_packet(struct radeon_device *rdev, u32 offset,
- unsigned char *buffer, size_t size)
+static void evergreen_hdmi_videoinfoframe(
+ struct drm_encoder *encoder,
+ uint8_t color_format,
+ int active_information_present,
+ uint8_t active_format_aspect_ratio,
+ uint8_t scan_information,
+ uint8_t colorimetry,
+ uint8_t ex_colorimetry,
+ uint8_t quantization,
+ int ITC,
+ uint8_t picture_aspect_ratio,
+ uint8_t video_format_identification,
+ uint8_t pixel_repetition,
+ uint8_t non_uniform_picture_scaling,
+ uint8_t bar_info_data_valid,
+ uint16_t top_bar,
+ uint16_t bottom_bar,
+ uint16_t left_bar,
+ uint16_t right_bar
+)
{
- uint8_t *frame = buffer + 3;
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+
+ uint8_t frame[14];
+
+ frame[0x0] = 0;
+ frame[0x1] =
+ (scan_information & 0x3) |
+ ((bar_info_data_valid & 0x3) << 2) |
+ ((active_information_present & 0x1) << 4) |
+ ((color_format & 0x3) << 5);
+ frame[0x2] =
+ (active_format_aspect_ratio & 0xF) |
+ ((picture_aspect_ratio & 0x3) << 4) |
+ ((colorimetry & 0x3) << 6);
+ frame[0x3] =
+ (non_uniform_picture_scaling & 0x3) |
+ ((quantization & 0x3) << 2) |
+ ((ex_colorimetry & 0x7) << 4) |
+ ((ITC & 0x1) << 7);
+ frame[0x4] = (video_format_identification & 0x7F);
+ frame[0x5] = (pixel_repetition & 0xF);
+ frame[0x6] = (top_bar & 0xFF);
+ frame[0x7] = (top_bar >> 8);
+ frame[0x8] = (bottom_bar & 0xFF);
+ frame[0x9] = (bottom_bar >> 8);
+ frame[0xA] = (left_bar & 0xFF);
+ frame[0xB] = (left_bar >> 8);
+ frame[0xC] = (right_bar & 0xFF);
+ frame[0xD] = (right_bar >> 8);
+
+ evergreen_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
+ /* Our header values (type, version, length) should be alright; Intel
+ * uses the same ones. The checksum function also seems to be OK, since it
+ * works fine for the audio infoframe. However, the calculated value is
+ * always lower by 2 than what fglrx produces, which breaks display on TVs
+ * that strictly check the checksum. Hack it manually here to work around
+ * this issue. */
+ frame[0x0] += 2;
WREG32(AFMT_AVI_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
@@ -217,275 +143,75 @@ void evergreen_set_avi_packet(struct radeon_device *rdev, u32 offset,
WREG32(AFMT_AVI_INFO2 + offset,
frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
WREG32(AFMT_AVI_INFO3 + offset,
- frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24));
-
- WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset,
- HDMI_AVI_INFO_LINE(2), /* anything other than 0 */
- ~HDMI_AVI_INFO_LINE_MASK);
-}
-
-void dce4_hdmi_audio_set_dto(struct radeon_device *rdev,
- struct radeon_crtc *crtc, unsigned int clock)
-{
- unsigned int max_ratio = clock / 24000;
- u32 dto_phase;
- u32 wallclock_ratio;
- u32 value;
-
- if (max_ratio >= 8) {
- dto_phase = 192 * 1000;
- wallclock_ratio = 3;
- } else if (max_ratio >= 4) {
- dto_phase = 96 * 1000;
- wallclock_ratio = 2;
- } else if (max_ratio >= 2) {
- dto_phase = 48 * 1000;
- wallclock_ratio = 1;
- } else {
- dto_phase = 24 * 1000;
- wallclock_ratio = 0;
- }
-
- value = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
- value |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
- value &= ~DCCG_AUDIO_DTO1_USE_512FBR_DTO;
- WREG32(DCCG_AUDIO_DTO0_CNTL, value);
-
- /* Two dtos; generally use dto0 for HDMI */
- value = 0;
-
- if (crtc)
- value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
-
- WREG32(DCCG_AUDIO_DTO_SOURCE, value);
-
- /* Express [24MHz / target pixel clock] as an exact rational
- * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
- * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
- */
- WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
- WREG32(DCCG_AUDIO_DTO0_MODULE, clock);
-}
-
-void dce4_dp_audio_set_dto(struct radeon_device *rdev,
- struct radeon_crtc *crtc, unsigned int clock)
-{
- u32 value;
-
- value = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
- value |= DCCG_AUDIO_DTO1_USE_512FBR_DTO;
- WREG32(DCCG_AUDIO_DTO1_CNTL, value);
-
- /* Two dtos; generally use dto1 for DP */
- value = 0;
- value |= DCCG_AUDIO_DTO_SEL;
-
- if (crtc)
- value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
-
- WREG32(DCCG_AUDIO_DTO_SOURCE, value);
-
- /* Express [24MHz / target pixel clock] as an exact rational
- * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
- * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
- */
- if (ASIC_IS_DCE41(rdev)) {
- unsigned int div = (RREG32(DCE41_DENTIST_DISPCLK_CNTL) &
- DENTIST_DPREFCLK_WDIVIDER_MASK) >>
- DENTIST_DPREFCLK_WDIVIDER_SHIFT;
- div = radeon_audio_decode_dfs_div(div);
-
- if (div)
- clock = 100 * clock / div;
- }
-
- WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
- WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
+ frame[0xC] | (frame[0xD] << 8));
}
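
The WREG32 calls above lay those 14 bytes out little-endian across the AFMT_AVI_INFO0..3 dwords. The same byte order written out as plain C, a sketch rather than driver code (the AFMT_AVI_INFO1 write sits in unshown context, so dw[1] here is inferred from the pattern):

#include <stdint.h>

/* Little-endian packing used by the AFMT_AVI_INFOx writes above:
 * four payload bytes per dword, low byte first, with only two bytes
 * left over for the final register.
 */
static void pack_avi_infoframe(const uint8_t frame[14], uint32_t dw[4])
{
	dw[0] = frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) |
	    ((uint32_t)frame[0x3] << 24);
	dw[1] = frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) |
	    ((uint32_t)frame[0x7] << 24);
	dw[2] = frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) |
	    ((uint32_t)frame[0xB] << 24);
	dw[3] = frame[0xC] | (frame[0xD] << 8);
}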
-void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- WREG32(HDMI_VBI_PACKET_CONTROL + offset,
- HDMI_NULL_SEND | /* send null packets when required */
- HDMI_GC_SEND | /* send general control packets */
- HDMI_GC_CONT); /* send general control packets every frame */
-}
-
-void dce4_hdmi_set_color_depth(struct drm_encoder *encoder, u32 offset, int bpc)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
-#ifdef DRMDEBUG
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-#endif
- uint32_t val;
-
- val = RREG32(HDMI_CONTROL + offset);
- val &= ~HDMI_DEEP_COLOR_ENABLE;
- val &= ~HDMI_DEEP_COLOR_DEPTH_MASK;
-
- switch (bpc) {
- case 0:
- case 6:
- case 8:
- case 16:
- default:
- DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
- connector->name, bpc);
- break;
- case 10:
- val |= HDMI_DEEP_COLOR_ENABLE;
- val |= HDMI_DEEP_COLOR_DEPTH(HDMI_30BIT_DEEP_COLOR);
- DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
- connector->name);
- break;
- case 12:
- val |= HDMI_DEEP_COLOR_ENABLE;
- val |= HDMI_DEEP_COLOR_DEPTH(HDMI_36BIT_DEEP_COLOR);
- DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
- connector->name);
- break;
- }
-
- WREG32(HDMI_CONTROL + offset, val);
-}
-
-void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- WREG32(AFMT_INFOFRAME_CONTROL0 + offset,
- AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
-
- WREG32(AFMT_60958_0 + offset,
- AFMT_60958_CS_CHANNEL_NUMBER_L(1));
-
- WREG32(AFMT_60958_1 + offset,
- AFMT_60958_CS_CHANNEL_NUMBER_R(2));
-
- WREG32(AFMT_60958_2 + offset,
- AFMT_60958_CS_CHANNEL_NUMBER_2(3) |
- AFMT_60958_CS_CHANNEL_NUMBER_3(4) |
- AFMT_60958_CS_CHANNEL_NUMBER_4(5) |
- AFMT_60958_CS_CHANNEL_NUMBER_5(6) |
- AFMT_60958_CS_CHANNEL_NUMBER_6(7) |
- AFMT_60958_CS_CHANNEL_NUMBER_7(8));
-
- WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
- AFMT_AUDIO_CHANNEL_ENABLE(0xff));
-
- WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
- HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
- HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
-
- /* allow 60958 channel status and send audio packets fields to be updated */
- WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + offset,
- AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE);
-}
-
-
-void dce4_set_mute(struct drm_encoder *encoder, u32 offset, bool mute)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- if (mute)
- WREG32_OR(HDMI_GC + offset, HDMI_GC_AVMUTE);
- else
- WREG32_AND(HDMI_GC + offset, ~HDMI_GC_AVMUTE);
-}
-
-void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
+/*
+ * update the info frames with the data from the current display mode
+ */
+void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset;
if (!dig || !dig->afmt)
return;
- if (enable) {
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ /* Silent, r600_hdmi_enable will raise WARN for us */
+ if (!dig->afmt->enabled)
+ return;
+ offset = dig->afmt->offset;
+
+ r600_audio_set_clock(encoder, mode->clock);
- if (connector && drm_detect_monitor_audio(radeon_connector_edid(connector))) {
- WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
- HDMI_AVI_INFO_SEND | /* enable AVI info frames */
- HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */
- HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
- HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
- WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
- AFMT_AUDIO_SAMPLE_SEND);
- } else {
- WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
- HDMI_AVI_INFO_SEND | /* enable AVI info frames */
- HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */
- WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
- ~AFMT_AUDIO_SAMPLE_SEND);
- }
- } else {
- WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
- ~AFMT_AUDIO_SAMPLE_SEND);
- WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0);
- }
+ WREG32(HDMI_VBI_PACKET_CONTROL + offset,
+ HDMI_NULL_SEND); /* send null packets when required */
- dig->afmt->enabled = enable;
+ WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
- DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
- enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
-}
+ WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
+ HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
+ HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
-void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
+ AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
- if (!dig || !dig->afmt)
- return;
+ WREG32(HDMI_ACR_PACKET_CONTROL + offset,
+ HDMI_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
+ HDMI_ACR_SOURCE); /* select SW CTS value */
- if (enable && connector &&
- drm_detect_monitor_audio(radeon_connector_edid(connector))) {
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct radeon_connector_atom_dig *dig_connector;
- uint32_t val;
+ WREG32(HDMI_VBI_PACKET_CONTROL + offset,
+ HDMI_NULL_SEND | /* send null packets when required */
+ HDMI_GC_SEND | /* send general control packets */
+ HDMI_GC_CONT); /* send general control packets every frame */
- WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
- AFMT_AUDIO_SAMPLE_SEND);
+ WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
+ HDMI_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI_AVI_INFO_CONT | /* send AVI info frames every frame/field */
+ HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+ HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
- WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
- EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
+ WREG32(AFMT_INFOFRAME_CONTROL0 + offset,
+ AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
- if (!ASIC_IS_DCE6(rdev) && radeon_connector->con_priv) {
- dig_connector = radeon_connector->con_priv;
- val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset);
- val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf);
+ WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
+ HDMI_AVI_INFO_LINE(2) | /* anything other than 0 */
+ HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
- if (dig_connector->dp_clock == 162000)
- val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(3);
- else
- val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(5);
+ WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
- WREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset, val);
- }
+ evergreen_hdmi_videoinfoframe(encoder, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0);
- WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset,
- EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */
- EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */
- EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */
- EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
- } else {
- WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
- WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
- ~AFMT_AUDIO_SAMPLE_SEND);
- }
+ evergreen_hdmi_update_ACR(encoder, mode->clock);
- dig->afmt->enabled = enable;
+ /* it's unknown what these bits do exactly, but they're quite useful for debugging */
+ WREG32(AFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
+ WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
+ WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
+ WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
}
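
As a concrete check on the numbers: the all-zero evergreen_hdmi_videoinfoframe() call above leaves every payload byte at zero, so only the header bytes (type 0x82, version 0x02, length 0x0D) feed the checksum, plus the fglrx +2 hack. A tiny worked example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t sum = 0x82 + 0x02 + 0x0D;	/* payload bytes all zero */
	uint8_t csum = 0x100 - sum;		/* 0x6f */

	/* prints: checksum 0x6f, after +2 hack 0x71 */
	printf("checksum 0x%02x, after +2 hack 0x%02x\n", csum, csum + 2);
	return 0;
}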
diff --git a/sys/dev/pci/drm/radeon/evergreen_reg.h b/sys/dev/pci/drm/radeon/evergreen_reg.h
index b436badf9ef..584417f1f52 100644
--- a/sys/dev/pci/drm/radeon/evergreen_reg.h
+++ b/sys/dev/pci/drm/radeon/evergreen_reg.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: evergreen_reg.h,v 1.3 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2010 Advanced Micro Devices, Inc.
*
@@ -24,17 +25,7 @@
#ifndef __EVERGREEN_REG_H__
#define __EVERGREEN_REG_H__
-/* trinity */
-#define TN_SMC_IND_INDEX_0 0x200
-#define TN_SMC_IND_DATA_0 0x204
-
/* evergreen */
-#define EVERGREEN_PIF_PHY0_INDEX 0x8
-#define EVERGREEN_PIF_PHY0_DATA 0xc
-#define EVERGREEN_PIF_PHY1_INDEX 0x10
-#define EVERGREEN_PIF_PHY1_DATA 0x14
-#define EVERGREEN_MM_INDEX_HI 0x18
-
#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0x310
#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0x324
#define EVERGREEN_D3VGA_CONTROL 0x3e0
@@ -50,9 +41,6 @@
#define EVERGREEN_AUDIO_PLL1_DIV 0x5b4
#define EVERGREEN_AUDIO_PLL1_UNK 0x5bc
-#define EVERGREEN_CG_IND_ADDR 0x8f8
-#define EVERGREEN_CG_IND_DATA 0x8fc
-
#define EVERGREEN_AUDIO_ENABLE 0x5e78
#define EVERGREEN_AUDIO_VENDOR_ID 0x5ec0
@@ -116,8 +104,6 @@
# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
# define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1 2
# define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1 4
-#define EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL 0x6808
-# define EVERGREEN_LUT_10BIT_BYPASS_EN (1 << 8)
#define EVERGREEN_GRPH_SWAP_CONTROL 0x680c
# define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
# define EVERGREEN_GRPH_ENDIAN_NONE 0
@@ -238,7 +224,7 @@
#define EVERGREEN_CRTC_STATUS 0x6e8c
# define EVERGREEN_CRTC_V_BLANK (1 << 0)
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
-#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0
+#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
@@ -250,66 +236,5 @@
/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
#define EVERGREEN_HDMI_BASE 0x7030
-/*DIG block*/
-#define NI_DIG0_REGISTER_OFFSET (0x7000 - 0x7000)
-#define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000)
-#define NI_DIG2_REGISTER_OFFSET (0x10800 - 0x7000)
-#define NI_DIG3_REGISTER_OFFSET (0x11400 - 0x7000)
-#define NI_DIG4_REGISTER_OFFSET (0x12000 - 0x7000)
-#define NI_DIG5_REGISTER_OFFSET (0x12C00 - 0x7000)
-
-
-#define NI_DIG_FE_CNTL 0x7000
-# define NI_DIG_FE_CNTL_SOURCE_SELECT(x) ((x) & 0x3)
-# define NI_DIG_FE_CNTL_SYMCLK_FE_ON (1<<24)
-
-
-#define NI_DIG_BE_CNTL 0x7140
-# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8 ) & 0x3F)
-# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7 )
-
-#define NI_DIG_BE_EN_CNTL 0x7144
-# define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0)
-# define NI_DIG_BE_EN_CNTL_SYMBCLK_ON (1 << 8)
-# define NI_DIG_BE_DPSST 0
-
-/* Display Port block */
-#define EVERGREEN_DP0_REGISTER_OFFSET (0x730C - 0x730C)
-#define EVERGREEN_DP1_REGISTER_OFFSET (0x7F0C - 0x730C)
-#define EVERGREEN_DP2_REGISTER_OFFSET (0x10B0C - 0x730C)
-#define EVERGREEN_DP3_REGISTER_OFFSET (0x1170C - 0x730C)
-#define EVERGREEN_DP4_REGISTER_OFFSET (0x1230C - 0x730C)
-#define EVERGREEN_DP5_REGISTER_OFFSET (0x12F0C - 0x730C)
-
-
-#define EVERGREEN_DP_VID_STREAM_CNTL 0x730C
-# define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0)
-# define EVERGREEN_DP_VID_STREAM_STATUS (1 <<16)
-#define EVERGREEN_DP_STEER_FIFO 0x7310
-# define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0)
-#define EVERGREEN_DP_SEC_CNTL 0x7280
-# define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0)
-# define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4)
-# define EVERGREEN_DP_SEC_ATP_ENABLE (1 << 8)
-# define EVERGREEN_DP_SEC_AIP_ENABLE (1 << 12)
-# define EVERGREEN_DP_SEC_GSP_ENABLE (1 << 20)
-# define EVERGREEN_DP_SEC_AVI_ENABLE (1 << 24)
-# define EVERGREEN_DP_SEC_MPG_ENABLE (1 << 28)
-#define EVERGREEN_DP_SEC_TIMESTAMP 0x72a4
-# define EVERGREEN_DP_SEC_TIMESTAMP_MODE(x) (((x) & 0x3) << 0)
-#define EVERGREEN_DP_SEC_AUD_N 0x7294
-# define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24)
-# define EVERGREEN_DP_SEC_SS_EN (1 << 28)
-
-/*DCIO_UNIPHY block*/
-#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 -0x6600)
-#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 -0x6600)
-#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600)
-#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600)
-#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600)
-#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 (0x6740 - 0x6600)
-
-#define NI_DCIO_UNIPHY0_PLL_CONTROL1 0x6618
-# define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE (1 << 0)
#endif
diff --git a/sys/dev/pci/drm/radeon/evergreend.h b/sys/dev/pci/drm/radeon/evergreend.h
index 13b6029d65c..32e4a326b09 100644
--- a/sys/dev/pci/drm/radeon/evergreend.h
+++ b/sys/dev/pci/drm/radeon/evergreend.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: evergreend.h,v 1.5 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2010 Advanced Micro Devices, Inc.
*
@@ -48,367 +49,11 @@
#define SUMO_GB_ADDR_CONFIG_GOLDEN 0x02010002
#define SUMO2_GB_ADDR_CONFIG_GOLDEN 0x02010002
-/* pm registers */
-#define SMC_MSG 0x20c
-#define HOST_SMC_MSG(x) ((x) << 0)
-#define HOST_SMC_MSG_MASK (0xff << 0)
-#define HOST_SMC_MSG_SHIFT 0
-#define HOST_SMC_RESP(x) ((x) << 8)
-#define HOST_SMC_RESP_MASK (0xff << 8)
-#define HOST_SMC_RESP_SHIFT 8
-#define SMC_HOST_MSG(x) ((x) << 16)
-#define SMC_HOST_MSG_MASK (0xff << 16)
-#define SMC_HOST_MSG_SHIFT 16
-#define SMC_HOST_RESP(x) ((x) << 24)
-#define SMC_HOST_RESP_MASK (0xff << 24)
-#define SMC_HOST_RESP_SHIFT 24
-
-#define DCCG_DISP_SLOW_SELECT_REG 0x4fc
-#define DCCG_DISP1_SLOW_SELECT(x) ((x) << 0)
-#define DCCG_DISP1_SLOW_SELECT_MASK (7 << 0)
-#define DCCG_DISP1_SLOW_SELECT_SHIFT 0
-#define DCCG_DISP2_SLOW_SELECT(x) ((x) << 4)
-#define DCCG_DISP2_SLOW_SELECT_MASK (7 << 4)
-#define DCCG_DISP2_SLOW_SELECT_SHIFT 4
-
-#define CG_SPLL_FUNC_CNTL 0x600
-#define SPLL_RESET (1 << 0)
-#define SPLL_SLEEP (1 << 1)
-#define SPLL_BYPASS_EN (1 << 3)
-#define SPLL_REF_DIV(x) ((x) << 4)
-#define SPLL_REF_DIV_MASK (0x3f << 4)
-#define SPLL_PDIV_A(x) ((x) << 20)
-#define SPLL_PDIV_A_MASK (0x7f << 20)
-#define CG_SPLL_FUNC_CNTL_2 0x604
-#define SCLK_MUX_SEL(x) ((x) << 0)
-#define SCLK_MUX_SEL_MASK (0x1ff << 0)
-#define SCLK_MUX_UPDATE (1 << 26)
-#define CG_SPLL_FUNC_CNTL_3 0x608
-#define SPLL_FB_DIV(x) ((x) << 0)
-#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
-#define SPLL_DITHEN (1 << 28)
-#define CG_SPLL_STATUS 0x60c
-#define SPLL_CHG_STATUS (1 << 1)
-
-#define MPLL_CNTL_MODE 0x61c
-# define MPLL_MCLK_SEL (1 << 11)
-# define SS_SSEN (1 << 24)
-# define SS_DSMODE_EN (1 << 25)
-
-#define MPLL_AD_FUNC_CNTL 0x624
-#define CLKF(x) ((x) << 0)
-#define CLKF_MASK (0x7f << 0)
-#define CLKR(x) ((x) << 7)
-#define CLKR_MASK (0x1f << 7)
-#define CLKFRAC(x) ((x) << 12)
-#define CLKFRAC_MASK (0x1f << 12)
-#define YCLK_POST_DIV(x) ((x) << 17)
-#define YCLK_POST_DIV_MASK (3 << 17)
-#define IBIAS(x) ((x) << 20)
-#define IBIAS_MASK (0x3ff << 20)
-#define RESET (1 << 30)
-#define PDNB (1 << 31)
-#define MPLL_AD_FUNC_CNTL_2 0x628
-#define BYPASS (1 << 19)
-#define BIAS_GEN_PDNB (1 << 24)
-#define RESET_EN (1 << 25)
-#define VCO_MODE (1 << 29)
-#define MPLL_DQ_FUNC_CNTL 0x62c
-#define MPLL_DQ_FUNC_CNTL_2 0x630
-
-#define GENERAL_PWRMGT 0x63c
-# define GLOBAL_PWRMGT_EN (1 << 0)
-# define STATIC_PM_EN (1 << 1)
-# define THERMAL_PROTECTION_DIS (1 << 2)
-# define THERMAL_PROTECTION_TYPE (1 << 3)
-# define ENABLE_GEN2PCIE (1 << 4)
-# define ENABLE_GEN2XSP (1 << 5)
-# define SW_SMIO_INDEX(x) ((x) << 6)
-# define SW_SMIO_INDEX_MASK (3 << 6)
-# define SW_SMIO_INDEX_SHIFT 6
-# define LOW_VOLT_D2_ACPI (1 << 8)
-# define LOW_VOLT_D3_ACPI (1 << 9)
-# define VOLT_PWRMGT_EN (1 << 10)
-# define BACKBIAS_PAD_EN (1 << 18)
-# define BACKBIAS_VALUE (1 << 19)
-# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
-# define AC_DC_SW (1 << 24)
-
-#define SCLK_PWRMGT_CNTL 0x644
-# define SCLK_PWRMGT_OFF (1 << 0)
-# define SCLK_LOW_D1 (1 << 1)
-# define FIR_RESET (1 << 4)
-# define FIR_FORCE_TREND_SEL (1 << 5)
-# define FIR_TREND_MODE (1 << 6)
-# define DYN_GFX_CLK_OFF_EN (1 << 7)
-# define GFX_CLK_FORCE_ON (1 << 8)
-# define GFX_CLK_REQUEST_OFF (1 << 9)
-# define GFX_CLK_FORCE_OFF (1 << 10)
-# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
-# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
-# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
-# define DYN_LIGHT_SLEEP_EN (1 << 14)
-#define MCLK_PWRMGT_CNTL 0x648
-# define DLL_SPEED(x) ((x) << 0)
-# define DLL_SPEED_MASK (0x1f << 0)
-# define MPLL_PWRMGT_OFF (1 << 5)
-# define DLL_READY (1 << 6)
-# define MC_INT_CNTL (1 << 7)
-# define MRDCKA0_PDNB (1 << 8)
-# define MRDCKA1_PDNB (1 << 9)
-# define MRDCKB0_PDNB (1 << 10)
-# define MRDCKB1_PDNB (1 << 11)
-# define MRDCKC0_PDNB (1 << 12)
-# define MRDCKC1_PDNB (1 << 13)
-# define MRDCKD0_PDNB (1 << 14)
-# define MRDCKD1_PDNB (1 << 15)
-# define MRDCKA0_RESET (1 << 16)
-# define MRDCKA1_RESET (1 << 17)
-# define MRDCKB0_RESET (1 << 18)
-# define MRDCKB1_RESET (1 << 19)
-# define MRDCKC0_RESET (1 << 20)
-# define MRDCKC1_RESET (1 << 21)
-# define MRDCKD0_RESET (1 << 22)
-# define MRDCKD1_RESET (1 << 23)
-# define DLL_READY_READ (1 << 24)
-# define USE_DISPLAY_GAP (1 << 25)
-# define USE_DISPLAY_URGENT_NORMAL (1 << 26)
-# define MPLL_TURNOFF_D2 (1 << 28)
-#define DLL_CNTL 0x64c
-# define MRDCKA0_BYPASS (1 << 24)
-# define MRDCKA1_BYPASS (1 << 25)
-# define MRDCKB0_BYPASS (1 << 26)
-# define MRDCKB1_BYPASS (1 << 27)
-# define MRDCKC0_BYPASS (1 << 28)
-# define MRDCKC1_BYPASS (1 << 29)
-# define MRDCKD0_BYPASS (1 << 30)
-# define MRDCKD1_BYPASS (1 << 31)
-
-#define CG_AT 0x6d4
-# define CG_R(x) ((x) << 0)
-# define CG_R_MASK (0xffff << 0)
-# define CG_L(x) ((x) << 16)
-# define CG_L_MASK (0xffff << 16)
-
-#define CG_DISPLAY_GAP_CNTL 0x714
-# define DISP1_GAP(x) ((x) << 0)
-# define DISP1_GAP_MASK (3 << 0)
-# define DISP2_GAP(x) ((x) << 2)
-# define DISP2_GAP_MASK (3 << 2)
-# define VBI_TIMER_COUNT(x) ((x) << 4)
-# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
-# define VBI_TIMER_UNIT(x) ((x) << 20)
-# define VBI_TIMER_UNIT_MASK (7 << 20)
-# define DISP1_GAP_MCHG(x) ((x) << 24)
-# define DISP1_GAP_MCHG_MASK (3 << 24)
-# define DISP2_GAP_MCHG(x) ((x) << 26)
-# define DISP2_GAP_MCHG_MASK (3 << 26)
-
-#define CG_BIF_REQ_AND_RSP 0x7f4
-#define CG_CLIENT_REQ(x) ((x) << 0)
-#define CG_CLIENT_REQ_MASK (0xff << 0)
-#define CG_CLIENT_REQ_SHIFT 0
-#define CG_CLIENT_RESP(x) ((x) << 8)
-#define CG_CLIENT_RESP_MASK (0xff << 8)
-#define CG_CLIENT_RESP_SHIFT 8
-#define CLIENT_CG_REQ(x) ((x) << 16)
-#define CLIENT_CG_REQ_MASK (0xff << 16)
-#define CLIENT_CG_REQ_SHIFT 16
-#define CLIENT_CG_RESP(x) ((x) << 24)
-#define CLIENT_CG_RESP_MASK (0xff << 24)
-#define CLIENT_CG_RESP_SHIFT 24
-
-#define CG_SPLL_SPREAD_SPECTRUM 0x790
-#define SSEN (1 << 0)
-#define CG_SPLL_SPREAD_SPECTRUM_2 0x794
-
-#define MPLL_SS1 0x85c
-#define CLKV(x) ((x) << 0)
-#define CLKV_MASK (0x3ffffff << 0)
-#define MPLL_SS2 0x860
-#define CLKS(x) ((x) << 0)
-#define CLKS_MASK (0xfff << 0)
-
-#define CG_IND_ADDR 0x8f8
-#define CG_IND_DATA 0x8fc
-/* CGIND regs */
-#define CG_CGTT_LOCAL_0 0x00
-#define CG_CGTT_LOCAL_1 0x01
-#define CG_CGTT_LOCAL_2 0x02
-#define CG_CGTT_LOCAL_3 0x03
-#define CG_CGLS_TILE_0 0x20
-#define CG_CGLS_TILE_1 0x21
-#define CG_CGLS_TILE_2 0x22
-#define CG_CGLS_TILE_3 0x23
-#define CG_CGLS_TILE_4 0x24
-#define CG_CGLS_TILE_5 0x25
-#define CG_CGLS_TILE_6 0x26
-#define CG_CGLS_TILE_7 0x27
-#define CG_CGLS_TILE_8 0x28
-#define CG_CGLS_TILE_9 0x29
-#define CG_CGLS_TILE_10 0x2a
-#define CG_CGLS_TILE_11 0x2b
-
-#define VM_L2_CG 0x15c0
-
-#define MC_CONFIG 0x2000
-
-#define MC_CONFIG_MCD 0x20a0
-#define MC_CG_CONFIG_MCD 0x20a4
-#define MC_RD_ENABLE_MCD(x) ((x) << 8)
-#define MC_RD_ENABLE_MCD_MASK (7 << 8)
-
-#define MC_HUB_MISC_HUB_CG 0x20b8
-#define MC_HUB_MISC_VM_CG 0x20bc
-#define MC_HUB_MISC_SIP_CG 0x20c0
-
-#define MC_XPB_CLK_GAT 0x2478
-
-#define MC_CG_CONFIG 0x25bc
-#define MC_RD_ENABLE(x) ((x) << 4)
-#define MC_RD_ENABLE_MASK (3 << 4)
-
-#define MC_CITF_MISC_RD_CG 0x2648
-#define MC_CITF_MISC_WR_CG 0x264c
-#define MC_CITF_MISC_VM_CG 0x2650
-# define MEM_LS_ENABLE (1 << 19)
-
-#define MC_ARB_BURST_TIME 0x2808
-#define STATE0(x) ((x) << 0)
-#define STATE0_MASK (0x1f << 0)
-#define STATE1(x) ((x) << 5)
-#define STATE1_MASK (0x1f << 5)
-#define STATE2(x) ((x) << 10)
-#define STATE2_MASK (0x1f << 10)
-#define STATE3(x) ((x) << 15)
-#define STATE3_MASK (0x1f << 15)
-
-#define MC_SEQ_RAS_TIMING 0x28a0
-#define MC_SEQ_CAS_TIMING 0x28a4
-#define MC_SEQ_MISC_TIMING 0x28a8
-#define MC_SEQ_MISC_TIMING2 0x28ac
-
-#define MC_SEQ_RD_CTL_D0 0x28b4
-#define MC_SEQ_RD_CTL_D1 0x28b8
-#define MC_SEQ_WR_CTL_D0 0x28bc
-#define MC_SEQ_WR_CTL_D1 0x28c0
-
-#define MC_SEQ_STATUS_M 0x29f4
-# define PMG_PWRSTATE (1 << 16)
-
-#define MC_SEQ_MISC1 0x2a04
-#define MC_SEQ_RESERVE_M 0x2a08
-#define MC_PMG_CMD_EMRS 0x2a0c
-
-#define MC_SEQ_MISC3 0x2a2c
-
-#define MC_SEQ_MISC5 0x2a54
-#define MC_SEQ_MISC6 0x2a58
-
-#define MC_SEQ_MISC7 0x2a64
-
-#define MC_SEQ_CG 0x2a68
-#define CG_SEQ_REQ(x) ((x) << 0)
-#define CG_SEQ_REQ_MASK (0xff << 0)
-#define CG_SEQ_REQ_SHIFT 0
-#define CG_SEQ_RESP(x) ((x) << 8)
-#define CG_SEQ_RESP_MASK (0xff << 8)
-#define CG_SEQ_RESP_SHIFT 8
-#define SEQ_CG_REQ(x) ((x) << 16)
-#define SEQ_CG_REQ_MASK (0xff << 16)
-#define SEQ_CG_REQ_SHIFT 16
-#define SEQ_CG_RESP(x) ((x) << 24)
-#define SEQ_CG_RESP_MASK (0xff << 24)
-#define SEQ_CG_RESP_SHIFT 24
-#define MC_SEQ_RAS_TIMING_LP 0x2a6c
-#define MC_SEQ_CAS_TIMING_LP 0x2a70
-#define MC_SEQ_MISC_TIMING_LP 0x2a74
-#define MC_SEQ_MISC_TIMING2_LP 0x2a78
-#define MC_SEQ_WR_CTL_D0_LP 0x2a7c
-#define MC_SEQ_WR_CTL_D1_LP 0x2a80
-#define MC_SEQ_PMG_CMD_EMRS_LP 0x2a84
-#define MC_SEQ_PMG_CMD_MRS_LP 0x2a88
-
-#define MC_PMG_CMD_MRS 0x2aac
-
-#define MC_SEQ_RD_CTL_D0_LP 0x2b1c
-#define MC_SEQ_RD_CTL_D1_LP 0x2b20
-
-#define MC_PMG_CMD_MRS1 0x2b44
-#define MC_SEQ_PMG_CMD_MRS1_LP 0x2b48
-
-#define CGTS_SM_CTRL_REG 0x9150
-
/* Registers */
#define RCU_IND_INDEX 0x100
#define RCU_IND_DATA 0x104
-/* discrete uvd clocks */
-#define CG_UPLL_FUNC_CNTL 0x718
-# define UPLL_RESET_MASK 0x00000001
-# define UPLL_SLEEP_MASK 0x00000002
-# define UPLL_BYPASS_EN_MASK 0x00000004
-# define UPLL_CTLREQ_MASK 0x00000008
-# define UPLL_REF_DIV_MASK 0x003F0000
-# define UPLL_VCO_MODE_MASK 0x00000200
-# define UPLL_CTLACK_MASK 0x40000000
-# define UPLL_CTLACK2_MASK 0x80000000
-#define CG_UPLL_FUNC_CNTL_2 0x71c
-# define UPLL_PDIV_A(x) ((x) << 0)
-# define UPLL_PDIV_A_MASK 0x0000007F
-# define UPLL_PDIV_B(x) ((x) << 8)
-# define UPLL_PDIV_B_MASK 0x00007F00
-# define VCLK_SRC_SEL(x) ((x) << 20)
-# define VCLK_SRC_SEL_MASK 0x01F00000
-# define DCLK_SRC_SEL(x) ((x) << 25)
-# define DCLK_SRC_SEL_MASK 0x3E000000
-#define CG_UPLL_FUNC_CNTL_3 0x720
-# define UPLL_FB_DIV(x) ((x) << 0)
-# define UPLL_FB_DIV_MASK 0x01FFFFFF
-#define CG_UPLL_FUNC_CNTL_4 0x854
-# define UPLL_SPARE_ISPARE9 0x00020000
-#define CG_UPLL_SPREAD_SPECTRUM 0x79c
-# define SSEN_MASK 0x00000001
-
-/* fusion uvd clocks */
-#define CG_DCLK_CNTL 0x610
-# define DCLK_DIVIDER_MASK 0x7f
-# define DCLK_DIR_CNTL_EN (1 << 8)
-#define CG_DCLK_STATUS 0x614
-# define DCLK_STATUS (1 << 0)
-#define CG_VCLK_CNTL 0x618
-#define CG_VCLK_STATUS 0x61c
-#define CG_SCRATCH1 0x820
-
-#define RLC_CNTL 0x3f00
-# define RLC_ENABLE (1 << 0)
-# define GFX_POWER_GATING_ENABLE (1 << 7)
-# define GFX_POWER_GATING_SRC (1 << 8)
-# define DYN_PER_SIMD_PG_ENABLE (1 << 27)
-# define LB_CNT_SPIM_ACTIVE (1 << 30)
-# define LOAD_BALANCE_ENABLE (1 << 31)
-
-#define RLC_HB_BASE 0x3f10
-#define RLC_HB_CNTL 0x3f0c
-#define RLC_HB_RPTR 0x3f20
-#define RLC_HB_WPTR 0x3f1c
-#define RLC_HB_WPTR_LSB_ADDR 0x3f14
-#define RLC_HB_WPTR_MSB_ADDR 0x3f18
-#define RLC_MC_CNTL 0x3f44
-#define RLC_UCODE_CNTL 0x3f48
-#define RLC_UCODE_ADDR 0x3f2c
-#define RLC_UCODE_DATA 0x3f30
-
-/* new for TN */
-#define TN_RLC_SAVE_AND_RESTORE_BASE 0x3f10
-#define TN_RLC_LB_CNTR_MAX 0x3f14
-#define TN_RLC_LB_CNTR_INIT 0x3f18
-#define TN_RLC_CLEAR_STATE_RESTORE_BASE 0x3f20
-#define TN_RLC_LB_INIT_SIMD_MASK 0x3fe4
-#define TN_RLC_LB_ALWAYS_ACTIVE_SIMD_MASK 0x3fe8
-#define TN_RLC_LB_PARAMS 0x3fec
-
#define GRBM_GFX_INDEX 0x802C
#define INSTANCE_INDEX(x) ((x) << 0)
#define SE_INDEX(x) ((x) << 16)
@@ -501,20 +146,11 @@
#define DCCG_AUDIO_DTO0_MODULE 0x05b4
#define DCCG_AUDIO_DTO0_LOAD 0x05b8
#define DCCG_AUDIO_DTO0_CNTL 0x05bc
-# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0)
-# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7
-# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0
#define DCCG_AUDIO_DTO1_PHASE 0x05c0
#define DCCG_AUDIO_DTO1_MODULE 0x05c4
#define DCCG_AUDIO_DTO1_LOAD 0x05c8
#define DCCG_AUDIO_DTO1_CNTL 0x05cc
-# define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3)
-
-#define DCE41_DENTIST_DISPCLK_CNTL 0x049c
-# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24)
-# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24)
-# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24
/* DCE 4.0 AFMT */
#define HDMI_CONTROL 0x7030
@@ -523,11 +159,10 @@
# define HDMI_ERROR_ACK (1 << 8)
# define HDMI_ERROR_MASK (1 << 9)
# define HDMI_DEEP_COLOR_ENABLE (1 << 24)
-# define HDMI_DEEP_COLOR_DEPTH(x) (((x) & 3) << 28)
+# define HDMI_DEEP_COLOR_DEPTH (((x) & 3) << 28)
# define HDMI_24BIT_DEEP_COLOR 0
# define HDMI_30BIT_DEEP_COLOR 1
# define HDMI_36BIT_DEEP_COLOR 2
-# define HDMI_DEEP_COLOR_DEPTH_MASK (3 << 28)
#define HDMI_STATUS 0x7034
# define HDMI_ACTIVE_AVMUTE (1 << 0)
# define HDMI_AUDIO_PACKET_ERROR (1 << 16)
@@ -563,7 +198,6 @@
# define HDMI_MPEG_INFO_CONT (1 << 9)
#define HDMI_INFOFRAME_CONTROL1 0x7048
# define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
-# define HDMI_AVI_INFO_LINE_MASK (0x3f << 0)
# define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
# define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
#define HDMI_GENERIC_PACKET_CONTROL 0x704c
@@ -725,13 +359,6 @@
#define AFMT_GENERIC0_7 0x7138
/* DCE4/5 ELD audio interface */
-#define AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER 0x5f78
-#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0)
-#define SPEAKER_ALLOCATION_MASK (0x7f << 0)
-#define SPEAKER_ALLOCATION_SHIFT 0
-#define HDMI_CONNECTION (1 << 16)
-#define DP_CONNECTION (1 << 17)
-
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x5f84 /* LPCM */
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x5f88 /* AC3 */
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x5f8c /* MPEG1 */
@@ -761,44 +388,6 @@
* bit6 = 192 kHz
*/
-#define AZ_CHANNEL_COUNT_CONTROL 0x5fe4
-# define HBR_CHANNEL_COUNT(x) (((x) & 0x7) << 0)
-# define COMPRESSED_CHANNEL_COUNT(x) (((x) & 0x7) << 4)
-/* HBR_CHANNEL_COUNT, COMPRESSED_CHANNEL_COUNT
- * 0 = use stream header
- * 1-7 = channel count - 1
- */
-#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_LIPSYNC 0x5fe8
-# define VIDEO_LIPSYNC(x) (((x) & 0xff) << 0)
-# define AUDIO_LIPSYNC(x) (((x) & 0xff) << 8)
-/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
- * 0 = invalid
- * x = legal delay value
- * 255 = sync not supported
- */
-#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_HBR 0x5fec
-# define HBR_CAPABLE (1 << 0) /* enabled by default */
-
-#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_ASSOCIATION0 0x5ff4
-# define DISPLAY0_TYPE(x) (((x) & 0x3) << 0)
-# define DISPLAY_TYPE_NONE 0
-# define DISPLAY_TYPE_HDMI 1
-# define DISPLAY_TYPE_DP 2
-# define DISPLAY0_ID(x) (((x) & 0x3f) << 2)
-# define DISPLAY1_TYPE(x) (((x) & 0x3) << 8)
-# define DISPLAY1_ID(x) (((x) & 0x3f) << 10)
-# define DISPLAY2_TYPE(x) (((x) & 0x3) << 16)
-# define DISPLAY2_ID(x) (((x) & 0x3f) << 18)
-# define DISPLAY3_TYPE(x) (((x) & 0x3) << 24)
-# define DISPLAY3_ID(x) (((x) & 0x3f) << 26)
-#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_ASSOCIATION1 0x5ff8
-# define DISPLAY4_TYPE(x) (((x) & 0x3) << 0)
-# define DISPLAY4_ID(x) (((x) & 0x3f) << 2)
-# define DISPLAY5_TYPE(x) (((x) & 0x3) << 8)
-# define DISPLAY5_ID(x) (((x) & 0x3f) << 10)
-#define AZ_F0_CODEC_PIN0_CONTROL_RESPONSE_AV_NUMBER 0x5ffc
-# define NUMBER_OF_DISPLAY_ID(x) (((x) & 0x7) << 0)
-
#define AZ_HOT_PLUG_CONTROL 0x5e78
# define AZ_FORCE_CODEC_WAKE (1 << 0)
# define PIN0_JACK_DETECTION_ENABLE (1 << 4)
@@ -877,30 +466,6 @@
#define CG_THERMAL_CTRL 0x72c
#define TOFFSET_MASK 0x00003FE0
#define TOFFSET_SHIFT 5
-#define DIG_THERM_DPM(x) ((x) << 14)
-#define DIG_THERM_DPM_MASK 0x003FC000
-#define DIG_THERM_DPM_SHIFT 14
-
-#define CG_THERMAL_INT 0x734
-#define DIG_THERM_INTH(x) ((x) << 8)
-#define DIG_THERM_INTH_MASK 0x0000FF00
-#define DIG_THERM_INTH_SHIFT 8
-#define DIG_THERM_INTL(x) ((x) << 16)
-#define DIG_THERM_INTL_MASK 0x00FF0000
-#define DIG_THERM_INTL_SHIFT 16
-#define THERM_INT_MASK_HIGH (1 << 24)
-#define THERM_INT_MASK_LOW (1 << 25)
-
-#define TN_CG_THERMAL_INT_CTRL 0x738
-#define TN_DIG_THERM_INTH(x) ((x) << 0)
-#define TN_DIG_THERM_INTH_MASK 0x000000FF
-#define TN_DIG_THERM_INTH_SHIFT 0
-#define TN_DIG_THERM_INTL(x) ((x) << 8)
-#define TN_DIG_THERM_INTL_MASK 0x0000FF00
-#define TN_DIG_THERM_INTL_SHIFT 8
-#define TN_THERM_INT_MASK_HIGH (1 << 24)
-#define TN_THERM_INT_MASK_LOW (1 << 25)
-
#define CG_MULT_THERMAL_STATUS 0x740
#define ASIC_T(x) ((x) << 16)
#define ASIC_T_MASK 0x07FF0000
@@ -908,7 +473,6 @@
#define CG_TS0_STATUS 0x760
#define TS0_ADC_DOUT_MASK 0x000003FF
#define TS0_ADC_DOUT_SHIFT 0
-
/* APU */
#define CG_THERMAL_STATUS 0x678
@@ -1166,18 +730,6 @@
#define WAIT_UNTIL 0x8040
#define SRBM_STATUS 0x0E50
-#define RLC_RQ_PENDING (1 << 3)
-#define GRBM_RQ_PENDING (1 << 5)
-#define VMC_BUSY (1 << 8)
-#define MCB_BUSY (1 << 9)
-#define MCB_NON_DISPLAY_BUSY (1 << 10)
-#define MCC_BUSY (1 << 11)
-#define MCD_BUSY (1 << 12)
-#define SEM_BUSY (1 << 14)
-#define RLC_BUSY (1 << 15)
-#define IH_BUSY (1 << 17)
-#define SRBM_STATUS2 0x0EC4
-#define DMA_BUSY (1 << 5)
#define SRBM_SOFT_RESET 0x0E60
#define SRBM_SOFT_RESET_ALL_MASK 0x00FEEFA6
#define SOFT_RESET_BIF (1 << 1)
@@ -1196,10 +748,6 @@
#define SOFT_RESET_REGBB (1 << 22)
#define SOFT_RESET_ORB (1 << 23)
-#define SRBM_READ_ERROR 0xE98
-#define SRBM_INT_CNTL 0xEA0
-#define SRBM_INT_ACK 0xEA8
-
/* display watermarks */
#define DC_LB_MEMORY_SPLIT 0x6b0c
#define PRIORITY_A_CNT 0x6b18
@@ -1365,38 +913,6 @@
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
# define DC_HPDx_EN (1 << 28)
-/* DCE4/5/6 FMT blocks */
-#define FMT_DYNAMIC_EXP_CNTL 0x6fb4
-# define FMT_DYNAMIC_EXP_EN (1 << 0)
-# define FMT_DYNAMIC_EXP_MODE (1 << 4)
- /* 0 = 10bit -> 12bit, 1 = 8bit -> 12bit */
-#define FMT_CONTROL 0x6fb8
-# define FMT_PIXEL_ENCODING (1 << 16)
- /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
-#define FMT_BIT_DEPTH_CONTROL 0x6fc8
-# define FMT_TRUNCATE_EN (1 << 0)
-# define FMT_TRUNCATE_DEPTH (1 << 4)
-# define FMT_SPATIAL_DITHER_EN (1 << 8)
-# define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
-# define FMT_SPATIAL_DITHER_DEPTH (1 << 12)
-# define FMT_FRAME_RANDOM_ENABLE (1 << 13)
-# define FMT_RGB_RANDOM_ENABLE (1 << 14)
-# define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
-# define FMT_TEMPORAL_DITHER_EN (1 << 16)
-# define FMT_TEMPORAL_DITHER_DEPTH (1 << 20)
-# define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
-# define FMT_TEMPORAL_LEVEL (1 << 24)
-# define FMT_TEMPORAL_DITHER_RESET (1 << 25)
-# define FMT_25FRC_SEL(x) ((x) << 26)
-# define FMT_50FRC_SEL(x) ((x) << 28)
-# define FMT_75FRC_SEL(x) ((x) << 30)
-#define FMT_CLAMP_CONTROL 0x6fe4
-# define FMT_CLAMP_DATA_EN (1 << 0)
-# define FMT_CLAMP_COLOR_FORMAT(x) ((x) << 16)
-# define FMT_CLAMP_6BPC 0
-# define FMT_CLAMP_8BPC 1
-# define FMT_CLAMP_10BPC 2
-
/* ASYNC DMA */
#define DMA_RB_RPTR 0xd008
#define DMA_RB_WPTR 0xd00c
@@ -1413,66 +929,22 @@
#define CAYMAN_DMA1_CNTL 0xd82c
/* async DMA packets */
-#define DMA_PACKET(cmd, sub_cmd, n) ((((cmd) & 0xF) << 28) | \
- (((sub_cmd) & 0xFF) << 20) |\
- (((n) & 0xFFFFF) << 0))
-#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
-#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
-#define GET_DMA_SUB_CMD(h) (((h) & 0x0ff00000) >> 20)
-
+#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
+ (((t) & 0x1) << 23) | \
+ (((s) & 0x1) << 22) | \
+ (((n) & 0xFFFFF) << 0))
/* async DMA Packet types */
-#define DMA_PACKET_WRITE 0x2
-#define DMA_PACKET_COPY 0x3
-#define DMA_PACKET_INDIRECT_BUFFER 0x4
-#define DMA_PACKET_SEMAPHORE 0x5
-#define DMA_PACKET_FENCE 0x6
-#define DMA_PACKET_TRAP 0x7
-#define DMA_PACKET_SRBM_WRITE 0x9
-#define DMA_PACKET_CONSTANT_FILL 0xd
-#define DMA_PACKET_NOP 0xf
-
-/* PIF PHY0 indirect regs */
-#define PB0_PIF_CNTL 0x10
-# define LS2_EXIT_TIME(x) ((x) << 17)
-# define LS2_EXIT_TIME_MASK (0x7 << 17)
-# define LS2_EXIT_TIME_SHIFT 17
-#define PB0_PIF_PAIRING 0x11
-# define MULTI_PIF (1 << 25)
-#define PB0_PIF_PWRDOWN_0 0x12
-# define PLL_POWER_STATE_IN_TXS2_0(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_0_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_0_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_0(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_0_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_0_SHIFT 10
-# define PLL_RAMP_UP_TIME_0(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_0_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_0_SHIFT 24
-#define PB0_PIF_PWRDOWN_1 0x13
-# define PLL_POWER_STATE_IN_TXS2_1(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_1_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_1_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_1(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_1_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_1_SHIFT 10
-# define PLL_RAMP_UP_TIME_1(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_1_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_1_SHIFT 24
-/* PIF PHY1 indirect regs */
-#define PB1_PIF_CNTL 0x10
-#define PB1_PIF_PAIRING 0x11
-#define PB1_PIF_PWRDOWN_0 0x12
-#define PB1_PIF_PWRDOWN_1 0x13
-/* PCIE PORT indirect regs */
-#define PCIE_LC_CNTL 0xa0
-# define LC_L0S_INACTIVITY(x) ((x) << 8)
-# define LC_L0S_INACTIVITY_MASK (0xf << 8)
-# define LC_L0S_INACTIVITY_SHIFT 8
-# define LC_L1_INACTIVITY(x) ((x) << 12)
-# define LC_L1_INACTIVITY_MASK (0xf << 12)
-# define LC_L1_INACTIVITY_SHIFT 12
-# define LC_PMI_TO_L1_DIS (1 << 16)
-# define LC_ASPM_TO_L1_DIS (1 << 24)
+#define DMA_PACKET_WRITE 0x2
+#define DMA_PACKET_COPY 0x3
+#define DMA_PACKET_INDIRECT_BUFFER 0x4
+#define DMA_PACKET_SEMAPHORE 0x5
+#define DMA_PACKET_FENCE 0x6
+#define DMA_PACKET_TRAP 0x7
+#define DMA_PACKET_SRBM_WRITE 0x9
+#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_NOP 0xf
+
+/* PCIE link stuff */
#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
# define LC_LINK_WIDTH_SHIFT 0
@@ -1492,9 +964,6 @@
# define LC_SHORT_RECONFIG_EN (1 << 11)
# define LC_UPCONFIGURE_SUPPORT (1 << 12)
# define LC_UPCONFIGURE_DIS (1 << 13)
-# define LC_DYN_LANES_PWR_STATE(x) ((x) << 21)
-# define LC_DYN_LANES_PWR_STATE_MASK (0x3 << 21)
-# define LC_DYN_LANES_PWR_STATE_SHIFT 21
#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
# define LC_GEN2_EN_STRAP (1 << 0)
# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
@@ -1503,9 +972,6 @@
# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
# define LC_CURRENT_DATA_RATE (1 << 11)
-# define LC_HW_VOLTAGE_IF_CONTROL(x) ((x) << 12)
-# define LC_HW_VOLTAGE_IF_CONTROL_MASK (3 << 12)
-# define LC_HW_VOLTAGE_IF_CONTROL_SHIFT 12
# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
@@ -1516,21 +982,19 @@
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
-
-/*
- * UVD
- */
-#define UVD_UDEC_ADDR_CONFIG 0xef4c
-#define UVD_UDEC_DB_ADDR_CONFIG 0xef50
-#define UVD_UDEC_DBW_ADDR_CONFIG 0xef54
-#define UVD_RBC_RB_RPTR 0xf690
-#define UVD_RBC_RB_WPTR 0xf694
-#define UVD_STATUS 0xf6bc
-
/*
* PM4
*/
-#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
+#define PACKET_TYPE0 0
+#define PACKET_TYPE1 1
+#define PACKET_TYPE2 2
+#define PACKET_TYPE3 3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
#define CP_PACKET2 0x80000000
@@ -1539,7 +1003,7 @@
#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
-#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
+#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
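The PACKET0/PACKET3 encoders and the CP_PACKET_GET_* decoders above are exact inverses over the PM4 header fields. A minimal stand-alone sketch of that round trip; the macro bodies are copied from the definitions above, while the main() harness and the sample opcode/count values are illustrative assumptions only:

/*
 * Minimal sketch of the PM4 header round trip.  The macros mirror the
 * definitions above; the sample values below are arbitrary.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PACKET_TYPE0 0
#define PACKET_TYPE3 3

#define CP_PACKET_GET_TYPE(h)    (((h) >> 30) & 3)
#define CP_PACKET_GET_COUNT(h)   (((h) >> 16) & 0x3FFF)
#define CP_PACKET0_GET_REG(h)    (((h) & 0xFFFF) << 2)
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)

#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) |		\
			 (((reg) >> 2) & 0xFFFF) |	\
			 ((n) & 0x3FFF) << 16)
#define PACKET3(op, n)	((PACKET_TYPE3 << 30) |		\
			 (((op) & 0xFF) << 8) |		\
			 ((n) & 0x3FFF) << 16)

int
main(void)
{
	/* GRBM_GFX_INDEX (0x802C) with an arbitrary dword count */
	uint32_t h0 = PACKET0(0x802C, 2);
	/* arbitrary type-3 opcode and count */
	uint32_t h3 = PACKET3(0x3C, 3);

	/* every field decodes back to what was encoded */
	assert(CP_PACKET_GET_TYPE(h0) == PACKET_TYPE0);
	assert(CP_PACKET0_GET_REG(h0) == 0x802C);
	assert(CP_PACKET_GET_COUNT(h0) == 2);

	assert(CP_PACKET_GET_TYPE(h3) == PACKET_TYPE3);
	assert(CP_PACKET3_GET_OPCODE(h3) == 0x3C);
	assert(CP_PACKET_GET_COUNT(h3) == 3);

	printf("type0 header 0x%08x, type3 header 0x%08x\n", h0, h3);
	return 0;
}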
diff --git a/sys/dev/pci/drm/radeon/mkregtable.c b/sys/dev/pci/drm/radeon/mkregtable.c
index b928c17bdee..de1d79eda18 100644
--- a/sys/dev/pci/drm/radeon/mkregtable.c
+++ b/sys/dev/pci/drm/radeon/mkregtable.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: mkregtable.c,v 1.3 2018/04/20 16:09:36 deraadt Exp $ */
/* utility to create the register check tables
* this includes inlined list.h safe for userspace.
*
@@ -347,7 +348,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_entry - get the struct for this entry
* @ptr: the &struct list_head pointer.
* @type: the type of the struct this is embedded in.
- * @member: the name of the list_head within the struct.
+ * @member: the name of the list_struct within the struct.
*/
#define list_entry(ptr, type, member) \
container_of(ptr, type, member)
@@ -356,7 +357,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_first_entry - get the first element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
- * @member: the name of the list_head within the struct.
+ * @member: the name of the list_struct within the struct.
*
* Note that the list is expected to be non-empty.
*/
@@ -373,6 +374,19 @@ static inline void list_splice_tail_init(struct list_head *list,
pos = pos->next)
/**
+ * __list_for_each - iterate over a list
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ *
+ * This variant differs from list_for_each() in that it's the
+ * simplest possible list iteration code, no prefetching is done.
+ * Use this for code that knows the list to be very short (empty
+ * or 1 entry) most of the time.
+ */
+#define __list_for_each(pos, head) \
+ for (pos = (head)->next; pos != (head); pos = pos->next)
+
+/**
* list_for_each_prev - iterate over a list backwards
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
@@ -406,7 +420,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_head within the struct.
+ * @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry(pos, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member); \
@@ -417,7 +431,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_for_each_entry_reverse - iterate backwards over list of given type.
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_head within the struct.
+ * @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_reverse(pos, head, member) \
for (pos = list_entry((head)->prev, typeof(*pos), member); \
@@ -428,7 +442,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
* @pos: the type * to use as a start point
* @head: the head of the list
- * @member: the name of the list_head within the struct.
+ * @member: the name of the list_struct within the struct.
*
* Prepares a pos entry for use as a start point in list_for_each_entry_continue().
*/
@@ -439,7 +453,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_for_each_entry_continue - continue iteration over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_head within the struct.
+ * @member: the name of the list_struct within the struct.
*
* Continue to iterate over list of given type, continuing after
* the current position.
@@ -453,7 +467,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_for_each_entry_continue_reverse - iterate backwards from the given point
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_head within the struct.
+ * @member: the name of the list_struct within the struct.
*
* Start to iterate over list of given type backwards, continuing after
* the current position.
@@ -467,7 +481,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* list_for_each_entry_from - iterate over list of given type from the current point
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
- * @member: the name of the list_head within the struct.
+ * @member: the name of the list_struct within the struct.
*
* Iterate over list of given type, continuing from current position.
*/
@@ -480,7 +494,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
- * @member: the name of the list_head within the struct.
+ * @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_safe(pos, n, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member), \
@@ -493,7 +507,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
- * @member: the name of the list_head within the struct.
+ * @member: the name of the list_struct within the struct.
*
* Iterate over list of given type, continuing after current point,
* safe against removal of list entry.
@@ -509,7 +523,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
- * @member: the name of the list_head within the struct.
+ * @member: the name of the list_struct within the struct.
*
* Iterate over list of given type from current point, safe against
* removal of list entry.
@@ -524,7 +538,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
- * @member: the name of the list_head within the struct.
+ * @member: the name of the list_struct within the struct.
*
* Iterate backwards over list of given type, safe against removal
* of list entry.
@@ -655,7 +669,7 @@ static int parser_auth(struct table *t, const char *filename)
/* first line will contain the last register
* and gpu name */
- sscanf(buf, "%9s %9s", gpu_name, last_reg_s);
+ sscanf(buf, "%s %s", gpu_name, last_reg_s);
t->gpu_prefix = gpu_name;
last_reg = strtol(last_reg_s, NULL, 16);
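Every @member line in these kernel-doc comments names the struct list_head embedded in the element type, and list_entry() recovers the containing object via container_of(). A minimal userspace sketch of that pattern, assuming an invented element struct and register values (mkregtable.c's real tables are larger):

/*
 * Minimal sketch of embedded-list iteration via container_of().
 * The element struct and register values are invented for illustration.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)
#define list_for_each_entry(pos, head, member)				\
	for (pos = list_entry((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = list_entry(pos->member.next, typeof(*pos), member))

struct offset {
	struct list_head list;	/* the "member" named by the macros */
	unsigned reg;
};

int
main(void)
{
	struct offset a = { .reg = 0x8040 }, b = { .reg = 0x0E50 };
	struct list_head head;
	struct offset *pos;

	/* hand-link head -> a -> b -> head */
	head.next = &a.list;	head.prev = &b.list;
	a.list.next = &b.list;	a.list.prev = &head;
	b.list.next = &head;	b.list.prev = &a.list;

	list_for_each_entry(pos, &head, list)
		printf("reg 0x%04x\n", pos->reg);
	return 0;
}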
diff --git a/sys/dev/pci/drm/radeon/ni.c b/sys/dev/pci/drm/radeon/ni.c
index af18279f912..eda1ed330f6 100644
--- a/sys/dev/pci/drm/radeon/ni.c
+++ b/sys/dev/pci/drm/radeon/ni.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: ni.c,v 1.12 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2010 Advanced Micro Devices, Inc.
*
@@ -24,168 +25,12 @@
#include <dev/pci/drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
-#include "radeon_audio.h"
#include <dev/pci/drm/radeon_drm.h>
#include "nid.h"
#include "atom.h"
#include "ni_reg.h"
#include "cayman_blit_shaders.h"
-#include "radeon_ucode.h"
-#include "clearstate_cayman.h"
-/*
- * Indirect registers accessor
- */
-u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
-{
- unsigned long flags;
- u32 r;
-
- spin_lock_irqsave(&rdev->smc_idx_lock, flags);
- WREG32(TN_SMC_IND_INDEX_0, (reg));
- r = RREG32(TN_SMC_IND_DATA_0);
- spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
- return r;
-}
-
-void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rdev->smc_idx_lock, flags);
- WREG32(TN_SMC_IND_INDEX_0, (reg));
- WREG32(TN_SMC_IND_DATA_0, (v));
- spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
-}
-
-static const u32 tn_rlc_save_restore_register_list[] =
-{
- 0x98fc,
- 0x98f0,
- 0x9834,
- 0x9838,
- 0x9870,
- 0x9874,
- 0x8a14,
- 0x8b24,
- 0x8bcc,
- 0x8b10,
- 0x8c30,
- 0x8d00,
- 0x8d04,
- 0x8c00,
- 0x8c04,
- 0x8c10,
- 0x8c14,
- 0x8d8c,
- 0x8cf0,
- 0x8e38,
- 0x9508,
- 0x9688,
- 0x9608,
- 0x960c,
- 0x9610,
- 0x9614,
- 0x88c4,
- 0x8978,
- 0x88d4,
- 0x900c,
- 0x9100,
- 0x913c,
- 0x90e8,
- 0x9354,
- 0xa008,
- 0x98f8,
- 0x9148,
- 0x914c,
- 0x3f94,
- 0x98f4,
- 0x9b7c,
- 0x3f8c,
- 0x8950,
- 0x8954,
- 0x8a18,
- 0x8b28,
- 0x9144,
- 0x3f90,
- 0x915c,
- 0x9160,
- 0x9178,
- 0x917c,
- 0x9180,
- 0x918c,
- 0x9190,
- 0x9194,
- 0x9198,
- 0x919c,
- 0x91a8,
- 0x91ac,
- 0x91b0,
- 0x91b4,
- 0x91b8,
- 0x91c4,
- 0x91c8,
- 0x91cc,
- 0x91d0,
- 0x91d4,
- 0x91e0,
- 0x91e4,
- 0x91ec,
- 0x91f0,
- 0x91f4,
- 0x9200,
- 0x9204,
- 0x929c,
- 0x8030,
- 0x9150,
- 0x9a60,
- 0x920c,
- 0x9210,
- 0x9228,
- 0x922c,
- 0x9244,
- 0x9248,
- 0x91e8,
- 0x9294,
- 0x9208,
- 0x9224,
- 0x9240,
- 0x9220,
- 0x923c,
- 0x9258,
- 0x9744,
- 0xa200,
- 0xa204,
- 0xa208,
- 0xa20c,
- 0x8d58,
- 0x9030,
- 0x9034,
- 0x9038,
- 0x903c,
- 0x9040,
- 0x9654,
- 0x897c,
- 0xa210,
- 0xa214,
- 0x9868,
- 0xa02c,
- 0x9664,
- 0x9698,
- 0x949c,
- 0x8e10,
- 0x8e18,
- 0x8c50,
- 0x8c58,
- 0x8c60,
- 0x8c68,
- 0x89b4,
- 0x9830,
- 0x802c,
-};
-
-extern bool evergreen_is_display_hung(struct radeon_device *rdev);
-extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
@@ -194,310 +39,40 @@ extern void evergreen_irq_suspend(struct radeon_device *rdev);
extern int evergreen_mc_init(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
-extern void evergreen_program_aspm(struct radeon_device *rdev);
-extern void sumo_rlc_fini(struct radeon_device *rdev);
-extern int sumo_rlc_init(struct radeon_device *rdev);
-extern void evergreen_gpu_pci_config_reset(struct radeon_device *rdev);
+extern void si_rlc_fini(struct radeon_device *rdev);
+extern int si_rlc_init(struct radeon_device *rdev);
+
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+#define EVERGREEN_RLC_UCODE_SIZE 768
+#define BTC_MC_UCODE_SIZE 6024
+
+#define CAYMAN_PFP_UCODE_SIZE 2176
+#define CAYMAN_PM4_UCODE_SIZE 2176
+#define CAYMAN_RLC_UCODE_SIZE 1024
+#define CAYMAN_MC_UCODE_SIZE 6037
+
+#define ARUBA_RLC_UCODE_SIZE 1536
/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
MODULE_FIRMWARE("radeon/BARTS_mc.bin");
-MODULE_FIRMWARE("radeon/BARTS_smc.bin");
MODULE_FIRMWARE("radeon/BTC_rlc.bin");
MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
MODULE_FIRMWARE("radeon/TURKS_me.bin");
MODULE_FIRMWARE("radeon/TURKS_mc.bin");
-MODULE_FIRMWARE("radeon/TURKS_smc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
-MODULE_FIRMWARE("radeon/CAICOS_smc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
-MODULE_FIRMWARE("radeon/CAYMAN_smc.bin");
MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
-
-static const u32 cayman_golden_registers2[] =
-{
- 0x3e5c, 0xffffffff, 0x00000000,
- 0x3e48, 0xffffffff, 0x00000000,
- 0x3e4c, 0xffffffff, 0x00000000,
- 0x3e64, 0xffffffff, 0x00000000,
- 0x3e50, 0xffffffff, 0x00000000,
- 0x3e60, 0xffffffff, 0x00000000
-};
-
-static const u32 cayman_golden_registers[] =
-{
- 0x5eb4, 0xffffffff, 0x00000002,
- 0x5e78, 0x8f311ff1, 0x001000f0,
- 0x3f90, 0xffff0000, 0xff000000,
- 0x9148, 0xffff0000, 0xff000000,
- 0x3f94, 0xffff0000, 0xff000000,
- 0x914c, 0xffff0000, 0xff000000,
- 0xc78, 0x00000080, 0x00000080,
- 0xbd4, 0x70073777, 0x00011003,
- 0xd02c, 0xbfffff1f, 0x08421000,
- 0xd0b8, 0x73773777, 0x02011003,
- 0x5bc0, 0x00200000, 0x50100000,
- 0x98f8, 0x33773777, 0x02011003,
- 0x98fc, 0xffffffff, 0x76541032,
- 0x7030, 0x31000311, 0x00000011,
- 0x2f48, 0x33773777, 0x42010001,
- 0x6b28, 0x00000010, 0x00000012,
- 0x7728, 0x00000010, 0x00000012,
- 0x10328, 0x00000010, 0x00000012,
- 0x10f28, 0x00000010, 0x00000012,
- 0x11b28, 0x00000010, 0x00000012,
- 0x12728, 0x00000010, 0x00000012,
- 0x240c, 0x000007ff, 0x00000000,
- 0x8a14, 0xf000001f, 0x00000007,
- 0x8b24, 0x3fff3fff, 0x00ff0fff,
- 0x8b10, 0x0000ff0f, 0x00000000,
- 0x28a4c, 0x07ffffff, 0x06000000,
- 0x10c, 0x00000001, 0x00010003,
- 0xa02c, 0xffffffff, 0x0000009b,
- 0x913c, 0x0000010f, 0x01000100,
- 0x8c04, 0xf8ff00ff, 0x40600060,
- 0x28350, 0x00000f01, 0x00000000,
- 0x9508, 0x3700001f, 0x00000002,
- 0x960c, 0xffffffff, 0x54763210,
- 0x88c4, 0x001f3ae3, 0x00000082,
- 0x88d0, 0xffffffff, 0x0f40df40,
- 0x88d4, 0x0000001f, 0x00000010,
- 0x8974, 0xffffffff, 0x00000000
-};
-
-static const u32 dvst_golden_registers2[] =
-{
- 0x8f8, 0xffffffff, 0,
- 0x8fc, 0x00380000, 0,
- 0x8f8, 0xffffffff, 1,
- 0x8fc, 0x0e000000, 0
-};
-
-static const u32 dvst_golden_registers[] =
-{
- 0x690, 0x3fff3fff, 0x20c00033,
- 0x918c, 0x0fff0fff, 0x00010006,
- 0x91a8, 0x0fff0fff, 0x00010006,
- 0x9150, 0xffffdfff, 0x6e944040,
- 0x917c, 0x0fff0fff, 0x00030002,
- 0x9198, 0x0fff0fff, 0x00030002,
- 0x915c, 0x0fff0fff, 0x00010000,
- 0x3f90, 0xffff0001, 0xff000000,
- 0x9178, 0x0fff0fff, 0x00070000,
- 0x9194, 0x0fff0fff, 0x00070000,
- 0x9148, 0xffff0001, 0xff000000,
- 0x9190, 0x0fff0fff, 0x00090008,
- 0x91ac, 0x0fff0fff, 0x00090008,
- 0x3f94, 0xffff0000, 0xff000000,
- 0x914c, 0xffff0000, 0xff000000,
- 0x929c, 0x00000fff, 0x00000001,
- 0x55e4, 0xff607fff, 0xfc000100,
- 0x8a18, 0xff000fff, 0x00000100,
- 0x8b28, 0xff000fff, 0x00000100,
- 0x9144, 0xfffc0fff, 0x00000100,
- 0x6ed8, 0x00010101, 0x00010000,
- 0x9830, 0xffffffff, 0x00000000,
- 0x9834, 0xf00fffff, 0x00000400,
- 0x9838, 0xfffffffe, 0x00000000,
- 0xd0c0, 0xff000fff, 0x00000100,
- 0xd02c, 0xbfffff1f, 0x08421000,
- 0xd0b8, 0x73773777, 0x12010001,
- 0x5bb0, 0x000000f0, 0x00000070,
- 0x98f8, 0x73773777, 0x12010001,
- 0x98fc, 0xffffffff, 0x00000010,
- 0x9b7c, 0x00ff0000, 0x00fc0000,
- 0x8030, 0x00001f0f, 0x0000100a,
- 0x2f48, 0x73773777, 0x12010001,
- 0x2408, 0x00030000, 0x000c007f,
- 0x8a14, 0xf000003f, 0x00000007,
- 0x8b24, 0x3fff3fff, 0x00ff0fff,
- 0x8b10, 0x0000ff0f, 0x00000000,
- 0x28a4c, 0x07ffffff, 0x06000000,
- 0x4d8, 0x00000fff, 0x00000100,
- 0xa008, 0xffffffff, 0x00010000,
- 0x913c, 0xffff03ff, 0x01000100,
- 0x8c00, 0x000000ff, 0x00000003,
- 0x8c04, 0xf8ff00ff, 0x40600060,
- 0x8cf0, 0x1fff1fff, 0x08e00410,
- 0x28350, 0x00000f01, 0x00000000,
- 0x9508, 0xf700071f, 0x00000002,
- 0x960c, 0xffffffff, 0x54763210,
- 0x20ef8, 0x01ff01ff, 0x00000002,
- 0x20e98, 0xfffffbff, 0x00200000,
- 0x2015c, 0xffffffff, 0x00000f40,
- 0x88c4, 0x001f3ae3, 0x00000082,
- 0x8978, 0x3fffffff, 0x04050140,
- 0x88d4, 0x0000001f, 0x00000010,
- 0x8974, 0xffffffff, 0x00000000
-};
-
-static const u32 scrapper_golden_registers[] =
-{
- 0x690, 0x3fff3fff, 0x20c00033,
- 0x918c, 0x0fff0fff, 0x00010006,
- 0x918c, 0x0fff0fff, 0x00010006,
- 0x91a8, 0x0fff0fff, 0x00010006,
- 0x91a8, 0x0fff0fff, 0x00010006,
- 0x9150, 0xffffdfff, 0x6e944040,
- 0x9150, 0xffffdfff, 0x6e944040,
- 0x917c, 0x0fff0fff, 0x00030002,
- 0x917c, 0x0fff0fff, 0x00030002,
- 0x9198, 0x0fff0fff, 0x00030002,
- 0x9198, 0x0fff0fff, 0x00030002,
- 0x915c, 0x0fff0fff, 0x00010000,
- 0x915c, 0x0fff0fff, 0x00010000,
- 0x3f90, 0xffff0001, 0xff000000,
- 0x3f90, 0xffff0001, 0xff000000,
- 0x9178, 0x0fff0fff, 0x00070000,
- 0x9178, 0x0fff0fff, 0x00070000,
- 0x9194, 0x0fff0fff, 0x00070000,
- 0x9194, 0x0fff0fff, 0x00070000,
- 0x9148, 0xffff0001, 0xff000000,
- 0x9148, 0xffff0001, 0xff000000,
- 0x9190, 0x0fff0fff, 0x00090008,
- 0x9190, 0x0fff0fff, 0x00090008,
- 0x91ac, 0x0fff0fff, 0x00090008,
- 0x91ac, 0x0fff0fff, 0x00090008,
- 0x3f94, 0xffff0000, 0xff000000,
- 0x3f94, 0xffff0000, 0xff000000,
- 0x914c, 0xffff0000, 0xff000000,
- 0x914c, 0xffff0000, 0xff000000,
- 0x929c, 0x00000fff, 0x00000001,
- 0x929c, 0x00000fff, 0x00000001,
- 0x55e4, 0xff607fff, 0xfc000100,
- 0x8a18, 0xff000fff, 0x00000100,
- 0x8a18, 0xff000fff, 0x00000100,
- 0x8b28, 0xff000fff, 0x00000100,
- 0x8b28, 0xff000fff, 0x00000100,
- 0x9144, 0xfffc0fff, 0x00000100,
- 0x9144, 0xfffc0fff, 0x00000100,
- 0x6ed8, 0x00010101, 0x00010000,
- 0x9830, 0xffffffff, 0x00000000,
- 0x9830, 0xffffffff, 0x00000000,
- 0x9834, 0xf00fffff, 0x00000400,
- 0x9834, 0xf00fffff, 0x00000400,
- 0x9838, 0xfffffffe, 0x00000000,
- 0x9838, 0xfffffffe, 0x00000000,
- 0xd0c0, 0xff000fff, 0x00000100,
- 0xd02c, 0xbfffff1f, 0x08421000,
- 0xd02c, 0xbfffff1f, 0x08421000,
- 0xd0b8, 0x73773777, 0x12010001,
- 0xd0b8, 0x73773777, 0x12010001,
- 0x5bb0, 0x000000f0, 0x00000070,
- 0x98f8, 0x73773777, 0x12010001,
- 0x98f8, 0x73773777, 0x12010001,
- 0x98fc, 0xffffffff, 0x00000010,
- 0x98fc, 0xffffffff, 0x00000010,
- 0x9b7c, 0x00ff0000, 0x00fc0000,
- 0x9b7c, 0x00ff0000, 0x00fc0000,
- 0x8030, 0x00001f0f, 0x0000100a,
- 0x8030, 0x00001f0f, 0x0000100a,
- 0x2f48, 0x73773777, 0x12010001,
- 0x2f48, 0x73773777, 0x12010001,
- 0x2408, 0x00030000, 0x000c007f,
- 0x8a14, 0xf000003f, 0x00000007,
- 0x8a14, 0xf000003f, 0x00000007,
- 0x8b24, 0x3fff3fff, 0x00ff0fff,
- 0x8b24, 0x3fff3fff, 0x00ff0fff,
- 0x8b10, 0x0000ff0f, 0x00000000,
- 0x8b10, 0x0000ff0f, 0x00000000,
- 0x28a4c, 0x07ffffff, 0x06000000,
- 0x28a4c, 0x07ffffff, 0x06000000,
- 0x4d8, 0x00000fff, 0x00000100,
- 0x4d8, 0x00000fff, 0x00000100,
- 0xa008, 0xffffffff, 0x00010000,
- 0xa008, 0xffffffff, 0x00010000,
- 0x913c, 0xffff03ff, 0x01000100,
- 0x913c, 0xffff03ff, 0x01000100,
- 0x90e8, 0x001fffff, 0x010400c0,
- 0x8c00, 0x000000ff, 0x00000003,
- 0x8c00, 0x000000ff, 0x00000003,
- 0x8c04, 0xf8ff00ff, 0x40600060,
- 0x8c04, 0xf8ff00ff, 0x40600060,
- 0x8c30, 0x0000000f, 0x00040005,
- 0x8cf0, 0x1fff1fff, 0x08e00410,
- 0x8cf0, 0x1fff1fff, 0x08e00410,
- 0x900c, 0x00ffffff, 0x0017071f,
- 0x28350, 0x00000f01, 0x00000000,
- 0x28350, 0x00000f01, 0x00000000,
- 0x9508, 0xf700071f, 0x00000002,
- 0x9508, 0xf700071f, 0x00000002,
- 0x9688, 0x00300000, 0x0017000f,
- 0x960c, 0xffffffff, 0x54763210,
- 0x960c, 0xffffffff, 0x54763210,
- 0x20ef8, 0x01ff01ff, 0x00000002,
- 0x20e98, 0xfffffbff, 0x00200000,
- 0x2015c, 0xffffffff, 0x00000f40,
- 0x88c4, 0x001f3ae3, 0x00000082,
- 0x88c4, 0x001f3ae3, 0x00000082,
- 0x8978, 0x3fffffff, 0x04050140,
- 0x8978, 0x3fffffff, 0x04050140,
- 0x88d4, 0x0000001f, 0x00000010,
- 0x88d4, 0x0000001f, 0x00000010,
- 0x8974, 0xffffffff, 0x00000000,
- 0x8974, 0xffffffff, 0x00000000
-};
-
-static void ni_init_golden_registers(struct radeon_device *rdev)
-{
- switch (rdev->family) {
- case CHIP_CAYMAN:
- radeon_program_register_sequence(rdev,
- cayman_golden_registers,
- (const u32)ARRAY_SIZE(cayman_golden_registers));
- radeon_program_register_sequence(rdev,
- cayman_golden_registers2,
- (const u32)ARRAY_SIZE(cayman_golden_registers2));
- break;
- case CHIP_ARUBA:
- if ((rdev->pdev->device == 0x9900) ||
- (rdev->pdev->device == 0x9901) ||
- (rdev->pdev->device == 0x9903) ||
- (rdev->pdev->device == 0x9904) ||
- (rdev->pdev->device == 0x9905) ||
- (rdev->pdev->device == 0x9906) ||
- (rdev->pdev->device == 0x9907) ||
- (rdev->pdev->device == 0x9908) ||
- (rdev->pdev->device == 0x9909) ||
- (rdev->pdev->device == 0x990A) ||
- (rdev->pdev->device == 0x990B) ||
- (rdev->pdev->device == 0x990C) ||
- (rdev->pdev->device == 0x990D) ||
- (rdev->pdev->device == 0x990E) ||
- (rdev->pdev->device == 0x990F) ||
- (rdev->pdev->device == 0x9910) ||
- (rdev->pdev->device == 0x9913) ||
- (rdev->pdev->device == 0x9917) ||
- (rdev->pdev->device == 0x9918)) {
- radeon_program_register_sequence(rdev,
- dvst_golden_registers,
- (const u32)ARRAY_SIZE(dvst_golden_registers));
- radeon_program_register_sequence(rdev,
- dvst_golden_registers2,
- (const u32)ARRAY_SIZE(dvst_golden_registers2));
- } else {
- radeon_program_register_sequence(rdev,
- scrapper_golden_registers,
- (const u32)ARRAY_SIZE(scrapper_golden_registers));
- radeon_program_register_sequence(rdev,
- dvst_golden_registers2,
- (const u32)ARRAY_SIZE(dvst_golden_registers2));
- }
- break;
- default:
- break;
- }
-}
-
#define BTC_IO_MC_REGS_SIZE 29
static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
@@ -681,7 +256,7 @@ int ni_mc_load_microcode(struct radeon_device *rdev)
WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
}
/* load the MC ucode */
- fw_data = (const __be32 *)rdev->mc_fw->data;
+ fw_data = (const __be32 *)rdev->mc_fw;
for (i = 0; i < ucode_size; i++)
WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
@@ -709,7 +284,6 @@ int ni_init_microcode(struct radeon_device *rdev)
const char *chip_name;
const char *rlc_chip_name;
size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
- size_t smc_req_size = 0;
char fw_name[30];
int err;
@@ -717,44 +291,40 @@ int ni_init_microcode(struct radeon_device *rdev)
switch (rdev->family) {
case CHIP_BARTS:
- chip_name = "BARTS";
- rlc_chip_name = "BTC";
+ chip_name = "barts";
+ rlc_chip_name = "btc";
pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
mc_req_size = BTC_MC_UCODE_SIZE * 4;
- smc_req_size = roundup2(BARTS_SMC_UCODE_SIZE, 4);
break;
case CHIP_TURKS:
- chip_name = "TURKS";
- rlc_chip_name = "BTC";
+ chip_name = "turks";
+ rlc_chip_name = "btc";
pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
mc_req_size = BTC_MC_UCODE_SIZE * 4;
- smc_req_size = roundup2(TURKS_SMC_UCODE_SIZE, 4);
break;
case CHIP_CAICOS:
- chip_name = "CAICOS";
- rlc_chip_name = "BTC";
+ chip_name = "caicos";
+ rlc_chip_name = "btc";
pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
mc_req_size = BTC_MC_UCODE_SIZE * 4;
- smc_req_size = roundup2(CAICOS_SMC_UCODE_SIZE, 4);
break;
case CHIP_CAYMAN:
- chip_name = "CAYMAN";
- rlc_chip_name = "CAYMAN";
+ chip_name = "cayman";
+ rlc_chip_name = "cayman";
pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
- smc_req_size = roundup2(CAYMAN_SMC_UCODE_SIZE, 4);
break;
case CHIP_ARUBA:
- chip_name = "ARUBA";
- rlc_chip_name = "ARUBA";
+ chip_name = "aruba";
+ rlc_chip_name = "aruba";
/* pfp/me same size as CAYMAN */
pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
@@ -766,127 +336,79 @@ int ni_init_microcode(struct radeon_device *rdev)
DRM_INFO("Loading %s Microcode\n", chip_name);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
- err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
+ snprintf(fw_name, sizeof(fw_name), "radeon-%s_pfp", chip_name);
+ err = loadfirmware(fw_name, &rdev->pfp_fw, &rdev->pfp_fw_size);
if (err)
goto out;
- if (rdev->pfp_fw->size != pfp_req_size) {
- printk(KERN_ERR
+ if (rdev->pfp_fw_size != pfp_req_size) {
+ DRM_ERROR(
"ni_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->pfp_fw->size, fw_name);
+ rdev->pfp_fw_size, fw_name);
err = -EINVAL;
goto out;
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
- err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
+ snprintf(fw_name, sizeof(fw_name), "radeon-%s_me", chip_name);
+ err = loadfirmware(fw_name, &rdev->me_fw, &rdev->me_fw_size);
if (err)
goto out;
- if (rdev->me_fw->size != me_req_size) {
- printk(KERN_ERR
+ if (rdev->me_fw_size != me_req_size) {
+ DRM_ERROR(
"ni_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->me_fw->size, fw_name);
+ rdev->me_fw_size, fw_name);
err = -EINVAL;
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
- err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
+ snprintf(fw_name, sizeof(fw_name), "radeon-%s_rlc", rlc_chip_name);
+ err = loadfirmware(fw_name, &rdev->rlc_fw, &rdev->rlc_fw_size);
if (err)
goto out;
- if (rdev->rlc_fw->size != rlc_req_size) {
- printk(KERN_ERR
+ if (rdev->rlc_fw_size != rlc_req_size) {
+ DRM_ERROR(
"ni_rlc: Bogus length %zu in firmware \"%s\"\n",
- rdev->rlc_fw->size, fw_name);
+ rdev->rlc_fw_size, fw_name);
err = -EINVAL;
}
/* no MC ucode on TN */
if (!(rdev->flags & RADEON_IS_IGP)) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
- err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
+ snprintf(fw_name, sizeof(fw_name), "radeon-%s_mc", chip_name);
+ err = loadfirmware(fw_name, &rdev->mc_fw, &rdev->mc_fw_size);
if (err)
goto out;
- if (rdev->mc_fw->size != mc_req_size) {
- printk(KERN_ERR
- "ni_mc: Bogus length %zu in firmware \"%s\"\n",
- rdev->mc_fw->size, fw_name);
- err = -EINVAL;
- }
- }
-
- if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
- err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
- if (err) {
- printk(KERN_ERR
- "smc: error loading firmware \"%s\"\n",
- fw_name);
- release_firmware(rdev->smc_fw);
- rdev->smc_fw = NULL;
- err = 0;
- } else if (rdev->smc_fw->size != smc_req_size) {
- printk(KERN_ERR
+ if (rdev->mc_fw_size != mc_req_size) {
+ DRM_ERROR(
"ni_mc: Bogus length %zu in firmware \"%s\"\n",
- rdev->mc_fw->size, fw_name);
+ rdev->mc_fw_size, fw_name);
err = -EINVAL;
}
}
-
out:
if (err) {
if (err != -EINVAL)
- printk(KERN_ERR
+ DRM_ERROR(
"ni_cp: Failed to load firmware \"%s\"\n",
fw_name);
- release_firmware(rdev->pfp_fw);
- rdev->pfp_fw = NULL;
- release_firmware(rdev->me_fw);
- rdev->me_fw = NULL;
- release_firmware(rdev->rlc_fw);
- rdev->rlc_fw = NULL;
- release_firmware(rdev->mc_fw);
- rdev->mc_fw = NULL;
+ if (rdev->pfp_fw) {
+ free(rdev->pfp_fw, M_DEVBUF, 0);
+ rdev->pfp_fw = NULL;
+ }
+ if (rdev->me_fw) {
+ free(rdev->me_fw, M_DEVBUF, 0);
+ rdev->me_fw = NULL;
+ }
+ if (rdev->rlc_fw) {
+ free(rdev->rlc_fw, M_DEVBUF, 0);
+ rdev->rlc_fw = NULL;
+ }
+ if (rdev->mc_fw) {
+ free(rdev->mc_fw, M_DEVBUF, 0);
+ rdev->mc_fw = NULL;
+ }
}
return err;
}
-/**
- * cayman_get_allowed_info_register - fetch the register for the info ioctl
- *
- * @rdev: radeon_device pointer
- * @reg: register offset in bytes
- * @val: register value
- *
- * Returns 0 for success or -EINVAL for an invalid register
- *
- */
-int cayman_get_allowed_info_register(struct radeon_device *rdev,
- u32 reg, u32 *val)
-{
- switch (reg) {
- case GRBM_STATUS:
- case GRBM_STATUS_SE0:
- case GRBM_STATUS_SE1:
- case SRBM_STATUS:
- case SRBM_STATUS2:
- case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET):
- case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET):
- case UVD_STATUS:
- *val = RREG32(reg);
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-int tn_get_temp(struct radeon_device *rdev)
-{
- u32 temp = RREG32_SMC(TN_CURRENT_GNB_TEMP) & 0x7ff;
- int actual_temp = (temp / 8) - 49;
-
- return actual_temp * 1000;
-}
-
/*
* Core functions
*/
@@ -1013,8 +535,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
}
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
- WREG32(SRBM_INT_CNTL, 0x1);
- WREG32(SRBM_INT_ACK, 0x1);
evergreen_fix_pci_max_read_req_size(rdev);
@@ -1102,26 +622,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
}
/* enabled rb are just the one not disabled :) */
disabled_rb_mask = tmp;
- tmp = 0;
- for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
- tmp |= (1 << i);
- /* if all the backends are disabled, fix it up here */
- if ((disabled_rb_mask & tmp) == tmp) {
- for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
- disabled_rb_mask &= ~(1 << i);
- }
-
- for (i = 0; i < rdev->config.cayman.max_shader_engines; i++) {
- u32 simd_disable_bitmap;
-
- WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
- WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
- simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
- simd_disable_bitmap |= 0xffffffff << rdev->config.cayman.max_simds_per_se;
- tmp <<= 16;
- tmp |= simd_disable_bitmap;
- }
- rdev->config.cayman.active_simds = hweight32(~tmp);
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
@@ -1133,18 +633,15 @@ static void cayman_gpu_init(struct radeon_device *rdev)
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
- WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
- WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
- WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
if ((rdev->config.cayman.max_backends_per_se == 1) &&
(rdev->flags & RADEON_IS_IGP)) {
- if ((disabled_rb_mask & 3) == 2) {
- /* RB1 disabled, RB0 enabled */
- tmp = 0x00000000;
- } else {
+ if ((disabled_rb_mask & 3) == 1) {
/* RB0 disabled, RB1 enabled */
tmp = 0x11111111;
+ } else {
+ /* RB1 disabled, RB0 enabled */
+ tmp = 0x00000000;
}
} else {
tmp = gb_addr_config & NUM_PIPES_MASK;
@@ -1248,16 +745,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
udelay(50);
-
- /* set clockgating golden values on TN */
- if (rdev->family == CHIP_ARUBA) {
- tmp = RREG32_CG(CG_CGTT_LOCAL_0);
- tmp &= ~0x00380000;
- WREG32_CG(CG_CGTT_LOCAL_0, tmp);
- tmp = RREG32_CG(CG_CGTT_LOCAL_1);
- tmp &= ~0x0e000000;
- WREG32_CG(CG_CGTT_LOCAL_1, tmp);
- }
}
/*
@@ -1283,6 +770,7 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
+ radeon_gart_restore(rdev);
/* Setup TLB control */
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
@@ -1293,14 +781,12 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
- ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
EFFECTIVE_L2_QUEUE_SIZE(7) |
CONTEXT1_IDENTITY_ACCESS_MODE(1));
WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
- BANK_SELECT(6) |
L2_CACHE_BIGK_FRAGMENT_SIZE(6));
/* setup context0 */
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
@@ -1323,10 +809,9 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
*/
for (i = 1; i < 8; i++) {
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
- WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2),
- rdev->vm_manager.max_pfn - 1);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
- rdev->vm_manager.saved_table_addr[i]);
+ rdev->gart.table_addr >> 12);
}
/* enable context1-7 */
@@ -1334,7 +819,6 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
(u32)(rdev->dummy_page.addr >> 12));
WREG32(VM_CONTEXT1_CNTL2, 4);
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
- PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
@@ -1358,13 +842,6 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
static void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
- unsigned i;
-
- for (i = 1; i < 8; ++i) {
- rdev->vm_manager.saved_table_addr[i] = RREG32(
- VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2));
- }
-
/* Disable all tables */
WREG32(VM_CONTEXT0_CNTL, 0);
WREG32(VM_CONTEXT1_CNTL, 0);
@@ -1393,7 +870,9 @@ static void cayman_pcie_gart_fini(struct radeon_device *rdev)
void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
int ring, u32 cp_int_cntl)
{
- WREG32(SRBM_GFX_CNTL, RINGID(ring));
+ u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
+
+ WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
WREG32(CP_INT_CNTL, cp_int_cntl);
}
@@ -1417,7 +896,7 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
/* EVENT_WRITE_EOP - flush caches, send int */
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
- radeon_ring_write(ring, lower_32_bits(addr));
+ radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
@@ -1426,7 +905,6 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
- unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
PACKET3_SH_ACTION_ENA;
@@ -1449,14 +927,15 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
#endif
(ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
- radeon_ring_write(ring, ib->length_dw | (vm_id << 24));
+ radeon_ring_write(ring, ib->length_dw |
+ (ib->vm ? (ib->vm->id << 24) : 0));
/* flush read cache over gart for this vmid */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
- radeon_ring_write(ring, (vm_id << 24) | 10); /* poll interval */
+ radeon_ring_write(ring, ((ib->vm ? ib->vm->id : 0) << 24) | 10); /* poll interval */
}
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
@@ -1464,63 +943,13 @@ static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
if (enable)
WREG32(CP_ME_CNTL, 0);
else {
- if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
WREG32(SCRATCH_UMSK, 0);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}
}
-u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
- struct radeon_ring *ring)
-{
- u32 rptr;
-
- if (rdev->wb.enabled)
- rptr = rdev->wb.wb[ring->rptr_offs/4];
- else {
- if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
- rptr = RREG32(CP_RB0_RPTR);
- else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
- rptr = RREG32(CP_RB1_RPTR);
- else
- rptr = RREG32(CP_RB2_RPTR);
- }
-
- return rptr;
-}
-
-u32 cayman_gfx_get_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring)
-{
- u32 wptr;
-
- if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
- wptr = RREG32(CP_RB0_WPTR);
- else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
- wptr = RREG32(CP_RB1_WPTR);
- else
- wptr = RREG32(CP_RB2_WPTR);
-
- return wptr;
-}
-
-void cayman_gfx_set_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring)
-{
- if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
- WREG32(CP_RB0_WPTR, ring->wptr);
- (void)RREG32(CP_RB0_WPTR);
- } else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) {
- WREG32(CP_RB1_WPTR, ring->wptr);
- (void)RREG32(CP_RB1_WPTR);
- } else {
- WREG32(CP_RB2_WPTR, ring->wptr);
- (void)RREG32(CP_RB2_WPTR);
- }
-}
-
static int cayman_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
@@ -1531,13 +960,13 @@ static int cayman_cp_load_microcode(struct radeon_device *rdev)
cayman_cp_enable(rdev, false);
- fw_data = (const __be32 *)rdev->pfp_fw->data;
+ fw_data = (const __be32 *)rdev->pfp_fw;
WREG32(CP_PFP_UCODE_ADDR, 0);
for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
WREG32(CP_PFP_UCODE_ADDR, 0);
- fw_data = (const __be32 *)rdev->me_fw->data;
+ fw_data = (const __be32 *)rdev->me_fw;
WREG32(CP_ME_RAM_WADDR, 0);
for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
@@ -1565,7 +994,7 @@ static int cayman_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
- radeon_ring_unlock_commit(rdev, ring, false);
+ radeon_ring_unlock_commit(rdev, ring);
cayman_cp_enable(rdev, true);
@@ -1607,7 +1036,7 @@ static int cayman_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(ring, 0x00000010); /* */
- radeon_ring_unlock_commit(rdev, ring, false);
+ radeon_ring_unlock_commit(rdev, ring);
/* XXX init other rings */
@@ -1649,16 +1078,6 @@ static int cayman_cp_resume(struct radeon_device *rdev)
CP_RB1_BASE,
CP_RB2_BASE
};
- static const unsigned cp_rb_rptr[] = {
- CP_RB0_RPTR,
- CP_RB1_RPTR,
- CP_RB2_RPTR
- };
- static const unsigned cp_rb_wptr[] = {
- CP_RB0_WPTR,
- CP_RB1_WPTR,
- CP_RB2_WPTR
- };
struct radeon_ring *ring;
int i, r;
@@ -1692,8 +1111,8 @@ static int cayman_cp_resume(struct radeon_device *rdev)
/* Set ring buffer size */
ring = &rdev->ring[ridx[i]];
- rb_cntl = order_base_2(ring->ring_size / 8);
- rb_cntl |= order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8;
+ rb_cntl = drm_order(ring->ring_size / 8);
+ rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8;
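	/* note: drm_order() is the old DRM ceil-log2 helper; assuming its
	 * classic semantics (smallest n with (1 << n) >= size), a 1 MB
	 * ring gives drm_order(1024 * 1024 / 8) == drm_order(131072) == 17. */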
#ifdef __BIG_ENDIAN
rb_cntl |= BUF_SWAP_32BIT;
#endif
@@ -1716,9 +1135,9 @@ static int cayman_cp_resume(struct radeon_device *rdev)
ring = &rdev->ring[ridx[i]];
WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);
- ring->wptr = 0;
- WREG32(cp_rb_rptr[i], 0);
- WREG32(cp_rb_wptr[i], ring->wptr);
+ ring->rptr = ring->wptr = 0;
+ WREG32(ring->rptr_reg, ring->rptr);
+ WREG32(ring->wptr_reg, ring->wptr);
mdelay(1);
WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
@@ -1738,262 +1157,359 @@ static int cayman_cp_resume(struct radeon_device *rdev)
return r;
}
- if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
-
return 0;
}
-u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine. The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things. It also
+ * has support for tiling/detiling of buffers.
+ * Cayman and newer support two asynchronous DMA engines.
+ */
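A minimal sketch of the header-format difference described above, built
from the DMA_PACKET() and PACKET3() builders in nid.h (a PM4 type-3
header carries its opcode in bits 15:8 and a 14-bit dword count, while
the async-DMA header keeps its count in the low 20 bits; see the macro
definitions later in this diff for the full layout):

	/* sketch only: one header of each flavor */
	u32 dma_hdr = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);	/* async-DMA write, 1 dword */
	u32 pm4_hdr = PACKET3(PACKET3_SURFACE_SYNC, 3);		/* PM4 type-3, 3 dwords */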
+/**
+ * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (cayman-SI).
+ */
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib)
{
- u32 reset_mask = 0;
- u32 tmp;
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
- /* GRBM_STATUS */
- tmp = RREG32(GRBM_STATUS);
- if (tmp & (PA_BUSY | SC_BUSY |
- SH_BUSY | SX_BUSY |
- TA_BUSY | VGT_BUSY |
- DB_BUSY | CB_BUSY |
- GDS_BUSY | SPI_BUSY |
- IA_BUSY | IA_BUSY_NO_DMA))
- reset_mask |= RADEON_RESET_GFX;
-
- if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
- CP_BUSY | CP_COHERENCY_BUSY))
- reset_mask |= RADEON_RESET_CP;
-
- if (tmp & GRBM_EE_BUSY)
- reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
-
- /* DMA_STATUS_REG 0 */
- tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
- if (!(tmp & DMA_IDLE))
- reset_mask |= RADEON_RESET_DMA;
-
- /* DMA_STATUS_REG 1 */
- tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
- if (!(tmp & DMA_IDLE))
- reset_mask |= RADEON_RESET_DMA1;
-
- /* SRBM_STATUS2 */
- tmp = RREG32(SRBM_STATUS2);
- if (tmp & DMA_BUSY)
- reset_mask |= RADEON_RESET_DMA;
-
- if (tmp & DMA1_BUSY)
- reset_mask |= RADEON_RESET_DMA1;
-
- /* SRBM_STATUS */
- tmp = RREG32(SRBM_STATUS);
- if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
- reset_mask |= RADEON_RESET_RLC;
-
- if (tmp & IH_BUSY)
- reset_mask |= RADEON_RESET_IH;
-
- if (tmp & SEM_BUSY)
- reset_mask |= RADEON_RESET_SEM;
-
- if (tmp & GRBM_RQ_PENDING)
- reset_mask |= RADEON_RESET_GRBM;
-
- if (tmp & VMC_BUSY)
- reset_mask |= RADEON_RESET_VMC;
-
- if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
- MCC_BUSY | MCD_BUSY))
- reset_mask |= RADEON_RESET_MC;
-
- if (evergreen_is_display_hung(rdev))
- reset_mask |= RADEON_RESET_DISPLAY;
-
- /* VM_L2_STATUS */
- tmp = RREG32(VM_L2_STATUS);
- if (tmp & L2_BUSY)
- reset_mask |= RADEON_RESET_VMC;
-
- /* Skip MC reset as it's mostly likely not hung, just busy */
- if (reset_mask & RADEON_RESET_MC) {
- DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
- reset_mask &= ~RADEON_RESET_MC;
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
}
- return reset_mask;
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
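	/* note: the INDIRECT_BUFFER packet below is 3 dwords, so padding
	 * until (wptr & 7) == 5 places it in dwords 5..7 of an 8-dword
	 * group; 5 + 3 == 8, so the packet ends exactly on the boundary. */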
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
}
-static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+/**
+ * cayman_dma_stop - stop the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines (cayman-SI).
+ */
+void cayman_dma_stop(struct radeon_device *rdev)
{
- struct evergreen_mc_save save;
- u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
- u32 tmp;
+ u32 rb_cntl;
- if (reset_mask == 0)
- return;
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
- dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+ /* dma0 */
+ rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
- evergreen_print_gpu_status_regs(rdev);
- dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
- RREG32(0x14F8));
- dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
- RREG32(0x14D8));
- dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
- RREG32(0x14FC));
- dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
- RREG32(0x14DC));
+ /* dma1 */
+ rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
- /* Disable CP parsing/prefetching */
- WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
-
- if (reset_mask & RADEON_RESET_DMA) {
- /* dma0 */
- tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
- }
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+}
- if (reset_mask & RADEON_RESET_DMA1) {
- /* dma1 */
- tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
- }
+/**
+ * cayman_dma_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffers and enable them. (cayman-SI).
+ * Returns 0 for success, error for failure.
+ */
+int cayman_dma_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ u32 rb_cntl, dma_cntl, ib_cntl;
+ u32 rb_bufsz;
+ u32 reg_offset, wb_offset;
+ int i, r;
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+ RREG32(SRBM_SOFT_RESET);
udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
- evergreen_mc_stop(rdev, &save);
- if (evergreen_mc_wait_for_idle(rdev)) {
- dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
- }
+ for (i = 0; i < 2; i++) {
+ if (i == 0) {
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ reg_offset = DMA0_REGISTER_OFFSET;
+ wb_offset = R600_WB_DMA_RPTR_OFFSET;
+ } else {
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ reg_offset = DMA1_REGISTER_OFFSET;
+ wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
+ }
- if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
- grbm_soft_reset = SOFT_RESET_CB |
- SOFT_RESET_DB |
- SOFT_RESET_GDS |
- SOFT_RESET_PA |
- SOFT_RESET_SC |
- SOFT_RESET_SPI |
- SOFT_RESET_SH |
- SOFT_RESET_SX |
- SOFT_RESET_TC |
- SOFT_RESET_TA |
- SOFT_RESET_VGT |
- SOFT_RESET_IA;
- }
+ WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
+ WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
- if (reset_mask & RADEON_RESET_CP) {
- grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
+ /* Set ring buffer size in dwords */
+ rb_bufsz = drm_order(ring->ring_size / 4);
+ rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+ rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
- srbm_soft_reset |= SOFT_RESET_GRBM;
- }
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(DMA_RB_RPTR + reg_offset, 0);
+ WREG32(DMA_RB_WPTR + reg_offset, 0);
- if (reset_mask & RADEON_RESET_DMA)
- srbm_soft_reset |= SOFT_RESET_DMA;
+ /* set the wb address whether it's enabled or not */
+ WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
+ upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
+ WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
+ ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+
+ if (rdev->wb.enabled)
+ rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
- if (reset_mask & RADEON_RESET_DMA1)
- srbm_soft_reset |= SOFT_RESET_DMA1;
+ WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
- if (reset_mask & RADEON_RESET_DISPLAY)
- srbm_soft_reset |= SOFT_RESET_DC;
+ /* enable DMA IBs */
+ ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+ WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
- if (reset_mask & RADEON_RESET_RLC)
- srbm_soft_reset |= SOFT_RESET_RLC;
+ dma_cntl = RREG32(DMA_CNTL + reg_offset);
+ dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+ WREG32(DMA_CNTL + reg_offset, dma_cntl);
- if (reset_mask & RADEON_RESET_SEM)
- srbm_soft_reset |= SOFT_RESET_SEM;
+ ring->wptr = 0;
+ WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
- if (reset_mask & RADEON_RESET_IH)
- srbm_soft_reset |= SOFT_RESET_IH;
+ ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
- if (reset_mask & RADEON_RESET_GRBM)
- srbm_soft_reset |= SOFT_RESET_GRBM;
+ WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
- if (reset_mask & RADEON_RESET_VMC)
- srbm_soft_reset |= SOFT_RESET_VMC;
+ ring->ready = true;
- if (!(rdev->flags & RADEON_IS_IGP)) {
- if (reset_mask & RADEON_RESET_MC)
- srbm_soft_reset |= SOFT_RESET_MC;
+ r = radeon_ring_test(rdev, ring->idx, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
}
- if (grbm_soft_reset) {
- tmp = RREG32(GRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(GRBM_SOFT_RESET, tmp);
- tmp = RREG32(GRBM_SOFT_RESET);
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
- udelay(50);
+ return 0;
+}
- tmp &= ~grbm_soft_reset;
- WREG32(GRBM_SOFT_RESET, tmp);
- tmp = RREG32(GRBM_SOFT_RESET);
- }
+/**
+ * cayman_dma_fini - tear down the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines and free the rings (cayman-SI).
+ */
+void cayman_dma_fini(struct radeon_device *rdev)
+{
+ cayman_dma_stop(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+ radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
+}
+
+static void cayman_gpu_soft_reset_gfx(struct radeon_device *rdev)
+{
+ u32 grbm_reset = 0;
- if (srbm_soft_reset) {
- tmp = RREG32(SRBM_SOFT_RESET);
- tmp |= srbm_soft_reset;
- dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ return;
- udelay(50);
+ dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
+ RREG32(GRBM_STATUS));
+ dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
+ RREG32(GRBM_STATUS_SE0));
+ dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
+ RREG32(GRBM_STATUS_SE1));
+ dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
+ RREG32(SRBM_STATUS));
+ dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT1));
+ dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT2));
+ dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
+ RREG32(CP_BUSY_STAT));
+ dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
+ RREG32(CP_STAT));
- tmp &= ~srbm_soft_reset;
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
- }
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
- /* Wait a little for things to settle down */
+ /* reset all the gfx blocks */
+ grbm_reset = (SOFT_RESET_CP |
+ SOFT_RESET_CB |
+ SOFT_RESET_DB |
+ SOFT_RESET_GDS |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SH |
+ SOFT_RESET_SX |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VGT |
+ SOFT_RESET_IA);
+
+ dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
+ WREG32(GRBM_SOFT_RESET, grbm_reset);
+ (void)RREG32(GRBM_SOFT_RESET);
udelay(50);
+ WREG32(GRBM_SOFT_RESET, 0);
+ (void)RREG32(GRBM_SOFT_RESET);
+
+ dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
+ RREG32(GRBM_STATUS));
+ dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
+ RREG32(GRBM_STATUS_SE0));
+ dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
+ RREG32(GRBM_STATUS_SE1));
+ dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
+ RREG32(SRBM_STATUS));
+ dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT1));
+ dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT2));
+ dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
+ RREG32(CP_BUSY_STAT));
+ dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
+ RREG32(CP_STAT));
- evergreen_mc_resume(rdev, &save);
+}
+
+static void cayman_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ return;
+
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
+
+ /* dma0 */
+ tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+
+ /* dma1 */
+ tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+ RREG32(SRBM_SOFT_RESET);
udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
- evergreen_print_gpu_status_regs(rdev);
}
-int cayman_asic_reset(struct radeon_device *rdev)
+static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
- u32 reset_mask;
+ struct evergreen_mc_save save;
+
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
- reset_mask = cayman_gpu_check_soft_reset(rdev);
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ reset_mask &= ~RADEON_RESET_DMA;
+
+ if (reset_mask == 0)
+ return 0;
- if (reset_mask)
- r600_set_bios_scratch_engine_hung(rdev, true);
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+ dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(0x14F8));
+ dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(0x14D8));
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(0x14FC));
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(0x14DC));
- cayman_gpu_soft_reset(rdev, reset_mask);
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
- reset_mask = cayman_gpu_check_soft_reset(rdev);
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+ cayman_gpu_soft_reset_gfx(rdev);
- if (reset_mask)
- evergreen_gpu_pci_config_reset(rdev);
+ if (reset_mask & RADEON_RESET_DMA)
+ cayman_gpu_soft_reset_dma(rdev);
- r600_set_bios_scratch_engine_hung(rdev, false);
+ /* Wait a little for things to settle down */
+ udelay(50);
+ evergreen_mc_resume(rdev, &save);
return 0;
}
+int cayman_asic_reset(struct radeon_device *rdev)
+{
+ return cayman_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_DMA));
+}
+
/**
- * cayman_gfx_is_lockup - Check if the GFX engine is locked up
+ * cayman_dma_is_lockup - Check if the DMA engine is locked up
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
- * Check if the GFX engine is locked up.
+ * Check if the async DMA engine is locked up (cayman-SI).
* Returns true if the engine appears to be locked up, false if not.
*/
-bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
- u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
+ u32 dma_status_reg;
- if (!(reset_mask & (RADEON_RESET_GFX |
- RADEON_RESET_COMPUTE |
- RADEON_RESET_CP))) {
- radeon_ring_lockup_update(rdev, ring);
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ dma_status_reg = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+ else
+ dma_status_reg = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+ if (dma_status_reg & DMA_IDLE) {
+ radeon_ring_lockup_update(ring);
return false;
}
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
@@ -2004,17 +1520,26 @@ static int cayman_startup(struct radeon_device *rdev)
/* enable pcie gen2 link */
evergreen_pcie_gen2_enable(rdev);
- /* enable aspm */
- evergreen_program_aspm(rdev);
-
- /* scratch needs to be initialized before MC */
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
evergreen_mc_program(rdev);
- if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
+ if (rdev->flags & RADEON_IS_IGP) {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = ni_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+ } else {
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+ r = ni_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+
r = ni_mc_load_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load MC firmware!\n");
@@ -2022,18 +1547,25 @@ static int cayman_startup(struct radeon_device *rdev)
}
}
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
r = cayman_pcie_gart_enable(rdev);
if (r)
return r;
cayman_gpu_init(rdev);
+ r = evergreen_blit_init(rdev);
+ if (r) {
+ r600_blit_fini(rdev);
+ rdev->asic->copy.copy = NULL;
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ }
+
/* allocate rlc buffers */
if (rdev->flags & RADEON_IS_IGP) {
- rdev->rlc.reg_list = tn_rlc_save_restore_register_list;
- rdev->rlc.reg_list_size =
- (u32)ARRAY_SIZE(tn_rlc_save_restore_register_list);
- rdev->rlc.cs_data = cayman_cs_data;
- r = sumo_rlc_init(rdev);
+ r = si_rlc_init(rdev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
@@ -2051,35 +1583,6 @@ static int cayman_startup(struct radeon_device *rdev)
return r;
}
- r = uvd_v2_2_resume(rdev);
- if (!r) {
- r = radeon_fence_driver_start_ring(rdev,
- R600_RING_TYPE_UVD_INDEX);
- if (r)
- dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
- }
- if (r)
- rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
-
- if (rdev->family == CHIP_ARUBA) {
- r = radeon_vce_resume(rdev);
- if (!r)
- r = vce_v1_0_resume(rdev);
-
- if (!r)
- r = radeon_fence_driver_start_ring(rdev,
- TN_RING_TYPE_VCE1_INDEX);
- if (!r)
- r = radeon_fence_driver_start_ring(rdev,
- TN_RING_TYPE_VCE2_INDEX);
-
- if (r) {
- dev_err(rdev->dev, "VCE init error (%d).\n", r);
- rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
- rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
- }
- }
-
r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
if (r) {
dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
@@ -2120,19 +1623,24 @@ static int cayman_startup(struct radeon_device *rdev)
evergreen_irq_set(rdev);
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- RADEON_CP_PACKET2);
+ CP_RB0_RPTR, CP_RB0_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
- DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
- DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
@@ -2147,31 +1655,6 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
- ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- RADEON_CP_PACKET2);
- if (!r)
- r = uvd_v1_0_init(rdev);
- if (r)
- DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
- }
-
- if (rdev->family == CHIP_ARUBA) {
- ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
- if (ring->ring_size)
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
-
- ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
- if (ring->ring_size)
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
-
- if (!r)
- r = vce_v1_0_init(rdev);
- if (r)
- DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
- }
-
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -2184,7 +1667,7 @@ static int cayman_startup(struct radeon_device *rdev)
return r;
}
- r = radeon_audio_init(rdev);
+ r = r600_audio_init(rdev);
if (r)
return r;
@@ -2202,12 +1685,6 @@ int cayman_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
- /* init golden registers */
- ni_init_golden_registers(rdev);
-
- if (rdev->pm.pm_method == PM_METHOD_DPM)
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = cayman_startup(rdev);
if (r) {
@@ -2220,13 +1697,10 @@ int cayman_resume(struct radeon_device *rdev)
int cayman_suspend(struct radeon_device *rdev)
{
- radeon_pm_suspend(rdev);
- radeon_audio_fini(rdev);
+ r600_audio_fini(rdev);
radeon_vm_manager_fini(rdev);
cayman_cp_enable(rdev, false);
cayman_dma_stop(rdev);
- uvd_v1_0_fini(rdev);
- radeon_uvd_suspend(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
cayman_pcie_gart_disable(rdev);
@@ -2267,8 +1741,6 @@ int cayman_init(struct radeon_device *rdev)
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
- /* init golden registers */
- ni_init_golden_registers(rdev);
/* Initialize scratch registers */
r600_scratch_init(rdev);
/* Initialize surface registers */
@@ -2288,27 +1760,6 @@ int cayman_init(struct radeon_device *rdev)
if (r)
return r;
- if (rdev->flags & RADEON_IS_IGP) {
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
- r = ni_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
- } else {
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
- r = ni_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
- }
-
- /* Initialize power management */
- radeon_pm_init(rdev);
-
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
@@ -2320,26 +1771,6 @@ int cayman_init(struct radeon_device *rdev)
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 64 * 1024);
- r = radeon_uvd_init(rdev);
- if (!r) {
- ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 4096);
- }
-
- if (rdev->family == CHIP_ARUBA) {
- r = radeon_vce_init(rdev);
- if (!r) {
- ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 4096);
-
- ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 4096);
- }
- }
-
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -2355,7 +1786,7 @@ int cayman_init(struct radeon_device *rdev)
cayman_dma_fini(rdev);
r600_irq_fini(rdev);
if (rdev->flags & RADEON_IS_IGP)
- sumo_rlc_fini(rdev);
+ si_rlc_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_vm_manager_fini(rdev);
@@ -2381,20 +1812,16 @@ int cayman_init(struct radeon_device *rdev)
void cayman_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
+ r600_blit_fini(rdev);
cayman_cp_fini(rdev);
cayman_dma_fini(rdev);
r600_irq_fini(rdev);
if (rdev->flags & RADEON_IS_IGP)
- sumo_rlc_fini(rdev);
+ si_rlc_fini(rdev);
radeon_wb_fini(rdev);
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
- uvd_v1_0_fini(rdev);
- radeon_uvd_fini(rdev);
- if (rdev->family == CHIP_ARUBA)
- radeon_vce_fini(rdev);
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
@@ -2426,165 +1853,96 @@ void cayman_vm_fini(struct radeon_device *rdev)
{
}
+#define R600_ENTRY_VALID (1 << 0)
+#define R600_PTE_SYSTEM (1 << 1)
+#define R600_PTE_SNOOPED (1 << 2)
+#define R600_PTE_READABLE (1 << 5)
+#define R600_PTE_WRITEABLE (1 << 6)
+
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
+{
+ uint32_t r600_flags = 0;
+ r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
+ r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
+ r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ r600_flags |= R600_PTE_SYSTEM;
+ r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
+ }
+ return r600_flags;
+}
+
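A worked example of the translation above, assuming the RADEON_VM_PAGE_*
request flags used in the function:

	/* sketch: a snooped, readable, writeable system page */
	uint32_t f = cayman_vm_page_flags(rdev,
	                 RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM |
	                 RADEON_VM_PAGE_SNOOPED | RADEON_VM_PAGE_READABLE |
	                 RADEON_VM_PAGE_WRITEABLE);
	/* f == R600_ENTRY_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED |
	 *      R600_PTE_READABLE | R600_PTE_WRITEABLE == 0x67 */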
/**
- * cayman_vm_decode_fault - print human readable fault info
+ * cayman_vm_set_page - update the page tables using the CP
*
* @rdev: radeon_device pointer
- * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
- * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
*
- * Print human readable fault information (cayman/TN).
+ * Update the page tables using the CP (cayman-SI).
*/
-void cayman_vm_decode_fault(struct radeon_device *rdev,
- u32 status, u32 addr)
+void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
{
- u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
- u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
- u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
- char *block;
-
- switch (mc_id) {
- case 32:
- case 16:
- case 96:
- case 80:
- case 160:
- case 144:
- case 224:
- case 208:
- block = "CB";
- break;
- case 33:
- case 17:
- case 97:
- case 81:
- case 161:
- case 145:
- case 225:
- case 209:
- block = "CB_FMASK";
- break;
- case 34:
- case 18:
- case 98:
- case 82:
- case 162:
- case 146:
- case 226:
- case 210:
- block = "CB_CMASK";
- break;
- case 35:
- case 19:
- case 99:
- case 83:
- case 163:
- case 147:
- case 227:
- case 211:
- block = "CB_IMMED";
- break;
- case 36:
- case 20:
- case 100:
- case 84:
- case 164:
- case 148:
- case 228:
- case 212:
- block = "DB";
- break;
- case 37:
- case 21:
- case 101:
- case 85:
- case 165:
- case 149:
- case 229:
- case 213:
- block = "DB_HTILE";
- break;
- case 38:
- case 22:
- case 102:
- case 86:
- case 166:
- case 150:
- case 230:
- case 214:
- block = "SX";
- break;
- case 39:
- case 23:
- case 103:
- case 87:
- case 167:
- case 151:
- case 231:
- case 215:
- block = "DB_STEN";
- break;
- case 40:
- case 24:
- case 104:
- case 88:
- case 232:
- case 216:
- case 168:
- case 152:
- block = "TC_TFETCH";
- break;
- case 41:
- case 25:
- case 105:
- case 89:
- case 233:
- case 217:
- case 169:
- case 153:
- block = "TC_VFETCH";
- break;
- case 42:
- case 26:
- case 106:
- case 90:
- case 234:
- case 218:
- case 170:
- case 154:
- block = "VC";
- break;
- case 112:
- block = "CP";
- break;
- case 113:
- case 114:
- block = "SH";
- break;
- case 115:
- block = "VGT";
- break;
- case 178:
- block = "IH";
- break;
- case 51:
- block = "RLC";
- break;
- case 55:
- block = "DMA";
- break;
- case 56:
- block = "HDP";
- break;
- default:
- block = "unknown";
- break;
+ struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
+ uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+ uint64_t value;
+ unsigned ndw;
+
+ if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
+ while (count) {
+ ndw = 1 + count * 2;
+ if (ndw > 0x3FFF)
+ ndw = 0x3FFF;
+
+ radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ for (; ndw > 1; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
+ }
+ }
+ } else {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
+ }
+ }
}
-
- printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
- protections, vmid, addr,
- (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
- block, mc_id);
}
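	/* note on the two caps above: the CP path limits ndw to 0x3FFF
	 * because the PACKET3() count field is 14 bits ((n) & 0x3FFF),
	 * while the DMA_PACKET() count field is 20 bits; capping at
	 * 0xFFFFE (not 0xFFFFF) keeps the count even, so page entries
	 * stay in whole 2-dword pairs. */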
/**
@@ -2595,11 +1953,15 @@ void cayman_vm_decode_fault(struct radeon_device *rdev,
* Update the page table base and flush the VM TLB
* using the CP (cayman-SI).
*/
-void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
- radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2), 0));
- radeon_ring_write(ring, pd_addr >> 12);
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ if (vm == NULL)
+ return;
+
+ radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
/* flush hdp cache */
radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
@@ -2607,50 +1969,32 @@ void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
/* bits 0-7 are the VM contexts0-7 */
radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
- radeon_ring_write(ring, 1 << vm_id);
-
- /* wait for the invalidate to complete */
- radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
- radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */
- WAIT_REG_MEM_ENGINE(0))); /* me */
- radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0); /* ref */
- radeon_ring_write(ring, 0); /* mask */
- radeon_ring_write(ring, 0x20); /* poll interval */
+ radeon_ring_write(ring, 1 << vm->id);
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
}
-int tn_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
- struct atom_clock_dividers dividers;
- int r, i;
+ struct radeon_ring *ring = &rdev->ring[ridx];
- r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
- ecclk, false, &dividers);
- if (r)
- return r;
-
- for (i = 0; i < 100; i++) {
- if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
- break;
- mdelay(10);
- }
- if (i == 100)
- return -ETIMEDOUT;
+ if (vm == NULL)
+ return;
- WREG32_P(CG_ECLK_CNTL, dividers.post_div, ~(ECLK_DIR_CNTL_EN|ECLK_DIVIDER_MASK));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
- for (i = 0; i < 100; i++) {
- if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
- break;
- mdelay(10);
- }
- if (i == 100)
- return -ETIMEDOUT;
+ /* flush hdp cache */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
- return 0;
+ /* bits 0-7 are the VM contexts0-7 */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+ radeon_ring_write(ring, 1 << vm->id);
}
+
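The three SRBM writes above share one idiom; factored out, it would look
like this hypothetical helper (not part of the driver; the (0xf << 16)
byte-enable field and the dword register address are copied from the
code above):

	static void example_dma_srbm_write(struct radeon_ring *ring,
	                                   u32 reg, u32 val)
	{
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
		radeon_ring_write(ring, (0xf << 16) | (reg >> 2));
		radeon_ring_write(ring, val);
	}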
diff --git a/sys/dev/pci/drm/radeon/ni_reg.h b/sys/dev/pci/drm/radeon/ni_reg.h
index da310a70c0f..90c1bc7ac0f 100644
--- a/sys/dev/pci/drm/radeon/ni_reg.h
+++ b/sys/dev/pci/drm/radeon/ni_reg.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: ni_reg.h,v 1.3 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2010 Advanced Micro Devices, Inc.
*
@@ -83,48 +84,4 @@
# define NI_REGAMMA_PROG_B 4
# define NI_OVL_REGAMMA_MODE(x) (((x) & 0x7) << 4)
-#define NI_DP_MSE_LINK_TIMING 0x73a0
-# define NI_DP_MSE_LINK_FRAME (((x) & 0x3ff) << 0)
-# define NI_DP_MSE_LINK_LINE (((x) & 0x3) << 16)
-
-#define NI_DP_MSE_MISC_CNTL 0x736c
-# define NI_DP_MSE_BLANK_CODE (((x) & 0x1) << 0)
-# define NI_DP_MSE_TIMESTAMP_MODE (((x) & 0x1) << 4)
-# define NI_DP_MSE_ZERO_ENCODER (((x) & 0x1) << 8)
-
-#define NI_DP_MSE_RATE_CNTL 0x7384
-# define NI_DP_MSE_RATE_Y(x) (((x) & 0x3ffffff) << 0)
-# define NI_DP_MSE_RATE_X(x) (((x) & 0x3f) << 26)
-
-#define NI_DP_MSE_RATE_UPDATE 0x738c
-
-#define NI_DP_MSE_SAT0 0x7390
-# define NI_DP_MSE_SAT_SRC0(x) (((x) & 0x7) << 0)
-# define NI_DP_MSE_SAT_SLOT_COUNT0(x) (((x) & 0x3f) << 8)
-# define NI_DP_MSE_SAT_SRC1(x) (((x) & 0x7) << 16)
-# define NI_DP_MSE_SAT_SLOT_COUNT1(x) (((x) & 0x3f) << 24)
-
-#define NI_DP_MSE_SAT1 0x7394
-
-#define NI_DP_MSE_SAT2 0x7398
-
-#define NI_DP_MSE_SAT_UPDATE 0x739c
-
-#define NI_DIG_BE_CNTL 0x7140
-# define NI_DIG_FE_SOURCE_SELECT(x) (((x) & 0x7f) << 8)
-# define NI_DIG_FE_DIG_MODE(x) (((x) & 0x7) << 16)
-# define NI_DIG_MODE_DP_SST 0
-# define NI_DIG_MODE_LVDS 1
-# define NI_DIG_MODE_TMDS_DVI 2
-# define NI_DIG_MODE_TMDS_HDMI 3
-# define NI_DIG_MODE_DP_MST 5
-# define NI_DIG_HPD_SELECT(x) (((x) & 0x7) << 28)
-
-#define NI_DIG_FE_CNTL 0x7000
-# define NI_DIG_SOURCE_SELECT(x) (((x) & 0x3) << 0)
-# define NI_DIG_STEREOSYNC_SELECT(x) (((x) & 0x3) << 4)
-# define NI_DIG_STEREOSYNC_GATE_EN(x) (((x) & 0x1) << 8)
-# define NI_DIG_DUAL_LINK_ENABLE(x) (((x) & 0x1) << 16)
-# define NI_DIG_SWAP(x) (((x) & 0x1) << 18)
-# define NI_DIG_SYMCLK_FE_ON (0x1 << 24)
#endif
diff --git a/sys/dev/pci/drm/radeon/nid.h b/sys/dev/pci/drm/radeon/nid.h
index 47eb49b77d3..35f3cd1a20e 100644
--- a/sys/dev/pci/drm/radeon/nid.h
+++ b/sys/dev/pci/drm/radeon/nid.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: nid.h,v 1.5 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2010 Advanced Micro Devices, Inc.
*
@@ -46,13 +47,6 @@
#define DMIF_ADDR_CONFIG 0xBD4
-/* fusion vce clocks */
-#define CG_ECLK_CNTL 0x620
-# define ECLK_DIVIDER_MASK 0x7f
-# define ECLK_DIR_CNTL_EN (1 << 8)
-#define CG_ECLK_STATUS 0x624
-# define ECLK_STATUS (1 << 0)
-
/* DCE6 only */
#define DMIF_ADDR_CALC 0xC00
@@ -60,16 +54,6 @@
#define RINGID(x) (((x) & 0x3) << 0)
#define VMID(x) (((x) & 0x7) << 0)
#define SRBM_STATUS 0x0E50
-#define RLC_RQ_PENDING (1 << 3)
-#define GRBM_RQ_PENDING (1 << 5)
-#define VMC_BUSY (1 << 8)
-#define MCB_BUSY (1 << 9)
-#define MCB_NON_DISPLAY_BUSY (1 << 10)
-#define MCC_BUSY (1 << 11)
-#define MCD_BUSY (1 << 12)
-#define SEM_BUSY (1 << 14)
-#define RLC_BUSY (1 << 15)
-#define IH_BUSY (1 << 17)
#define SRBM_SOFT_RESET 0x0E60
#define SOFT_RESET_BIF (1 << 1)
@@ -89,14 +73,6 @@
#define SOFT_RESET_REGBB (1 << 22)
#define SOFT_RESET_ORB (1 << 23)
-#define SRBM_READ_ERROR 0xE98
-#define SRBM_INT_CNTL 0xEA0
-#define SRBM_INT_ACK 0xEA8
-
-#define SRBM_STATUS2 0x0EC4
-#define DMA_BUSY (1 << 5)
-#define DMA1_BUSY (1 << 6)
-
#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
#define RESPONSE_TYPE_MASK 0x000000F0
@@ -139,28 +115,11 @@
#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
-#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24)
#define VM_CONTEXT1_CNTL 0x1414
#define VM_CONTEXT0_CNTL2 0x1430
#define VM_CONTEXT1_CNTL2 0x1434
#define VM_INVALIDATE_REQUEST 0x1478
#define VM_INVALIDATE_RESPONSE 0x147c
-#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
-#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
-#define PROTECTIONS_MASK (0xf << 0)
-#define PROTECTIONS_SHIFT 0
- /* bit 0: range
- * bit 2: pde0
- * bit 3: valid
- * bit 4: read
- * bit 5: write
- */
-#define MEMORY_CLIENT_ID_MASK (0xff << 12)
-#define MEMORY_CLIENT_ID_SHIFT 12
-#define MEMORY_CLIENT_RW_MASK (1 << 24)
-#define MEMORY_CLIENT_RW_SHIFT 24
-#define FAULT_VMID_MASK (0x7 << 25)
-#define FAULT_VMID_SHIFT 25
#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x151c
#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
@@ -517,312 +476,6 @@
# define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0)
# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
-/* TN SMU registers */
-#define TN_CURRENT_GNB_TEMP 0x1F390
-
-/* pm registers */
-#define SMC_MSG 0x20c
-#define HOST_SMC_MSG(x) ((x) << 0)
-#define HOST_SMC_MSG_MASK (0xff << 0)
-#define HOST_SMC_MSG_SHIFT 0
-#define HOST_SMC_RESP(x) ((x) << 8)
-#define HOST_SMC_RESP_MASK (0xff << 8)
-#define HOST_SMC_RESP_SHIFT 8
-#define SMC_HOST_MSG(x) ((x) << 16)
-#define SMC_HOST_MSG_MASK (0xff << 16)
-#define SMC_HOST_MSG_SHIFT 16
-#define SMC_HOST_RESP(x) ((x) << 24)
-#define SMC_HOST_RESP_MASK (0xff << 24)
-#define SMC_HOST_RESP_SHIFT 24
-
-#define CG_SPLL_FUNC_CNTL 0x600
-#define SPLL_RESET (1 << 0)
-#define SPLL_SLEEP (1 << 1)
-#define SPLL_BYPASS_EN (1 << 3)
-#define SPLL_REF_DIV(x) ((x) << 4)
-#define SPLL_REF_DIV_MASK (0x3f << 4)
-#define SPLL_PDIV_A(x) ((x) << 20)
-#define SPLL_PDIV_A_MASK (0x7f << 20)
-#define SPLL_PDIV_A_SHIFT 20
-#define CG_SPLL_FUNC_CNTL_2 0x604
-#define SCLK_MUX_SEL(x) ((x) << 0)
-#define SCLK_MUX_SEL_MASK (0x1ff << 0)
-#define CG_SPLL_FUNC_CNTL_3 0x608
-#define SPLL_FB_DIV(x) ((x) << 0)
-#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
-#define SPLL_FB_DIV_SHIFT 0
-#define SPLL_DITHEN (1 << 28)
-
-#define MPLL_CNTL_MODE 0x61c
-# define SS_SSEN (1 << 24)
-# define SS_DSMODE_EN (1 << 25)
-
-#define MPLL_AD_FUNC_CNTL 0x624
-#define CLKF(x) ((x) << 0)
-#define CLKF_MASK (0x7f << 0)
-#define CLKR(x) ((x) << 7)
-#define CLKR_MASK (0x1f << 7)
-#define CLKFRAC(x) ((x) << 12)
-#define CLKFRAC_MASK (0x1f << 12)
-#define YCLK_POST_DIV(x) ((x) << 17)
-#define YCLK_POST_DIV_MASK (3 << 17)
-#define IBIAS(x) ((x) << 20)
-#define IBIAS_MASK (0x3ff << 20)
-#define RESET (1 << 30)
-#define PDNB (1 << 31)
-#define MPLL_AD_FUNC_CNTL_2 0x628
-#define BYPASS (1 << 19)
-#define BIAS_GEN_PDNB (1 << 24)
-#define RESET_EN (1 << 25)
-#define VCO_MODE (1 << 29)
-#define MPLL_DQ_FUNC_CNTL 0x62c
-#define MPLL_DQ_FUNC_CNTL_2 0x630
-
-#define GENERAL_PWRMGT 0x63c
-# define GLOBAL_PWRMGT_EN (1 << 0)
-# define STATIC_PM_EN (1 << 1)
-# define THERMAL_PROTECTION_DIS (1 << 2)
-# define THERMAL_PROTECTION_TYPE (1 << 3)
-# define ENABLE_GEN2PCIE (1 << 4)
-# define ENABLE_GEN2XSP (1 << 5)
-# define SW_SMIO_INDEX(x) ((x) << 6)
-# define SW_SMIO_INDEX_MASK (3 << 6)
-# define SW_SMIO_INDEX_SHIFT 6
-# define LOW_VOLT_D2_ACPI (1 << 8)
-# define LOW_VOLT_D3_ACPI (1 << 9)
-# define VOLT_PWRMGT_EN (1 << 10)
-# define BACKBIAS_PAD_EN (1 << 18)
-# define BACKBIAS_VALUE (1 << 19)
-# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
-# define AC_DC_SW (1 << 24)
-
-#define SCLK_PWRMGT_CNTL 0x644
-# define SCLK_PWRMGT_OFF (1 << 0)
-# define SCLK_LOW_D1 (1 << 1)
-# define FIR_RESET (1 << 4)
-# define FIR_FORCE_TREND_SEL (1 << 5)
-# define FIR_TREND_MODE (1 << 6)
-# define DYN_GFX_CLK_OFF_EN (1 << 7)
-# define GFX_CLK_FORCE_ON (1 << 8)
-# define GFX_CLK_REQUEST_OFF (1 << 9)
-# define GFX_CLK_FORCE_OFF (1 << 10)
-# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
-# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
-# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
-# define DYN_LIGHT_SLEEP_EN (1 << 14)
-#define MCLK_PWRMGT_CNTL 0x648
-# define DLL_SPEED(x) ((x) << 0)
-# define DLL_SPEED_MASK (0x1f << 0)
-# define MPLL_PWRMGT_OFF (1 << 5)
-# define DLL_READY (1 << 6)
-# define MC_INT_CNTL (1 << 7)
-# define MRDCKA0_PDNB (1 << 8)
-# define MRDCKA1_PDNB (1 << 9)
-# define MRDCKB0_PDNB (1 << 10)
-# define MRDCKB1_PDNB (1 << 11)
-# define MRDCKC0_PDNB (1 << 12)
-# define MRDCKC1_PDNB (1 << 13)
-# define MRDCKD0_PDNB (1 << 14)
-# define MRDCKD1_PDNB (1 << 15)
-# define MRDCKA0_RESET (1 << 16)
-# define MRDCKA1_RESET (1 << 17)
-# define MRDCKB0_RESET (1 << 18)
-# define MRDCKB1_RESET (1 << 19)
-# define MRDCKC0_RESET (1 << 20)
-# define MRDCKC1_RESET (1 << 21)
-# define MRDCKD0_RESET (1 << 22)
-# define MRDCKD1_RESET (1 << 23)
-# define DLL_READY_READ (1 << 24)
-# define USE_DISPLAY_GAP (1 << 25)
-# define USE_DISPLAY_URGENT_NORMAL (1 << 26)
-# define MPLL_TURNOFF_D2 (1 << 28)
-#define DLL_CNTL 0x64c
-# define MRDCKA0_BYPASS (1 << 24)
-# define MRDCKA1_BYPASS (1 << 25)
-# define MRDCKB0_BYPASS (1 << 26)
-# define MRDCKB1_BYPASS (1 << 27)
-# define MRDCKC0_BYPASS (1 << 28)
-# define MRDCKC1_BYPASS (1 << 29)
-# define MRDCKD0_BYPASS (1 << 30)
-# define MRDCKD1_BYPASS (1 << 31)
-
-#define TARGET_AND_CURRENT_PROFILE_INDEX 0x66c
-# define CURRENT_STATE_INDEX_MASK (0xf << 4)
-# define CURRENT_STATE_INDEX_SHIFT 4
-
-#define CG_AT 0x6d4
-# define CG_R(x) ((x) << 0)
-# define CG_R_MASK (0xffff << 0)
-# define CG_L(x) ((x) << 16)
-# define CG_L_MASK (0xffff << 16)
-
-#define CG_BIF_REQ_AND_RSP 0x7f4
-#define CG_CLIENT_REQ(x) ((x) << 0)
-#define CG_CLIENT_REQ_MASK (0xff << 0)
-#define CG_CLIENT_REQ_SHIFT 0
-#define CG_CLIENT_RESP(x) ((x) << 8)
-#define CG_CLIENT_RESP_MASK (0xff << 8)
-#define CG_CLIENT_RESP_SHIFT 8
-#define CLIENT_CG_REQ(x) ((x) << 16)
-#define CLIENT_CG_REQ_MASK (0xff << 16)
-#define CLIENT_CG_REQ_SHIFT 16
-#define CLIENT_CG_RESP(x) ((x) << 24)
-#define CLIENT_CG_RESP_MASK (0xff << 24)
-#define CLIENT_CG_RESP_SHIFT 24
-
-#define CG_SPLL_SPREAD_SPECTRUM 0x790
-#define SSEN (1 << 0)
-#define CLK_S(x) ((x) << 4)
-#define CLK_S_MASK (0xfff << 4)
-#define CLK_S_SHIFT 4
-#define CG_SPLL_SPREAD_SPECTRUM_2 0x794
-#define CLK_V(x) ((x) << 0)
-#define CLK_V_MASK (0x3ffffff << 0)
-#define CLK_V_SHIFT 0
-
-#define SMC_SCRATCH0 0x81c
-
-#define CG_SPLL_FUNC_CNTL_4 0x850
-
-#define MPLL_SS1 0x85c
-#define CLKV(x) ((x) << 0)
-#define CLKV_MASK (0x3ffffff << 0)
-#define MPLL_SS2 0x860
-#define CLKS(x) ((x) << 0)
-#define CLKS_MASK (0xfff << 0)
-
-#define CG_CAC_CTRL 0x88c
-#define TID_CNT(x) ((x) << 0)
-#define TID_CNT_MASK (0x3fff << 0)
-#define TID_UNIT(x) ((x) << 14)
-#define TID_UNIT_MASK (0xf << 14)
-
-#define CG_IND_ADDR 0x8f8
-#define CG_IND_DATA 0x8fc
-/* CGIND regs */
-#define CG_CGTT_LOCAL_0 0x00
-#define CG_CGTT_LOCAL_1 0x01
-
-#define MC_CG_CONFIG 0x25bc
-#define MCDW_WR_ENABLE (1 << 0)
-#define MCDX_WR_ENABLE (1 << 1)
-#define MCDY_WR_ENABLE (1 << 2)
-#define MCDZ_WR_ENABLE (1 << 3)
-#define MC_RD_ENABLE(x) ((x) << 4)
-#define MC_RD_ENABLE_MASK (3 << 4)
-#define INDEX(x) ((x) << 6)
-#define INDEX_MASK (0xfff << 6)
-#define INDEX_SHIFT 6
-
-#define MC_ARB_CAC_CNTL 0x2750
-#define ENABLE (1 << 0)
-#define READ_WEIGHT(x) ((x) << 1)
-#define READ_WEIGHT_MASK (0x3f << 1)
-#define READ_WEIGHT_SHIFT 1
-#define WRITE_WEIGHT(x) ((x) << 7)
-#define WRITE_WEIGHT_MASK (0x3f << 7)
-#define WRITE_WEIGHT_SHIFT 7
-#define ALLOW_OVERFLOW (1 << 13)
-
-#define MC_ARB_DRAM_TIMING 0x2774
-#define MC_ARB_DRAM_TIMING2 0x2778
-
-#define MC_ARB_RFSH_RATE 0x27b0
-#define POWERMODE0(x) ((x) << 0)
-#define POWERMODE0_MASK (0xff << 0)
-#define POWERMODE0_SHIFT 0
-#define POWERMODE1(x) ((x) << 8)
-#define POWERMODE1_MASK (0xff << 8)
-#define POWERMODE1_SHIFT 8
-#define POWERMODE2(x) ((x) << 16)
-#define POWERMODE2_MASK (0xff << 16)
-#define POWERMODE2_SHIFT 16
-#define POWERMODE3(x) ((x) << 24)
-#define POWERMODE3_MASK (0xff << 24)
-#define POWERMODE3_SHIFT 24
-
-#define MC_ARB_CG 0x27e8
-#define CG_ARB_REQ(x) ((x) << 0)
-#define CG_ARB_REQ_MASK (0xff << 0)
-#define CG_ARB_REQ_SHIFT 0
-#define CG_ARB_RESP(x) ((x) << 8)
-#define CG_ARB_RESP_MASK (0xff << 8)
-#define CG_ARB_RESP_SHIFT 8
-#define ARB_CG_REQ(x) ((x) << 16)
-#define ARB_CG_REQ_MASK (0xff << 16)
-#define ARB_CG_REQ_SHIFT 16
-#define ARB_CG_RESP(x) ((x) << 24)
-#define ARB_CG_RESP_MASK (0xff << 24)
-#define ARB_CG_RESP_SHIFT 24
-
-#define MC_ARB_DRAM_TIMING_1 0x27f0
-#define MC_ARB_DRAM_TIMING_2 0x27f4
-#define MC_ARB_DRAM_TIMING_3 0x27f8
-#define MC_ARB_DRAM_TIMING2_1 0x27fc
-#define MC_ARB_DRAM_TIMING2_2 0x2800
-#define MC_ARB_DRAM_TIMING2_3 0x2804
-#define MC_ARB_BURST_TIME 0x2808
-#define STATE0(x) ((x) << 0)
-#define STATE0_MASK (0x1f << 0)
-#define STATE0_SHIFT 0
-#define STATE1(x) ((x) << 5)
-#define STATE1_MASK (0x1f << 5)
-#define STATE1_SHIFT 5
-#define STATE2(x) ((x) << 10)
-#define STATE2_MASK (0x1f << 10)
-#define STATE2_SHIFT 10
-#define STATE3(x) ((x) << 15)
-#define STATE3_MASK (0x1f << 15)
-#define STATE3_SHIFT 15
-
-#define MC_CG_DATAPORT 0x2884
-
-#define MC_SEQ_RAS_TIMING 0x28a0
-#define MC_SEQ_CAS_TIMING 0x28a4
-#define MC_SEQ_MISC_TIMING 0x28a8
-#define MC_SEQ_MISC_TIMING2 0x28ac
-#define MC_SEQ_PMG_TIMING 0x28b0
-#define MC_SEQ_RD_CTL_D0 0x28b4
-#define MC_SEQ_RD_CTL_D1 0x28b8
-#define MC_SEQ_WR_CTL_D0 0x28bc
-#define MC_SEQ_WR_CTL_D1 0x28c0
-
-#define MC_SEQ_MISC0 0x2a00
-#define MC_SEQ_MISC0_GDDR5_SHIFT 28
-#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
-#define MC_SEQ_MISC0_GDDR5_VALUE 5
-#define MC_SEQ_MISC1 0x2a04
-#define MC_SEQ_RESERVE_M 0x2a08
-#define MC_PMG_CMD_EMRS 0x2a0c
-
-#define MC_SEQ_MISC3 0x2a2c
-
-#define MC_SEQ_MISC5 0x2a54
-#define MC_SEQ_MISC6 0x2a58
-
-#define MC_SEQ_MISC7 0x2a64
-
-#define MC_SEQ_RAS_TIMING_LP 0x2a6c
-#define MC_SEQ_CAS_TIMING_LP 0x2a70
-#define MC_SEQ_MISC_TIMING_LP 0x2a74
-#define MC_SEQ_MISC_TIMING2_LP 0x2a78
-#define MC_SEQ_WR_CTL_D0_LP 0x2a7c
-#define MC_SEQ_WR_CTL_D1_LP 0x2a80
-#define MC_SEQ_PMG_CMD_EMRS_LP 0x2a84
-#define MC_SEQ_PMG_CMD_MRS_LP 0x2a88
-
-#define MC_PMG_CMD_MRS 0x2aac
-
-#define MC_SEQ_RD_CTL_D0_LP 0x2b1c
-#define MC_SEQ_RD_CTL_D1_LP 0x2b20
-
-#define MC_PMG_CMD_MRS1 0x2b44
-#define MC_SEQ_PMG_CMD_MRS1_LP 0x2b48
-#define MC_SEQ_PMG_TIMING_LP 0x2b4c
-
-#define MC_PMG_CMD_MRS2 0x2b5c
-#define MC_SEQ_PMG_CMD_MRS2_LP 0x2b60
-
#define AUX_CONTROL 0x6200
#define AUX_EN (1 << 0)
#define AUX_LS_READ_EN (1 << 8)
@@ -869,282 +522,19 @@
#define AUX_SW_DATA_INDEX(x) (((x) & 0x1f) << 16)
#define AUX_SW_AUTOINCREMENT_DISABLE (1 << 31)
-#define LB_SYNC_RESET_SEL 0x6b28
-#define LB_SYNC_RESET_SEL_MASK (3 << 0)
-#define LB_SYNC_RESET_SEL_SHIFT 0
-
-#define DC_STUTTER_CNTL 0x6b30
-#define DC_STUTTER_ENABLE_A (1 << 0)
-#define DC_STUTTER_ENABLE_B (1 << 1)
-
-#define SQ_CAC_THRESHOLD 0x8e4c
-#define VSP(x) ((x) << 0)
-#define VSP_MASK (0xff << 0)
-#define VSP_SHIFT 0
-#define VSP0(x) ((x) << 8)
-#define VSP0_MASK (0xff << 8)
-#define VSP0_SHIFT 8
-#define GPR(x) ((x) << 16)
-#define GPR_MASK (0xff << 16)
-#define GPR_SHIFT 16
-
-#define SQ_POWER_THROTTLE 0x8e58
-#define MIN_POWER(x) ((x) << 0)
-#define MIN_POWER_MASK (0x3fff << 0)
-#define MIN_POWER_SHIFT 0
-#define MAX_POWER(x) ((x) << 16)
-#define MAX_POWER_MASK (0x3fff << 16)
-#define MAX_POWER_SHIFT 0
-#define SQ_POWER_THROTTLE2 0x8e5c
-#define MAX_POWER_DELTA(x) ((x) << 0)
-#define MAX_POWER_DELTA_MASK (0x3fff << 0)
-#define MAX_POWER_DELTA_SHIFT 0
-#define STI_SIZE(x) ((x) << 16)
-#define STI_SIZE_MASK (0x3ff << 16)
-#define STI_SIZE_SHIFT 16
-#define LTI_RATIO(x) ((x) << 27)
-#define LTI_RATIO_MASK (0xf << 27)
-#define LTI_RATIO_SHIFT 27
-
-/* CG indirect registers */
-#define CG_CAC_REGION_1_WEIGHT_0 0x83
-#define WEIGHT_TCP_SIG0(x) ((x) << 0)
-#define WEIGHT_TCP_SIG0_MASK (0x3f << 0)
-#define WEIGHT_TCP_SIG0_SHIFT 0
-#define WEIGHT_TCP_SIG1(x) ((x) << 6)
-#define WEIGHT_TCP_SIG1_MASK (0x3f << 6)
-#define WEIGHT_TCP_SIG1_SHIFT 6
-#define WEIGHT_TA_SIG(x) ((x) << 12)
-#define WEIGHT_TA_SIG_MASK (0x3f << 12)
-#define WEIGHT_TA_SIG_SHIFT 12
-#define CG_CAC_REGION_1_WEIGHT_1 0x84
-#define WEIGHT_TCC_EN0(x) ((x) << 0)
-#define WEIGHT_TCC_EN0_MASK (0x3f << 0)
-#define WEIGHT_TCC_EN0_SHIFT 0
-#define WEIGHT_TCC_EN1(x) ((x) << 6)
-#define WEIGHT_TCC_EN1_MASK (0x3f << 6)
-#define WEIGHT_TCC_EN1_SHIFT 6
-#define WEIGHT_TCC_EN2(x) ((x) << 12)
-#define WEIGHT_TCC_EN2_MASK (0x3f << 12)
-#define WEIGHT_TCC_EN2_SHIFT 12
-#define WEIGHT_TCC_EN3(x) ((x) << 18)
-#define WEIGHT_TCC_EN3_MASK (0x3f << 18)
-#define WEIGHT_TCC_EN3_SHIFT 18
-#define CG_CAC_REGION_2_WEIGHT_0 0x85
-#define WEIGHT_CB_EN0(x) ((x) << 0)
-#define WEIGHT_CB_EN0_MASK (0x3f << 0)
-#define WEIGHT_CB_EN0_SHIFT 0
-#define WEIGHT_CB_EN1(x) ((x) << 6)
-#define WEIGHT_CB_EN1_MASK (0x3f << 6)
-#define WEIGHT_CB_EN1_SHIFT 6
-#define WEIGHT_CB_EN2(x) ((x) << 12)
-#define WEIGHT_CB_EN2_MASK (0x3f << 12)
-#define WEIGHT_CB_EN2_SHIFT 12
-#define WEIGHT_CB_EN3(x) ((x) << 18)
-#define WEIGHT_CB_EN3_MASK (0x3f << 18)
-#define WEIGHT_CB_EN3_SHIFT 18
-#define CG_CAC_REGION_2_WEIGHT_1 0x86
-#define WEIGHT_DB_SIG0(x) ((x) << 0)
-#define WEIGHT_DB_SIG0_MASK (0x3f << 0)
-#define WEIGHT_DB_SIG0_SHIFT 0
-#define WEIGHT_DB_SIG1(x) ((x) << 6)
-#define WEIGHT_DB_SIG1_MASK (0x3f << 6)
-#define WEIGHT_DB_SIG1_SHIFT 6
-#define WEIGHT_DB_SIG2(x) ((x) << 12)
-#define WEIGHT_DB_SIG2_MASK (0x3f << 12)
-#define WEIGHT_DB_SIG2_SHIFT 12
-#define WEIGHT_DB_SIG3(x) ((x) << 18)
-#define WEIGHT_DB_SIG3_MASK (0x3f << 18)
-#define WEIGHT_DB_SIG3_SHIFT 18
-#define CG_CAC_REGION_2_WEIGHT_2 0x87
-#define WEIGHT_SXM_SIG0(x) ((x) << 0)
-#define WEIGHT_SXM_SIG0_MASK (0x3f << 0)
-#define WEIGHT_SXM_SIG0_SHIFT 0
-#define WEIGHT_SXM_SIG1(x) ((x) << 6)
-#define WEIGHT_SXM_SIG1_MASK (0x3f << 6)
-#define WEIGHT_SXM_SIG1_SHIFT 6
-#define WEIGHT_SXM_SIG2(x) ((x) << 12)
-#define WEIGHT_SXM_SIG2_MASK (0x3f << 12)
-#define WEIGHT_SXM_SIG2_SHIFT 12
-#define WEIGHT_SXS_SIG0(x) ((x) << 18)
-#define WEIGHT_SXS_SIG0_MASK (0x3f << 18)
-#define WEIGHT_SXS_SIG0_SHIFT 18
-#define WEIGHT_SXS_SIG1(x) ((x) << 24)
-#define WEIGHT_SXS_SIG1_MASK (0x3f << 24)
-#define WEIGHT_SXS_SIG1_SHIFT 24
-#define CG_CAC_REGION_3_WEIGHT_0 0x88
-#define WEIGHT_XBR_0(x) ((x) << 0)
-#define WEIGHT_XBR_0_MASK (0x3f << 0)
-#define WEIGHT_XBR_0_SHIFT 0
-#define WEIGHT_XBR_1(x) ((x) << 6)
-#define WEIGHT_XBR_1_MASK (0x3f << 6)
-#define WEIGHT_XBR_1_SHIFT 6
-#define WEIGHT_XBR_2(x) ((x) << 12)
-#define WEIGHT_XBR_2_MASK (0x3f << 12)
-#define WEIGHT_XBR_2_SHIFT 12
-#define WEIGHT_SPI_SIG0(x) ((x) << 18)
-#define WEIGHT_SPI_SIG0_MASK (0x3f << 18)
-#define WEIGHT_SPI_SIG0_SHIFT 18
-#define CG_CAC_REGION_3_WEIGHT_1 0x89
-#define WEIGHT_SPI_SIG1(x) ((x) << 0)
-#define WEIGHT_SPI_SIG1_MASK (0x3f << 0)
-#define WEIGHT_SPI_SIG1_SHIFT 0
-#define WEIGHT_SPI_SIG2(x) ((x) << 6)
-#define WEIGHT_SPI_SIG2_MASK (0x3f << 6)
-#define WEIGHT_SPI_SIG2_SHIFT 6
-#define WEIGHT_SPI_SIG3(x) ((x) << 12)
-#define WEIGHT_SPI_SIG3_MASK (0x3f << 12)
-#define WEIGHT_SPI_SIG3_SHIFT 12
-#define WEIGHT_SPI_SIG4(x) ((x) << 18)
-#define WEIGHT_SPI_SIG4_MASK (0x3f << 18)
-#define WEIGHT_SPI_SIG4_SHIFT 18
-#define WEIGHT_SPI_SIG5(x) ((x) << 24)
-#define WEIGHT_SPI_SIG5_MASK (0x3f << 24)
-#define WEIGHT_SPI_SIG5_SHIFT 24
-#define CG_CAC_REGION_4_WEIGHT_0 0x8a
-#define WEIGHT_LDS_SIG0(x) ((x) << 0)
-#define WEIGHT_LDS_SIG0_MASK (0x3f << 0)
-#define WEIGHT_LDS_SIG0_SHIFT 0
-#define WEIGHT_LDS_SIG1(x) ((x) << 6)
-#define WEIGHT_LDS_SIG1_MASK (0x3f << 6)
-#define WEIGHT_LDS_SIG1_SHIFT 6
-#define WEIGHT_SC(x) ((x) << 24)
-#define WEIGHT_SC_MASK (0x3f << 24)
-#define WEIGHT_SC_SHIFT 24
-#define CG_CAC_REGION_4_WEIGHT_1 0x8b
-#define WEIGHT_BIF(x) ((x) << 0)
-#define WEIGHT_BIF_MASK (0x3f << 0)
-#define WEIGHT_BIF_SHIFT 0
-#define WEIGHT_CP(x) ((x) << 6)
-#define WEIGHT_CP_MASK (0x3f << 6)
-#define WEIGHT_CP_SHIFT 6
-#define WEIGHT_PA_SIG0(x) ((x) << 12)
-#define WEIGHT_PA_SIG0_MASK (0x3f << 12)
-#define WEIGHT_PA_SIG0_SHIFT 12
-#define WEIGHT_PA_SIG1(x) ((x) << 18)
-#define WEIGHT_PA_SIG1_MASK (0x3f << 18)
-#define WEIGHT_PA_SIG1_SHIFT 18
-#define WEIGHT_VGT_SIG0(x) ((x) << 24)
-#define WEIGHT_VGT_SIG0_MASK (0x3f << 24)
-#define WEIGHT_VGT_SIG0_SHIFT 24
-#define CG_CAC_REGION_4_WEIGHT_2 0x8c
-#define WEIGHT_VGT_SIG1(x) ((x) << 0)
-#define WEIGHT_VGT_SIG1_MASK (0x3f << 0)
-#define WEIGHT_VGT_SIG1_SHIFT 0
-#define WEIGHT_VGT_SIG2(x) ((x) << 6)
-#define WEIGHT_VGT_SIG2_MASK (0x3f << 6)
-#define WEIGHT_VGT_SIG2_SHIFT 6
-#define WEIGHT_DC_SIG0(x) ((x) << 12)
-#define WEIGHT_DC_SIG0_MASK (0x3f << 12)
-#define WEIGHT_DC_SIG0_SHIFT 12
-#define WEIGHT_DC_SIG1(x) ((x) << 18)
-#define WEIGHT_DC_SIG1_MASK (0x3f << 18)
-#define WEIGHT_DC_SIG1_SHIFT 18
-#define WEIGHT_DC_SIG2(x) ((x) << 24)
-#define WEIGHT_DC_SIG2_MASK (0x3f << 24)
-#define WEIGHT_DC_SIG2_SHIFT 24
-#define CG_CAC_REGION_4_WEIGHT_3 0x8d
-#define WEIGHT_DC_SIG3(x) ((x) << 0)
-#define WEIGHT_DC_SIG3_MASK (0x3f << 0)
-#define WEIGHT_DC_SIG3_SHIFT 0
-#define WEIGHT_UVD_SIG0(x) ((x) << 6)
-#define WEIGHT_UVD_SIG0_MASK (0x3f << 6)
-#define WEIGHT_UVD_SIG0_SHIFT 6
-#define WEIGHT_UVD_SIG1(x) ((x) << 12)
-#define WEIGHT_UVD_SIG1_MASK (0x3f << 12)
-#define WEIGHT_UVD_SIG1_SHIFT 12
-#define WEIGHT_SPARE0(x) ((x) << 18)
-#define WEIGHT_SPARE0_MASK (0x3f << 18)
-#define WEIGHT_SPARE0_SHIFT 18
-#define WEIGHT_SPARE1(x) ((x) << 24)
-#define WEIGHT_SPARE1_MASK (0x3f << 24)
-#define WEIGHT_SPARE1_SHIFT 24
-#define CG_CAC_REGION_5_WEIGHT_0 0x8e
-#define WEIGHT_SQ_VSP(x) ((x) << 0)
-#define WEIGHT_SQ_VSP_MASK (0x3fff << 0)
-#define WEIGHT_SQ_VSP_SHIFT 0
-#define WEIGHT_SQ_VSP0(x) ((x) << 14)
-#define WEIGHT_SQ_VSP0_MASK (0x3fff << 14)
-#define WEIGHT_SQ_VSP0_SHIFT 14
-#define CG_CAC_REGION_4_OVERRIDE_4 0xab
-#define OVR_MODE_SPARE_0(x) ((x) << 16)
-#define OVR_MODE_SPARE_0_MASK (0x1 << 16)
-#define OVR_MODE_SPARE_0_SHIFT 16
-#define OVR_VAL_SPARE_0(x) ((x) << 17)
-#define OVR_VAL_SPARE_0_MASK (0x1 << 17)
-#define OVR_VAL_SPARE_0_SHIFT 17
-#define OVR_MODE_SPARE_1(x) ((x) << 18)
-#define OVR_MODE_SPARE_1_MASK (0x3f << 18)
-#define OVR_MODE_SPARE_1_SHIFT 18
-#define OVR_VAL_SPARE_1(x) ((x) << 19)
-#define OVR_VAL_SPARE_1_MASK (0x3f << 19)
-#define OVR_VAL_SPARE_1_SHIFT 19
-#define CG_CAC_REGION_5_WEIGHT_1 0xb7
-#define WEIGHT_SQ_GPR(x) ((x) << 0)
-#define WEIGHT_SQ_GPR_MASK (0x3fff << 0)
-#define WEIGHT_SQ_GPR_SHIFT 0
-#define WEIGHT_SQ_LDS(x) ((x) << 14)
-#define WEIGHT_SQ_LDS_MASK (0x3fff << 14)
-#define WEIGHT_SQ_LDS_SHIFT 14
-
-/* PCIE link stuff */
-#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
-#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
-# define LC_LINK_WIDTH_SHIFT 0
-# define LC_LINK_WIDTH_MASK 0x7
-# define LC_LINK_WIDTH_X0 0
-# define LC_LINK_WIDTH_X1 1
-# define LC_LINK_WIDTH_X2 2
-# define LC_LINK_WIDTH_X4 3
-# define LC_LINK_WIDTH_X8 4
-# define LC_LINK_WIDTH_X16 6
-# define LC_LINK_WIDTH_RD_SHIFT 4
-# define LC_LINK_WIDTH_RD_MASK 0x70
-# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
-# define LC_RECONFIG_NOW (1 << 8)
-# define LC_RENEGOTIATION_SUPPORT (1 << 9)
-# define LC_RENEGOTIATE_EN (1 << 10)
-# define LC_SHORT_RECONFIG_EN (1 << 11)
-# define LC_UPCONFIGURE_SUPPORT (1 << 12)
-# define LC_UPCONFIGURE_DIS (1 << 13)
-#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
-# define LC_GEN2_EN_STRAP (1 << 0)
-# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 1)
-# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 5)
-# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 6)
-# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
-# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
-# define LC_CURRENT_DATA_RATE (1 << 11)
-# define LC_HW_VOLTAGE_IF_CONTROL(x) ((x) << 12)
-# define LC_HW_VOLTAGE_IF_CONTROL_MASK (3 << 12)
-# define LC_HW_VOLTAGE_IF_CONTROL_SHIFT 12
-# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
-# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
-# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
-# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 24)
-#define MM_CFGREGS_CNTL 0x544c
-# define MM_WR_TO_CFG_EN (1 << 3)
-#define LINK_CNTL2 0x88 /* F0 */
-# define TARGET_LINK_SPEED_MASK (0xf << 0)
-# define SELECTABLE_DEEMPHASIS (1 << 6)
-
-/*
- * UVD
- */
-#define UVD_SEMA_ADDR_LOW 0xEF00
-#define UVD_SEMA_ADDR_HIGH 0xEF04
-#define UVD_SEMA_CMD 0xEF08
-#define UVD_UDEC_ADDR_CONFIG 0xEF4C
-#define UVD_UDEC_DB_ADDR_CONFIG 0xEF50
-#define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54
-#define UVD_RBC_RB_RPTR 0xF690
-#define UVD_RBC_RB_WPTR 0xF694
-#define UVD_STATUS 0xf6bc
-
/*
* PM4
*/
-#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
+#define PACKET_TYPE0 0
+#define PACKET_TYPE1 1
+#define PACKET_TYPE2 2
+#define PACKET_TYPE3 3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
#define CP_PACKET2 0x80000000
@@ -1153,7 +543,7 @@
#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
-#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
+#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
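A hedged editorial sketch of how the header macros above round-trip, using the
local PACKET0/CP_PACKET_GET_* definitions; the register offset 0x1234 and the
count of 3 are arbitrary example values, not taken from the driver.

#include <assert.h>
#include <stdint.h>

static void pm4_header_roundtrip(void)
{
	/* type-0 header: write (n + 1) dwords starting at register 0x1234 */
	uint32_t h = PACKET0(0x1234, 3);

	assert(CP_PACKET_GET_TYPE(h) == PACKET_TYPE0);
	assert(CP_PACKET_GET_COUNT(h) == 3);
	assert(CP_PACKET0_GET_REG(h) == 0x1234);
}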
@@ -1191,23 +581,6 @@
#define PACKET3_MEM_SEMAPHORE 0x39
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_WAIT_REG_MEM 0x3C
-#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
- /* 0 - always
- * 1 - <
- * 2 - <=
- * 3 - ==
- * 4 - !=
- * 5 - >=
- * 6 - >
- */
-#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
- /* 0 - reg
- * 1 - mem
- */
-#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
- /* 0 - me
- * 1 - pfp
- */
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_PFP_SYNC_ME 0x42
#define PACKET3_SURFACE_SYNC 0x43
@@ -1342,18 +715,6 @@
(((vmid) & 0xF) << 20) | \
(((n) & 0xFFFFF) << 0))
-#define DMA_PTE_PDE_PACKET(n) ((2 << 28) | \
- (1 << 26) | \
- (1 << 21) | \
- (((n) & 0xFFFFF) << 0))
-
-#define DMA_SRBM_POLL_PACKET ((9 << 28) | \
- (1 << 27) | \
- (1 << 26))
-
-#define DMA_SRBM_READ_PACKET ((9 << 28) | \
- (1 << 27))
-
/* async DMA Packet types */
#define DMA_PACKET_WRITE 0x2
#define DMA_PACKET_COPY 0x3
diff --git a/sys/dev/pci/drm/radeon/r100.c b/sys/dev/pci/drm/radeon/r100.c
index 9fcac11ad7c..9723320499d 100644
--- a/sys/dev/pci/drm/radeon/r100.c
+++ b/sys/dev/pci/drm/radeon/r100.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: r100.c,v 1.18 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -40,13 +41,13 @@
#include "rn50_reg_safe.h"
/* Firmware Names */
-#define FIRMWARE_R100 "radeon/R100_cp.bin"
-#define FIRMWARE_R200 "radeon/R200_cp.bin"
-#define FIRMWARE_R300 "radeon/R300_cp.bin"
-#define FIRMWARE_R420 "radeon/R420_cp.bin"
-#define FIRMWARE_RS690 "radeon/RS690_cp.bin"
-#define FIRMWARE_RS600 "radeon/RS600_cp.bin"
-#define FIRMWARE_R520 "radeon/R520_cp.bin"
+#define FIRMWARE_R100 "radeon-r100_cp"
+#define FIRMWARE_R200 "radeon-r200_cp"
+#define FIRMWARE_R300 "radeon-r300_cp"
+#define FIRMWARE_R420 "radeon-r420_cp"
+#define FIRMWARE_RS690 "radeon-rs690_cp"
+#define FIRMWARE_RS600 "radeon-rs600_cp"
+#define FIRMWARE_R520 "radeon-r520_cp"
MODULE_FIRMWARE(FIRMWARE_R100);
MODULE_FIRMWARE(FIRMWARE_R200);
@@ -137,6 +138,36 @@ void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
}
/**
+ * r100_pre_page_flip - pre-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to prepare for pageflip on
+ *
+ * Pre-pageflip callback (r1xx-r4xx).
+ * Enables the pageflip irq (vblank irq).
+ */
+void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+ /* enable the pflip int */
+ radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+/**
+ * r100_post_page_flip - post-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to cleanup pageflip on
+ *
+ * Post-pageflip callback (r1xx-r4xx).
+ * Disables the pageflip irq (vblank irq).
+ */
+void r100_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+ /* disable the pflip int */
+ radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+/**
* r100_page_flip - pageflip callback.
*
* @rdev: radeon_device pointer
@@ -147,8 +178,9 @@ void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
* During vblank we take the crtc lock and wait for the update_pending
* bit to go high, when it does, we release the lock, and allow the
* double buffered update to take place.
+ * Returns the current update pending status.
*/
-void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
@@ -170,24 +202,8 @@ void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
-}
-
-/**
- * r100_page_flip_pending - check if page flip is still pending
- *
- * @rdev: radeon_device pointer
- * @crtc_id: crtc to check
- *
- * Check if the last pageflip is still pending (r1xx-r4xx).
- * Returns the current update pending status.
- */
-bool r100_page_flip_pending(struct radeon_device *rdev, int crtc_id)
-{
- struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
-
/* Return current update_pending status: */
- return !!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) &
- RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET);
+ return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
}
/**
@@ -639,7 +655,6 @@ int r100_pci_gart_init(struct radeon_device *rdev)
return r;
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
- rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
rdev->asic->gart.set_page = &r100_pci_gart_set_page;
return radeon_gart_table_ram_alloc(rdev);
}
@@ -648,6 +663,7 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
{
uint32_t tmp;
+ radeon_gart_restore(rdev);
/* discard memory request outside of configured range */
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32(RADEON_AIC_CNTL, tmp);
@@ -677,16 +693,15 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
WREG32(RADEON_AIC_HI_ADDR, 0);
}
-uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
-{
- return addr;
-}
-
-void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t entry)
+int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
u32 *gtt = rdev->gart.ptr;
- gtt[i] = cpu_to_le32(lower_32_bits(entry));
+
+	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
+ return -EINVAL;
+ }
+ gtt[i] = cpu_to_le32((u32)addr);
+ return 0;
}
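A hedged usage sketch of the reverted set_page interface above; the slot and
bus address are hypothetical caller-supplied values, and the TLB flush makes
the new entry visible to the GPU.

static int r100_gart_map_one(struct radeon_device *rdev, int slot,
    uint64_t busaddr)
{
	int r;

	r = r100_pci_gart_set_page(rdev, slot, busaddr);
	if (r == 0)
		r100_pci_gart_tlb_flush(rdev);	/* expose the new entry */
	return r;
}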
void r100_pci_gart_fini(struct radeon_device *rdev)
@@ -723,10 +738,6 @@ int r100_irq_set(struct radeon_device *rdev)
tmp |= RADEON_FP2_DETECT_MASK;
}
WREG32(RADEON_GEN_INT_CNTL, tmp);
-
- /* read back to post the write */
- RREG32(RADEON_GEN_INT_CNTL);
-
return 0;
}
@@ -779,7 +790,7 @@ int r100_irq_process(struct radeon_device *rdev)
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_vblank(rdev, 0);
+ radeon_crtc_handle_flip(rdev, 0);
}
if (status & RADEON_CRTC2_VBLANK_STAT) {
if (rdev->irq.crtc_vblank_int[1]) {
@@ -788,7 +799,7 @@ int r100_irq_process(struct radeon_device *rdev)
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_vblank(rdev, 1);
+ radeon_crtc_handle_flip(rdev, 1);
}
if (status & RADEON_FP_DETECT_STAT) {
queue_hotplug = true;
@@ -801,7 +812,7 @@ int r100_irq_process(struct radeon_device *rdev)
status = r100_irq_ack(rdev);
}
if (queue_hotplug)
- schedule_delayed_work(&rdev->hotplug_work, 0);
+ task_add(systq, &rdev->hotplug_task);
if (rdev->msi_enabled) {
switch (rdev->family) {
case CHIP_RS400:
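For context, a minimal sketch of the task(9) pattern that replaces
schedule_delayed_work() in the hunk above, assuming a hotplug handler named
radeon_hotplug_task_func (hypothetical name): the task is bound to its
handler once at attach time, then queued from the interrupt path.

/* at attach time */
task_set(&rdev->hotplug_task, radeon_hotplug_task_func, rdev);

/* later, from the interrupt handler */
task_add(systq, &rdev->hotplug_task);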
@@ -826,20 +837,6 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
return RREG32(RADEON_CRTC2_CRNT_FRAME);
}
-/**
- * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
- * rdev: radeon device structure
- * ring: ring buffer struct for emitting packets
- */
-static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
- RADEON_HDP_READ_BUFFER_INVALIDATE);
- radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
-}
-
/* Whoever calls radeon_fence_emit should call ring_lock and ask
 * for enough space (today the callers are ib schedule and buffer move) */
void r100_fence_ring_emit(struct radeon_device *rdev,
@@ -856,7 +853,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
/* Wait until IDLE & CLEAN */
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
- r100_ring_hdp_flush(rdev, ring);
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
+ RADEON_HDP_READ_BUFFER_INVALIDATE);
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
radeon_ring_write(ring, fence->seq);
@@ -864,24 +865,22 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}
-bool r100_semaphore_ring_emit(struct radeon_device *rdev,
+void r100_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
/* Unused on older asics, since we don't have semaphores or multiple rings */
BUG();
- return false;
}
-struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct reservation_object *resv)
+int r100_copy_blit(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- struct radeon_fence *fence;
uint32_t cur_pages;
uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
uint32_t pitch;
@@ -902,7 +901,7 @@ struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
r = radeon_ring_lock(rdev, ring, ndw);
if (r) {
DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
while (num_gpu_pages > 0) {
cur_pages = num_gpu_pages;
@@ -942,13 +941,11 @@ struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_HOST_IDLECLEAN |
RADEON_WAIT_DMA_GUI_IDLE);
- r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
- if (r) {
- radeon_ring_unlock_undo(rdev, ring);
- return ERR_PTR(r);
+ if (fence) {
+ r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
}
- radeon_ring_unlock_commit(rdev, ring, false);
- return fence;
+ radeon_ring_unlock_commit(rdev, ring);
+ return r;
}
static int r100_cp_wait_for_idle(struct radeon_device *rdev)
@@ -980,7 +977,7 @@ void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI);
- radeon_ring_unlock_commit(rdev, ring, false);
+ radeon_ring_unlock_commit(rdev, ring);
}
@@ -988,7 +985,7 @@ void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
static int r100_cp_init_microcode(struct radeon_device *rdev)
{
const char *fw_name = NULL;
- int err;
+ int err = 0;
DRM_DEBUG_KMS("\n");
@@ -1033,49 +1030,22 @@ static int r100_cp_init_microcode(struct radeon_device *rdev)
fw_name = FIRMWARE_R520;
}
- err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
+ err = loadfirmware(fw_name, &rdev->me_fw, &rdev->me_fw_size);
if (err) {
- printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
+ DRM_ERROR("radeon_cp: Failed to load firmware \"%s\"\n",
fw_name);
- } else if (rdev->me_fw->size % 8) {
- printk(KERN_ERR
+ err = -err;
+ rdev->me_fw = NULL;
+ } else if (rdev->me_fw_size % 8) {
+ DRM_ERROR(
"radeon_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->me_fw->size, fw_name);
+ rdev->me_fw_size, fw_name);
err = -EINVAL;
- release_firmware(rdev->me_fw);
+ free(rdev->me_fw, M_DEVBUF, 0);
rdev->me_fw = NULL;
}
- return err;
-}
-
-u32 r100_gfx_get_rptr(struct radeon_device *rdev,
- struct radeon_ring *ring)
-{
- u32 rptr;
- if (rdev->wb.enabled)
- rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
- else
- rptr = RREG32(RADEON_CP_RB_RPTR);
-
- return rptr;
-}
-
-u32 r100_gfx_get_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring)
-{
- u32 wptr;
-
- wptr = RREG32(RADEON_CP_RB_WPTR);
-
- return wptr;
-}
-
-void r100_gfx_set_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring)
-{
- WREG32(RADEON_CP_RB_WPTR, ring->wptr);
- (void)RREG32(RADEON_CP_RB_WPTR);
+ return err;
}
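A hedged sketch of the loadfirmware(9) contract relied on above: the kernel
reads the named image (typically from /etc/firmware) into allocated memory
that the caller eventually releases with free(9); the error handling mirrors
the hunk.

u_char *fw;
size_t fwlen;
int error;

error = loadfirmware("radeon-r100_cp", &fw, &fwlen);
if (error == 0) {
	/* ... consume fw[0..fwlen) ... */
	free(fw, M_DEVBUF, fwlen);
}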
static void r100_cp_load_microcode(struct radeon_device *rdev)
@@ -1089,8 +1059,8 @@ static void r100_cp_load_microcode(struct radeon_device *rdev)
}
if (rdev->me_fw) {
- size = rdev->me_fw->size / 4;
- fw_data = (const __be32 *)&rdev->me_fw->data[0];
+ size = rdev->me_fw_size / 4;
+ fw_data = (const __be32 *)&rdev->me_fw[0];
WREG32(RADEON_CP_ME_RAM_ADDR, 0);
for (i = 0; i < size; i += 2) {
WREG32(RADEON_CP_ME_RAM_DATAH,
@@ -1126,11 +1096,12 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
}
/* Align ring size */
- rb_bufsz = order_base_2(ring_size / 8);
+ rb_bufsz = drm_order(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
r100_cp_load_microcode(rdev);
r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
- RADEON_CP_PACKET2);
+ RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
+ 0, 0x7fffff, RADEON_CP_PACKET2);
if (r) {
return r;
}
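An arithmetic check of the ring-size alignment above, assuming drm_order()
returns the base-2 logarithm rounded up (its historical DRM behavior): a
1 MB request survives the round-trip unchanged.

#include <assert.h>

static void ring_align_example(void)
{
	unsigned ring_size = 1024 * 1024;
	unsigned rb_bufsz = drm_order(ring_size / 8);	/* 17 for 1 MB */

	assert((1u << (rb_bufsz + 1)) * 4 == 1024 * 1024);
}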
@@ -1191,6 +1162,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(RADEON_CP_RB_CNTL, tmp);
udelay(10);
+ ring->rptr = RREG32(RADEON_CP_RB_RPTR);
/* Set cp mode to bus mastering & enable cp*/
WREG32(RADEON_CP_CSQ_MODE,
REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
@@ -1200,7 +1172,9 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
/* at this point everything should be setup correctly to enable master */
+#ifdef notyet
pci_set_master(rdev->pdev);
+#endif
radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
@@ -1259,28 +1233,28 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
int r;
u32 tile_flags = 0;
u32 tmp;
- struct radeon_bo_list *reloc;
+ struct radeon_cs_reloc *reloc;
u32 value;
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
value = radeon_get_ib_value(p, idx);
tmp = value & 0x003fffff;
- tmp += (((u32)reloc->gpu_offset) >> 10);
+ tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->tiling_flags & RADEON_TILING_MACRO)
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_DST_TILE_MACRO;
- if (reloc->tiling_flags & RADEON_TILING_MICRO) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
if (reg == RADEON_SRC_PITCH_OFFSET) {
DRM_ERROR("Cannot src blit from microtiled surface\n");
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return -EINVAL;
}
tile_flags |= RADEON_DST_TILE_MICRO;
@@ -1298,7 +1272,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
int idx)
{
unsigned c, i;
- struct radeon_bo_list *reloc;
+ struct radeon_cs_reloc *reloc;
struct r100_cs_track *track;
int r = 0;
volatile uint32_t *ib;
@@ -1310,46 +1284,46 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
if (c > 16) {
DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
pkt->opcode);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return -EINVAL;
}
track->num_arrays = c;
for (i = 0; i < (c - 1); i+=2, idx+=3) {
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
idx_value = radeon_get_ib_value(p, idx);
- ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
+ ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 0].esize = idx_value >> 8;
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize &= 0x7F;
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
- ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->gpu_offset);
+ ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 1].robj = reloc->robj;
track->arrays[i + 1].esize = idx_value >> 24;
track->arrays[i + 1].esize &= 0x7F;
}
if (c & 1) {
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
idx_value = radeon_get_ib_value(p, idx);
- ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
+ ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize = idx_value >> 8;
track->arrays[i + 0].esize &= 0x7F;
@@ -1402,6 +1376,67 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
return 0;
}
+void r100_cs_dump_packet(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt)
+{
+ volatile uint32_t *ib;
+ unsigned i;
+ unsigned idx;
+
+ ib = p->ib.ptr;
+ idx = pkt->idx;
+ for (i = 0; i <= (pkt->count + 1); i++, idx++) {
+ DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
+ }
+}
+
+/**
+ * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
+ * @p: parser structure holding parsing context.
+ * @pkt: where to store packet information
+ * @idx: index of the packet header within the ib chunk
+ *
+ * Assumes that chunk_ib_idx is properly set. Returns -EINVAL if the
+ * packet is bigger than the remaining ib size or if the packet type is
+ * unknown.
+ **/
+int r100_cs_packet_parse(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx)
+{
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ uint32_t header;
+
+ if (idx >= ib_chunk->length_dw) {
+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+ idx, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ header = radeon_get_ib_value(p, idx);
+ pkt->idx = idx;
+ pkt->type = CP_PACKET_GET_TYPE(header);
+ pkt->count = CP_PACKET_GET_COUNT(header);
+ switch (pkt->type) {
+ case PACKET_TYPE0:
+ pkt->reg = CP_PACKET0_GET_REG(header);
+ pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
+ break;
+ case PACKET_TYPE3:
+ pkt->opcode = CP_PACKET3_GET_OPCODE(header);
+ break;
+ case PACKET_TYPE2:
+ pkt->count = -1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
+ return -EINVAL;
+ }
+ if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
+ DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
+ pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ return 0;
+}
+
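A hedged usage sketch of r100_cs_packet_parse(), condensed from the
r100_cs_parse() loop later in this hunk series: advance past the header
dword plus count + 1 payload dwords until the ib chunk is exhausted.

static int walk_ib_packets(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	int r;

	do {
		r = r100_cs_packet_parse(p, &pkt, p->idx);
		if (r)
			return r;
		p->idx += pkt.count + 2;	/* header + count + 1 payload dwords */
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	return 0;
}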
/**
 * r100_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p: parser structure holding parsing context.
@@ -1418,6 +1453,7 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
*/
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
+ struct drm_mode_object *obj;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
struct radeon_cs_packet p3reloc, waitreloc;
@@ -1429,7 +1465,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
ib = p->ib.ptr;
/* parse the wait until */
- r = radeon_cs_packet_parse(p, &waitreloc, p->idx);
+ r = r100_cs_packet_parse(p, &waitreloc, p->idx);
if (r)
return r;
@@ -1446,7 +1482,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
}
/* jump over the NOP */
- r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
+ r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
if (r)
return r;
@@ -1456,12 +1492,13 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 5);
- reg = R100_CP_PACKET0_GET_REG(header);
- crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
- if (!crtc) {
+ reg = CP_PACKET0_GET_REG(header);
+ obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
- return -ENOENT;
+ return -EINVAL;
}
+ crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
crtc_id = radeon_crtc->crtc_id;
@@ -1490,6 +1527,54 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
return 0;
}
+/**
+ * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
+ * @p: parser structure holding parsing context.
+ * @cs_reloc: where to store the resulting reloc information
+ *
+ * Check that the next packet is a relocation packet3 and return the
+ * matching entry from the relocation chunk.
+ **/
+int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc)
+{
+ struct radeon_cs_chunk *relocs_chunk;
+ struct radeon_cs_packet p3reloc;
+ unsigned idx;
+ int r;
+
+ if (p->chunk_relocs_idx == -1) {
+ DRM_ERROR("No relocation chunk !\n");
+ return -EINVAL;
+ }
+ *cs_reloc = NULL;
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ r = r100_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r) {
+ return r;
+ }
+ p->idx += p3reloc.count + 2;
+ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+ DRM_ERROR("No packet3 for relocation for packet at %d.\n",
+ p3reloc.idx);
+ r100_cs_dump_packet(p, &p3reloc);
+ return -EINVAL;
+ }
+ idx = radeon_get_ib_value(p, p3reloc.idx + 1);
+ if (idx >= relocs_chunk->length_dw) {
+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+ idx, relocs_chunk->length_dw);
+ r100_cs_dump_packet(p, &p3reloc);
+ return -EINVAL;
+ }
+ /* FIXME: we assume reloc size is 4 dwords */
+ *cs_reloc = p->relocs_ptr[(idx / 4)];
+ return 0;
+}
+
static int r100_get_vtx_size(uint32_t vtx_fmt)
{
int vtx_size;
@@ -1547,7 +1632,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg)
{
- struct radeon_bo_list *reloc;
+ struct radeon_cs_reloc *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
uint32_t tmp;
@@ -1567,7 +1652,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
break;
@@ -1580,53 +1665,53 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
return r;
break;
case RADEON_RB3D_DEPTHOFFSET:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
track->zb.robj = reloc->robj;
track->zb.offset = idx_value;
track->zb_dirty = true;
- ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
track->cb[0].robj = reloc->robj;
track->cb[0].offset = idx_value;
track->cb_dirty = true;
- ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_PP_TXOFFSET_0:
case RADEON_PP_TXOFFSET_1:
case RADEON_PP_TXOFFSET_2:
i = (reg - RADEON_PP_TXOFFSET_0) / 24;
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->tiling_flags & RADEON_TILING_MACRO)
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_TXO_MACRO_TILE;
- if (reloc->tiling_flags & RADEON_TILING_MICRO)
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= RADEON_TXO_MICRO_TILE_X2;
tmp = idx_value & ~(0x7 << 2);
tmp |= tile_flags;
- ib[idx] = tmp + ((u32)reloc->gpu_offset);
+ ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
} else
- ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
break;
@@ -1636,15 +1721,15 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_CUBIC_OFFSET_T0_3:
case RADEON_PP_CUBIC_OFFSET_T0_4:
i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
track->textures[0].cube_info[i].offset = idx_value;
- ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[0].cube_info[i].robj = reloc->robj;
track->tex_dirty = true;
break;
@@ -1654,15 +1739,15 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_CUBIC_OFFSET_T1_3:
case RADEON_PP_CUBIC_OFFSET_T1_4:
i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
track->textures[1].cube_info[i].offset = idx_value;
- ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[1].cube_info[i].robj = reloc->robj;
track->tex_dirty = true;
break;
@@ -1672,15 +1757,15 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_CUBIC_OFFSET_T2_3:
case RADEON_PP_CUBIC_OFFSET_T2_4:
i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
track->textures[2].cube_info[i].offset = idx_value;
- ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[2].cube_info[i].robj = reloc->robj;
track->tex_dirty = true;
break;
@@ -1690,17 +1775,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->zb_dirty = true;
break;
case RADEON_RB3D_COLORPITCH:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->tiling_flags & RADEON_TILING_MACRO)
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_COLOR_TILE_ENABLE;
- if (reloc->tiling_flags & RADEON_TILING_MICRO)
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
tmp = idx_value & ~(0x7 << 16);
@@ -1761,14 +1846,14 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->zb_dirty = true;
break;
case RADEON_RB3D_ZPASS_ADDR:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
- ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_PP_CNTL:
{
@@ -1906,7 +1991,7 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
static int r100_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
- struct radeon_bo_list *reloc;
+ struct radeon_cs_reloc *reloc;
struct r100_cs_track *track;
unsigned idx;
volatile uint32_t *ib;
@@ -1922,13 +2007,13 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
return r;
break;
case PACKET3_INDX_BUFFER:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
- ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->gpu_offset);
+ ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
if (r) {
return r;
@@ -1936,13 +2021,13 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
break;
case 0x23:
/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
- ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->gpu_offset);
+ ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
track->num_arrays = 1;
track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
@@ -2036,37 +2121,38 @@ int r100_cs_parse(struct radeon_cs_parser *p)
r100_cs_track_clear(p->rdev, track);
p->track = track;
do {
- r = radeon_cs_packet_parse(p, &pkt, p->idx);
+ r = r100_cs_packet_parse(p, &pkt, p->idx);
if (r) {
return r;
}
p->idx += pkt.count + 2;
switch (pkt.type) {
- case RADEON_PACKET_TYPE0:
- if (p->rdev->family >= CHIP_R200)
- r = r100_cs_parse_packet0(p, &pkt,
- p->rdev->config.r100.reg_safe_bm,
- p->rdev->config.r100.reg_safe_bm_size,
- &r200_packet0_check);
- else
- r = r100_cs_parse_packet0(p, &pkt,
- p->rdev->config.r100.reg_safe_bm,
- p->rdev->config.r100.reg_safe_bm_size,
- &r100_packet0_check);
- break;
- case RADEON_PACKET_TYPE2:
- break;
- case RADEON_PACKET_TYPE3:
- r = r100_packet3_check(p, &pkt);
- break;
- default:
- DRM_ERROR("Unknown packet type %d !\n",
- pkt.type);
- return -EINVAL;
+ case PACKET_TYPE0:
+ if (p->rdev->family >= CHIP_R200)
+ r = r100_cs_parse_packet0(p, &pkt,
+ p->rdev->config.r100.reg_safe_bm,
+ p->rdev->config.r100.reg_safe_bm_size,
+ &r200_packet0_check);
+ else
+ r = r100_cs_parse_packet0(p, &pkt,
+ p->rdev->config.r100.reg_safe_bm,
+ p->rdev->config.r100.reg_safe_bm_size,
+ &r100_packet0_check);
+ break;
+ case PACKET_TYPE2:
+ break;
+ case PACKET_TYPE3:
+ r = r100_packet3_check(p, &pkt);
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d !\n",
+ pkt.type);
+ return -EINVAL;
}
- if (r)
+ if (r) {
return r;
- } while (p->idx < p->chunk_ib->length_dw);
+ }
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
return 0;
}
@@ -2518,9 +2604,11 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
- radeon_ring_lockup_update(rdev, ring);
+ radeon_ring_lockup_update(ring);
return false;
}
+ /* force CP activities */
+ radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
@@ -2546,7 +2634,9 @@ void r100_bm_disable(struct radeon_device *rdev)
WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
tmp = RREG32(RADEON_BUS_CNTL);
mdelay(1);
+#ifdef notyet
pci_clear_master(rdev->pdev);
+#endif
mdelay(1);
}
@@ -2571,7 +2661,9 @@ int r100_asic_reset(struct radeon_device *rdev)
WREG32(RADEON_CP_RB_WPTR, 0);
WREG32(RADEON_CP_RB_CNTL, tmp);
/* save PCI state */
+#ifdef notyet
pci_save_state(rdev->pdev);
+#endif
/* disable bus mastering */
r100_bm_disable(rdev);
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
@@ -2593,7 +2685,9 @@ int r100_asic_reset(struct radeon_device *rdev)
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* restore PCI & busmastering */
+#ifdef notyet
pci_restore_state(rdev->pdev);
+#endif
r100_enable_bm(rdev);
/* Check if GPU is idle */
if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
@@ -2731,7 +2825,7 @@ static void r100_vram_get_type(struct radeon_device *rdev)
static u32 r100_get_accessible_vram(struct radeon_device *rdev)
{
u32 aper_size;
- u8 byte;
+ pcireg_t reg;
aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
@@ -2750,8 +2844,8 @@ static u32 r100_get_accessible_vram(struct radeon_device *rdev)
* check if it's a multifunction card by reading the PCI config
* header type... Limit those to one aperture size
*/
- pci_read_config_byte(rdev->pdev, 0xe, &byte);
- if (byte & 0x80) {
+ reg = pci_conf_read(rdev->pc, rdev->pa_tag, PCI_BHLC_REG);
+ if (PCI_HDRTYPE_MULTIFN(reg)) {
DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
DRM_INFO("Limiting VRAM to one aperture\n");
return aper_size;
@@ -2876,28 +2970,21 @@ static void r100_pll_errata_after_data(struct radeon_device *rdev)
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
{
- unsigned long flags;
uint32_t data;
- spin_lock_irqsave(&rdev->pll_idx_lock, flags);
WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
r100_pll_errata_after_index(rdev);
data = RREG32(RADEON_CLOCK_CNTL_DATA);
r100_pll_errata_after_data(rdev);
- spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
return data;
}
void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
- unsigned long flags;
-
- spin_lock_irqsave(&rdev->pll_idx_lock, flags);
WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
r100_pll_errata_after_index(rdev);
WREG32(RADEON_CLOCK_CNTL_DATA, v);
r100_pll_errata_after_data(rdev);
- spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
}
static void r100_set_safe_registers(struct radeon_device *rdev)
@@ -3212,12 +3299,6 @@ void r100_bandwidth_update(struct radeon_device *rdev)
uint32_t pixel_bytes1 = 0;
uint32_t pixel_bytes2 = 0;
- /* Guess line buffer size to be 8192 pixels */
- u32 lb_size = 8192;
-
- if (!rdev->mode_info.mode_config_initialized)
- return;
-
radeon_update_display_priority(rdev);
if (rdev->mode_info.crtcs[0]->base.enabled) {
@@ -3629,13 +3710,6 @@ void r100_bandwidth_update(struct radeon_device *rdev)
DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n",
(unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
}
-
- /* Save number of lines the linebuffer leads before the scanout */
- if (mode1)
- rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay);
-
- if (mode2)
- rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay);
}
int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
@@ -3659,7 +3733,7 @@ int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
}
radeon_ring_write(ring, PACKET0(scratch, 0));
radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring, false);
+ radeon_ring_unlock_commit(rdev, ring);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF) {
@@ -3721,7 +3795,7 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ib.ptr[6] = PACKET2(0);
ib.ptr[7] = PACKET2(0);
ib.length_dw = 8;
- r = radeon_ib_schedule(rdev, &ib, NULL, false);
+ r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
goto free_ib;
@@ -3837,7 +3911,7 @@ static void r100_mc_program(struct radeon_device *rdev)
WREG32(R_00014C_MC_AGP_LOCATION,
S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
- WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
+ WREG32(R_000170_AGP_BASE, (u32)(rdev->mc.agp_base));
if (rdev->family > CHIP_RV200)
WREG32(R_00015C_AGP_BASE_2,
upper_32_bits(rdev->mc.agp_base) & 0xff);
@@ -3958,7 +4032,6 @@ int r100_resume(struct radeon_device *rdev)
int r100_suspend(struct radeon_device *rdev)
{
- radeon_pm_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -3969,7 +4042,6 @@ int r100_suspend(struct radeon_device *rdev)
void r100_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -4076,9 +4148,6 @@ int r100_init(struct radeon_device *rdev)
}
r100_set_safe_registers(rdev);
- /* Initialize power management */
- radeon_pm_init(rdev);
-
rdev->accel_working = true;
r = r100_startup(rdev);
if (r) {
@@ -4095,30 +4164,42 @@ int r100_init(struct radeon_device *rdev)
return 0;
}
-uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg)
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+ bool always_indirect)
{
- unsigned long flags;
- uint32_t ret;
-
- spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
- bus_space_write_4(rdev->memt, rdev->rmmio_bsh,
- RADEON_MM_INDEX, reg);
- ret = bus_space_read_4(rdev->memt, rdev->rmmio_bsh,
- RADEON_MM_DATA);
- spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
- return ret;
+	if (reg < rdev->rmmio_size && !always_indirect)
+		return bus_space_read_4(rdev->memt, rdev->rmmio, reg);
+	else {
+ unsigned long flags;
+ uint32_t ret;
+
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+ bus_space_write_4(rdev->memt, rdev->rmmio,
+ RADEON_MM_INDEX, reg);
+ ret = bus_space_read_4(rdev->memt, rdev->rmmio,
+ RADEON_MM_DATA);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+
+ return ret;
+ }
}
-void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+ bool always_indirect)
{
- unsigned long flags;
-
- spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
- bus_space_write_4(rdev->memt, rdev->rmmio_bsh,
- RADEON_MM_INDEX, reg);
- bus_space_write_4(rdev->memt, rdev->rmmio_bsh,
- RADEON_MM_DATA, v);
- spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+ if (reg < rdev->rmmio_size && !always_indirect)
+ bus_space_write_4(rdev->memt, rdev->rmmio, reg, v);
+ else {
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+ bus_space_write_4(rdev->memt, rdev->rmmio,
+ RADEON_MM_INDEX, reg);
+ bus_space_write_4(rdev->memt, rdev->rmmio,
+ RADEON_MM_DATA, v);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+ }
}
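A hedged sketch of the index/data indirection these helpers fall back to when
a register lies beyond the directly mapped window: write the target offset to
RADEON_MM_INDEX, then access RADEON_MM_DATA (the mmio_idx_lock taken in the
real code is omitted here for brevity).

static uint32_t mm_indirect_read(struct radeon_device *rdev, uint32_t reg)
{
	bus_space_write_4(rdev->memt, rdev->rmmio, RADEON_MM_INDEX, reg);
	return bus_space_read_4(rdev->memt, rdev->rmmio, RADEON_MM_DATA);
}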
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
diff --git a/sys/dev/pci/drm/radeon/r100_track.h b/sys/dev/pci/drm/radeon/r100_track.h
index eb40888bdfc..5d0ae292c6a 100644
--- a/sys/dev/pci/drm/radeon/r100_track.h
+++ b/sys/dev/pci/drm/radeon/r100_track.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: r100_track.h,v 1.3 2018/04/20 16:09:36 deraadt Exp $ */
#define R100_TRACK_MAX_TEXTURE 3
#define R200_TRACK_MAX_TEXTURE 6
@@ -81,6 +82,10 @@ struct r100_cs_track {
int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track);
+int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc);
+void r100_cs_dump_packet(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt);
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
diff --git a/sys/dev/pci/drm/radeon/r100d.h b/sys/dev/pci/drm/radeon/r100d.h
index f0f8ee69f48..31a783f1e75 100644
--- a/sys/dev/pci/drm/radeon/r100d.h
+++ b/sys/dev/pci/drm/radeon/r100d.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: r100d.h,v 1.3 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -64,6 +65,17 @@
REG_SET(PACKET3_IT_OPCODE, (op)) | \
REG_SET(PACKET3_COUNT, (n)))
+#define PACKET_TYPE0 0
+#define PACKET_TYPE1 1
+#define PACKET_TYPE2 2
+#define PACKET_TYPE3 3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
+#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+
/* Registers */
#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
diff --git a/sys/dev/pci/drm/radeon/r200.c b/sys/dev/pci/drm/radeon/r200.c
index 8267fd0f4be..ea04034724b 100644
--- a/sys/dev/pci/drm/radeon/r200.c
+++ b/sys/dev/pci/drm/radeon/r200.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: r200.c,v 1.5 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -80,14 +81,13 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
return vtx_size;
}
-struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct reservation_object *resv)
+int r200_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- struct radeon_fence *fence;
uint32_t size;
uint32_t cur_size;
int i, num_loops;
@@ -99,7 +99,7 @@ struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- return ERR_PTR(r);
+ return r;
}
/* Must wait for 2D idle & clean before DMA or hangs might happen */
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
@@ -119,13 +119,11 @@ struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
}
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
- r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
- if (r) {
- radeon_ring_unlock_undo(rdev, ring);
- return ERR_PTR(r);
+ if (fence) {
+ r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
}
- radeon_ring_unlock_commit(rdev, ring, false);
- return fence;
+ radeon_ring_unlock_commit(rdev, ring);
+ return r;
}
@@ -146,7 +144,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg)
{
- struct radeon_bo_list *reloc;
+ struct radeon_cs_reloc *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
uint32_t tmp;
@@ -165,7 +163,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
break;
@@ -178,30 +176,30 @@ int r200_packet0_check(struct radeon_cs_parser *p,
return r;
break;
case RADEON_RB3D_DEPTHOFFSET:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
track->zb.robj = reloc->robj;
track->zb.offset = idx_value;
track->zb_dirty = true;
- ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
track->cb[0].robj = reloc->robj;
track->cb[0].offset = idx_value;
track->cb_dirty = true;
- ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R200_PP_TXOFFSET_0:
case R200_PP_TXOFFSET_1:
@@ -210,24 +208,24 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_TXOFFSET_4:
case R200_PP_TXOFFSET_5:
i = (reg - R200_PP_TXOFFSET_0) / 24;
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->tiling_flags & RADEON_TILING_MACRO)
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R200_TXO_MACRO_TILE;
- if (reloc->tiling_flags & RADEON_TILING_MICRO)
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R200_TXO_MICRO_TILE;
tmp = idx_value & ~(0x7 << 2);
tmp |= tile_flags;
- ib[idx] = tmp + ((u32)reloc->gpu_offset);
+ ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
} else
- ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
break;
@@ -263,15 +261,15 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_CUBIC_OFFSET_F5_5:
i = (reg - R200_PP_TXOFFSET_0) / 24;
face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4;
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
track->textures[i].cube_info[face - 1].offset = idx_value;
- ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].cube_info[face - 1].robj = reloc->robj;
track->tex_dirty = true;
break;
@@ -281,18 +279,18 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->zb_dirty = true;
break;
case RADEON_RB3D_COLORPITCH:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->tiling_flags & RADEON_TILING_MACRO)
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_COLOR_TILE_ENABLE;
- if (reloc->tiling_flags & RADEON_TILING_MICRO)
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
tmp = idx_value & ~(0x7 << 16);
@@ -358,14 +356,14 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->zb_dirty = true;
break;
case RADEON_RB3D_ZPASS_ADDR:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
- ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_PP_CNTL:
{
diff --git a/sys/dev/pci/drm/radeon/r300.c b/sys/dev/pci/drm/radeon/r300.c
index 4ebb653cf65..b34f87e7141 100644
--- a/sys/dev/pci/drm/radeon/r300.c
+++ b/sys/dev/pci/drm/radeon/r300.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: r300.c,v 1.9 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -48,31 +49,6 @@
*/
/*
- * Indirect registers accessor
- */
-uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
-{
- unsigned long flags;
- uint32_t r;
-
- spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
- WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
- r = RREG32(RADEON_PCIE_DATA);
- spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
- return r;
-}
-
-void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
- WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
- WREG32(RADEON_PCIE_DATA, (v));
- spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
-}
-
-/*
* rv370,rv380 PCIE GART
*/
static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
@@ -92,32 +68,25 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
mb();
}
-#define R300_PTE_UNSNOOPED (1 << 0)
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE (1 << 3)
-uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
+int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
- addr = (lower_32_bits(addr) >> 8) |
- ((upper_32_bits(addr) & 0xff) << 24);
- if (flags & RADEON_GART_PAGE_READ)
- addr |= R300_PTE_READABLE;
- if (flags & RADEON_GART_PAGE_WRITE)
- addr |= R300_PTE_WRITEABLE;
- if (!(flags & RADEON_GART_PAGE_SNOOP))
- addr |= R300_PTE_UNSNOOPED;
- return addr;
-}
-
-void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t entry)
-{
- void __iomem *ptr = rdev->gart.ptr;
+ volatile uint32_t *ptr = rdev->gart.ptr;
+	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
+ return -EINVAL;
+ }
+ addr = (lower_32_bits(addr) >> 8) |
+ ((upper_32_bits(addr) & 0xff) << 24) |
+ R300_PTE_WRITEABLE | R300_PTE_READABLE;
	/* on x86 we want this to be CPU endian; on powerpc without HW
	 * swappers it'll get swapped on the way into VRAM - so no need
	 * for cpu_to_le32 on VRAM tables */
- writel(entry, ((void __iomem *)ptr) + (i * 4));
+ ptr += i;
+ *ptr = (uint32_t)addr;
+ return 0;
}
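A worked example of the rv370 PTE encoding above, using an arbitrary 40-bit
bus address 0x123456000: the low 32 bits shift right by 8, the high byte
lands in bits 24-31, and the access bits fill the bottom.

static uint32_t rv370_pte_example(void)
{
	uint64_t addr = 0x123456000ULL;			/* arbitrary example */
	uint32_t pte;

	pte = (lower_32_bits(addr) >> 8) |		/* 0x00234560 */
	    ((upper_32_bits(addr) & 0xff) << 24) |	/* 0x01000000 */
	    R300_PTE_WRITEABLE | R300_PTE_READABLE;	/* 0x0000000c */
	return pte;					/* 0x0123456c */
}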
int rv370_pcie_gart_init(struct radeon_device *rdev)
@@ -137,7 +106,6 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
- rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
return radeon_gart_table_vram_alloc(rdev);
}
@@ -155,6 +123,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
+ radeon_gart_restore(rdev);
/* discard memory request outside of configured range */
tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
@@ -324,7 +293,7 @@ void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
radeon_ring_write(ring,
R300_GEOMETRY_ROUND_NEAREST |
R300_COLOR_ROUND_NEAREST);
- radeon_ring_unlock_commit(rdev, ring, false);
+ radeon_ring_unlock_commit(rdev, ring);
}
static void r300_errata(struct radeon_device *rdev)
@@ -429,7 +398,9 @@ int r300_asic_reset(struct radeon_device *rdev)
WREG32(RADEON_CP_RB_WPTR, 0);
WREG32(RADEON_CP_RB_CNTL, tmp);
/* save PCI state */
+#ifdef notyet
pci_save_state(rdev->pdev);
+#endif
/* disable bus mastering */
r100_bm_disable(rdev);
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
@@ -453,7 +424,9 @@ int r300_asic_reset(struct radeon_device *rdev)
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* restore PCI & busmastering */
+#ifdef notyet
pci_restore_state(rdev->pdev);
+#endif
r100_enable_bm(rdev);
/* Check if GPU is idle */
if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
@@ -627,7 +600,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg)
{
- struct radeon_bo_list *reloc;
+ struct radeon_cs_reloc *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
uint32_t tmp, tile_flags = 0;
@@ -646,7 +619,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
break;
@@ -661,30 +634,30 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_RB3D_COLOROFFSET2:
case R300_RB3D_COLOROFFSET3:
i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
track->cb[i].robj = reloc->robj;
track->cb[i].offset = idx_value;
track->cb_dirty = true;
- ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R300_ZB_DEPTHOFFSET:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
track->zb.robj = reloc->robj;
track->zb.offset = idx_value;
track->zb_dirty = true;
- ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R300_TX_OFFSET_0:
case R300_TX_OFFSET_0+4:
@@ -703,26 +676,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_TX_OFFSET_0+56:
case R300_TX_OFFSET_0+60:
i = (reg - R300_TX_OFFSET_0) >> 2;
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
- ((idx_value & ~31) + (u32)reloc->gpu_offset);
+ ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
} else {
- if (reloc->tiling_flags & RADEON_TILING_MACRO)
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_TXO_MACRO_TILE;
- if (reloc->tiling_flags & RADEON_TILING_MICRO)
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_TXO_MICRO_TILE;
- else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
- tmp = idx_value + ((u32)reloc->gpu_offset);
+ tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
tmp |= tile_flags;
ib[idx] = tmp;
}
@@ -776,19 +749,19 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
/* RB3D_COLORPITCH2 */
/* RB3D_COLORPITCH3 */
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
- if (reloc->tiling_flags & RADEON_TILING_MACRO)
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_COLOR_TILE_ENABLE;
- if (reloc->tiling_flags & RADEON_TILING_MICRO)
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_COLOR_MICROTILE_ENABLE;
- else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
tmp = idx_value & ~(0x7 << 16);
@@ -861,19 +834,19 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x4F24:
/* ZB_DEPTHPITCH */
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
- if (reloc->tiling_flags & RADEON_TILING_MACRO)
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_DEPTHMACROTILE_ENABLE;
- if (reloc->tiling_flags & RADEON_TILING_MICRO)
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_DEPTHMICROTILE_TILED;
- else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
tmp = idx_value & ~(0x7 << 16);
@@ -1076,14 +1049,14 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->tex_dirty = true;
break;
case R300_ZB_ZPASS_ADDR:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
- ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case 0x4e0c:
/* RB3D_COLOR_CHANNEL_MASK */
@@ -1118,17 +1091,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->cb_dirty = true;
break;
case R300_RB3D_AARESOLVE_OFFSET:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
track->aa.robj = reloc->robj;
track->aa.offset = idx_value;
track->aa_dirty = true;
- ib[idx] = idx_value + ((u32)reloc->gpu_offset);
+ ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R300_RB3D_AARESOLVE_PITCH:
track->aa.pitch = idx_value & 0x3FFE;
@@ -1171,7 +1144,7 @@ fail:
static int r300_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
- struct radeon_bo_list *reloc;
+ struct radeon_cs_reloc *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
unsigned idx;
@@ -1187,13 +1160,13 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
return r;
break;
case PACKET3_INDX_BUFFER:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
- radeon_cs_dump_packet(p, pkt);
+ r100_cs_dump_packet(p, pkt);
return r;
}
- ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
+ ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
if (r) {
return r;
@@ -1288,21 +1261,21 @@ int r300_cs_parse(struct radeon_cs_parser *p)
r100_cs_track_clear(p->rdev, track);
p->track = track;
do {
- r = radeon_cs_packet_parse(p, &pkt, p->idx);
+ r = r100_cs_packet_parse(p, &pkt, p->idx);
if (r) {
return r;
}
p->idx += pkt.count + 2;
switch (pkt.type) {
- case RADEON_PACKET_TYPE0:
+ case PACKET_TYPE0:
r = r100_cs_parse_packet0(p, &pkt,
p->rdev->config.r300.reg_safe_bm,
p->rdev->config.r300.reg_safe_bm_size,
&r300_packet0_check);
break;
- case RADEON_PACKET_TYPE2:
+ case PACKET_TYPE2:
break;
- case RADEON_PACKET_TYPE3:
+ case PACKET_TYPE3:
r = r300_packet3_check(p, &pkt);
break;
default:
@@ -1312,7 +1285,7 @@ int r300_cs_parse(struct radeon_cs_parser *p)
if (r) {
return r;
}
- } while (p->idx < p->chunk_ib->length_dw);
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
return 0;
}
@@ -1471,7 +1444,6 @@ int r300_resume(struct radeon_device *rdev)
int r300_suspend(struct radeon_device *rdev)
{
- radeon_pm_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -1484,7 +1456,6 @@ int r300_suspend(struct radeon_device *rdev)
void r300_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -1571,13 +1542,10 @@ int r300_init(struct radeon_device *rdev)
}
r300_set_reg_safe(rdev);
- /* Initialize power management */
- radeon_pm_init(rdev);
-
rdev->accel_working = true;
r = r300_startup(rdev);
if (r) {
- /* Something went wrong with the accel init, so stop accel */
+		/* Something went wrong with the accel init, so stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
diff --git a/sys/dev/pci/drm/radeon/r300_reg.h b/sys/dev/pci/drm/radeon/r300_reg.h
index 00c0d2ba22d..9e6bdf9384c 100644
--- a/sys/dev/pci/drm/radeon/r300_reg.h
+++ b/sys/dev/pci/drm/radeon/r300_reg.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: r300_reg.h,v 1.3 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2005 Nicolai Haehnle et al.
* Copyright 2008 Advanced Micro Devices, Inc.
diff --git a/sys/dev/pci/drm/radeon/r300d.h b/sys/dev/pci/drm/radeon/r300d.h
index ff229a00d27..0ba74ea4b6c 100644
--- a/sys/dev/pci/drm/radeon/r300d.h
+++ b/sys/dev/pci/drm/radeon/r300d.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: r300d.h,v 1.3 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -65,6 +66,17 @@
REG_SET(PACKET3_IT_OPCODE, (op)) | \
REG_SET(PACKET3_COUNT, (n)))
+#define PACKET_TYPE0 0
+#define PACKET_TYPE1 1
+#define PACKET_TYPE2 2
+#define PACKET_TYPE3 3
+
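+/* decode helpers for CP packet headers: the packet type lives in bits
+ * 31:30, the dword count in bits 29:16, the type-0 register dword
+ * offset in bits 12:0 (hence the << 2 to bytes), and the type-3
+ * opcode in bits 15:8.
+ */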
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
+#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+
/* Registers */
#define R_000148_MC_FB_LOCATION 0x000148
#define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0)
diff --git a/sys/dev/pci/drm/radeon/r420.c b/sys/dev/pci/drm/radeon/r420.c
index 1089aebee66..3c231602683 100644
--- a/sys/dev/pci/drm/radeon/r420.c
+++ b/sys/dev/pci/drm/radeon/r420.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: r420.c,v 1.7 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -158,25 +159,18 @@ void r420_pipes_init(struct radeon_device *rdev)
u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
{
- unsigned long flags;
u32 r;
- spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
r = RREG32(R_0001FC_MC_IND_DATA);
- spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r;
}
void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
- unsigned long flags;
-
- spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
S_0001F8_MC_IND_WR_EN(1));
WREG32(R_0001FC_MC_IND_DATA, v);
- spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
static void r420_debugfs(struct radeon_device *rdev)
@@ -217,7 +211,7 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
radeon_ring_write(ring, rdev->config.r300.resync_scratch);
radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring, false);
+ radeon_ring_unlock_commit(rdev, ring);
}
static void r420_cp_errata_fini(struct radeon_device *rdev)
@@ -230,7 +224,7 @@ static void r420_cp_errata_fini(struct radeon_device *rdev)
radeon_ring_lock(rdev, ring, 8);
radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_RB3D_DC_FINISH);
- radeon_ring_unlock_commit(rdev, ring, false);
+ radeon_ring_unlock_commit(rdev, ring);
radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
}
@@ -333,7 +327,6 @@ int r420_resume(struct radeon_device *rdev)
int r420_suspend(struct radeon_device *rdev)
{
- radeon_pm_suspend(rdev);
r420_cp_errata_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -347,7 +340,6 @@ int r420_suspend(struct radeon_device *rdev)
void r420_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -444,9 +436,6 @@ int r420_init(struct radeon_device *rdev)
}
r420_set_reg_safe(rdev);
- /* Initialize power management */
- radeon_pm_init(rdev);
-
rdev->accel_working = true;
r = r420_startup(rdev);
if (r) {
diff --git a/sys/dev/pci/drm/radeon/r420d.h b/sys/dev/pci/drm/radeon/r420d.h
index fc78d31a0b4..74f79636f0d 100644
--- a/sys/dev/pci/drm/radeon/r420d.h
+++ b/sys/dev/pci/drm/radeon/r420d.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: r420d.h,v 1.3 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
diff --git a/sys/dev/pci/drm/radeon/r500_reg.h b/sys/dev/pci/drm/radeon/r500_reg.h
index 136b7bc7cd2..eead3d85392 100644
--- a/sys/dev/pci/drm/radeon/r500_reg.h
+++ b/sys/dev/pci/drm/radeon/r500_reg.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: r500_reg.h,v 1.3 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -355,7 +356,6 @@
# define AVIVO_D1CRTC_V_BLANK (1 << 0)
#define AVIVO_D1CRTC_STATUS_POSITION 0x60a0
#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
-#define AVIVO_D1CRTC_STATUS_HV_COUNT 0x60ac
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
#define AVIVO_D1MODE_MASTER_UPDATE_LOCK 0x60e0
@@ -402,7 +402,6 @@
* block and vice versa. This applies to GRPH, CUR, etc.
*/
#define AVIVO_D1GRPH_LUT_SEL 0x6108
-# define AVIVO_LUT_10BIT_BYPASS_EN (1 << 8)
#define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
#define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
#define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
diff --git a/sys/dev/pci/drm/radeon/r520.c b/sys/dev/pci/drm/radeon/r520.c
index 3f51ef2028b..9da1f5d3320 100644
--- a/sys/dev/pci/drm/radeon/r520.c
+++ b/sys/dev/pci/drm/radeon/r520.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: r520.c,v 1.6 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -312,9 +313,6 @@ int r520_init(struct radeon_device *rdev)
return r;
rv515_set_safe_registers(rdev);
- /* Initialize power management */
- radeon_pm_init(rdev);
-
rdev->accel_working = true;
r = r520_startup(rdev);
if (r) {
diff --git a/sys/dev/pci/drm/radeon/r520d.h b/sys/dev/pci/drm/radeon/r520d.h
index 61af61f644b..d008fdffc47 100644
--- a/sys/dev/pci/drm/radeon/r520d.h
+++ b/sys/dev/pci/drm/radeon/r520d.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: r520d.h,v 1.3 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
diff --git a/sys/dev/pci/drm/radeon/r600.c b/sys/dev/pci/drm/radeon/r600.c
index 05ca3ffbb65..d64a6fd0bab 100644
--- a/sys/dev/pci/drm/radeon/r600.c
+++ b/sys/dev/pci/drm/radeon/r600.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: r600.c,v 1.21 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -29,12 +30,22 @@
#include <dev/pci/drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
-#include "radeon_audio.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"
-#include "radeon_ucode.h"
+
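+/* expected firmware image sizes for each asic generation, used by
+ * r600_init_microcode() below to validate the length of every image
+ * it loads */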
+#define PFP_UCODE_SIZE 576
+#define PM4_UCODE_SIZE 1792
+#define RLC_UCODE_SIZE 768
+#define R700_PFP_UCODE_SIZE 848
+#define R700_PM4_UCODE_SIZE 1360
+#define R700_RLC_UCODE_SIZE 1024
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+#define EVERGREEN_RLC_UCODE_SIZE 768
+#define CAYMAN_RLC_UCODE_SIZE 1024
+#define ARUBA_RLC_UCODE_SIZE 1536
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -53,32 +64,24 @@ MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
-MODULE_FIRMWARE("radeon/RV770_smc.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
-MODULE_FIRMWARE("radeon/RV730_smc.bin");
-MODULE_FIRMWARE("radeon/RV740_smc.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
-MODULE_FIRMWARE("radeon/RV710_smc.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
-MODULE_FIRMWARE("radeon/CEDAR_smc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
-MODULE_FIRMWARE("radeon/REDWOOD_smc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
-MODULE_FIRMWARE("radeon/JUNIPER_smc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
-MODULE_FIRMWARE("radeon/CYPRESS_smc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
@@ -87,12 +90,6 @@ MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");
-static const u32 crtc_offsets[2] =
-{
- 0,
- AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
-};
-
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
/* r600,rv610,rv630,rv620,rv635,rv670 */
@@ -101,240 +98,6 @@ static void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
-extern int evergreen_rlc_resume(struct radeon_device *rdev);
-extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);
-
-/*
- * Indirect registers accessor
- */
-u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
-{
- unsigned long flags;
- u32 r;
-
- spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
- WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
- r = RREG32(R600_RCU_DATA);
- spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
- return r;
-}
-
-void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
- WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
- WREG32(R600_RCU_DATA, (v));
- spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
-}
-
-u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
-{
- unsigned long flags;
- u32 r;
-
- spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
- WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
- r = RREG32(R600_UVD_CTX_DATA);
- spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
- return r;
-}
-
-void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
- WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
- WREG32(R600_UVD_CTX_DATA, (v));
- spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
-}
-
-/**
- * r600_get_allowed_info_register - fetch the register for the info ioctl
- *
- * @rdev: radeon_device pointer
- * @reg: register offset in bytes
- * @val: register value
- *
- * Returns 0 for success or -EINVAL for an invalid register
- *
- */
-int r600_get_allowed_info_register(struct radeon_device *rdev,
- u32 reg, u32 *val)
-{
- switch (reg) {
- case GRBM_STATUS:
- case GRBM_STATUS2:
- case R_000E50_SRBM_STATUS:
- case DMA_STATUS_REG:
- case UVD_STATUS:
- *val = RREG32(reg);
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-/**
- * r600_get_xclk - get the xclk
- *
- * @rdev: radeon_device pointer
- *
- * Returns the reference clock used by the gfx engine
- * (r6xx, IGPs, APUs).
- */
-u32 r600_get_xclk(struct radeon_device *rdev)
-{
- return rdev->clock.spll.reference_freq;
-}
-
-int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
-{
- unsigned fb_div = 0, ref_div, vclk_div = 0, dclk_div = 0;
- int r;
-
- /* bypass vclk and dclk with bclk */
- WREG32_P(CG_UPLL_FUNC_CNTL_2,
- VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
- ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
-
- /* assert BYPASS_EN, deassert UPLL_RESET, UPLL_SLEEP and UPLL_CTLREQ */
- WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~(
- UPLL_RESET_MASK | UPLL_SLEEP_MASK | UPLL_CTLREQ_MASK));
-
- if (rdev->family >= CHIP_RS780)
- WREG32_P(GFX_MACRO_BYPASS_CNTL, UPLL_BYPASS_CNTL,
- ~UPLL_BYPASS_CNTL);
-
- if (!vclk || !dclk) {
- /* keep the Bypass mode, put PLL to sleep */
- WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
- return 0;
- }
-
- if (rdev->clock.spll.reference_freq == 10000)
- ref_div = 34;
- else
- ref_div = 4;
-
- r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
- ref_div + 1, 0xFFF, 2, 30, ~0,
- &fb_div, &vclk_div, &dclk_div);
- if (r)
- return r;
-
- if (rdev->family >= CHIP_RV670 && rdev->family < CHIP_RS780)
- fb_div >>= 1;
- else
- fb_div |= 1;
-
- r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
- if (r)
- return r;
-
- /* assert PLL_RESET */
- WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
-
- /* For RS780 we have to choose ref clk */
- if (rdev->family >= CHIP_RS780)
- WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REFCLK_SRC_SEL_MASK,
- ~UPLL_REFCLK_SRC_SEL_MASK);
-
- /* set the required fb, ref and post divder values */
- WREG32_P(CG_UPLL_FUNC_CNTL,
- UPLL_FB_DIV(fb_div) |
- UPLL_REF_DIV(ref_div),
- ~(UPLL_FB_DIV_MASK | UPLL_REF_DIV_MASK));
- WREG32_P(CG_UPLL_FUNC_CNTL_2,
- UPLL_SW_HILEN(vclk_div >> 1) |
- UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
- UPLL_SW_HILEN2(dclk_div >> 1) |
- UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)) |
- UPLL_DIVEN_MASK | UPLL_DIVEN2_MASK,
- ~UPLL_SW_MASK);
-
- /* give the PLL some time to settle */
- mdelay(15);
-
- /* deassert PLL_RESET */
- WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
-
- mdelay(15);
-
- /* deassert BYPASS EN */
- WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
-
- if (rdev->family >= CHIP_RS780)
- WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~UPLL_BYPASS_CNTL);
-
- r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
- if (r)
- return r;
-
- /* switch VCLK and DCLK selection */
- WREG32_P(CG_UPLL_FUNC_CNTL_2,
- VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
- ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
-
- mdelay(100);
-
- return 0;
-}
-
-void dce3_program_fmt(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- int bpc = 0;
- u32 tmp = 0;
- enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
-
- if (connector) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- bpc = radeon_get_monitor_bpc(connector);
- dither = radeon_connector->dither;
- }
-
- /* LVDS FMT is set up by atom */
- if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
- return;
-
- /* not needed for analog */
- if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
- (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
- return;
-
- if (bpc == 0)
- return;
-
- switch (bpc) {
- case 6:
- if (dither == RADEON_FMT_DITHER_ENABLE)
- /* XXX sort out optimal dither settings */
- tmp |= FMT_SPATIAL_DITHER_EN;
- else
- tmp |= FMT_TRUNCATE_EN;
- break;
- case 8:
- if (dither == RADEON_FMT_DITHER_ENABLE)
- /* XXX sort out optimal dither settings */
- tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
- else
- tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
- break;
- case 10:
- default:
- /* not needed */
- break;
- }
-
- WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
-}
/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
@@ -1067,7 +830,7 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
/* flush hdp cache so updates hit vram */
if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
!(rdev->flags & RADEON_IS_AGP)) {
- void __iomem *ptr = (void *)rdev->gart.ptr;
+ volatile uint32_t *ptr = rdev->gart.ptr;
u32 tmp;
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
@@ -1076,7 +839,7 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
* method for them.
*/
WREG32(HDP_DEBUG1, 0);
- tmp = readl((void __iomem *)ptr);
+ tmp = *ptr;
} else
WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
@@ -1126,6 +889,7 @@ static int r600_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
+ radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -1150,8 +914,6 @@ static int r600_pcie_gart_enable(struct radeon_device *rdev)
WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
- WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
- WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
@@ -1202,8 +964,6 @@ static void r600_pcie_gart_disable(struct radeon_device *rdev)
WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
- WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
- WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
radeon_gart_table_vram_unpin(rdev);
}
@@ -1263,31 +1023,6 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
-{
- unsigned long flags;
- uint32_t r;
-
- spin_lock_irqsave(&rdev->mc_idx_lock, flags);
- WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
- r = RREG32(R_0028FC_MC_DATA);
- WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
- spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
- return r;
-}
-
-void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&rdev->mc_idx_lock, flags);
- WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
- S_0028F8_MC_IND_WR_EN(1));
- WREG32(R_0028FC_MC_DATA, v);
- WREG32(R_0028F8_MC_INDEX, 0x7F);
- spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
-}
-
static void r600_mc_program(struct radeon_device *rdev)
{
struct rv515_mc_save save;
@@ -1387,7 +1122,7 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc
}
if (rdev->flags & RADEON_IS_AGP) {
size_bf = mc->gtt_start;
- size_af = mc->mc_mask - mc->gtt_end;
+ size_af = 0xFFFFFFFF - mc->gtt_end;
if (size_bf > size_af) {
if (mc->mc_vram_size > size_bf) {
dev_warn(rdev->dev, "limiting VRAM\n");
@@ -1423,8 +1158,6 @@ static int r600_mc_init(struct radeon_device *rdev)
{
u32 tmp;
int chansize, numchan;
- uint32_t h_addr, l_addr;
- unsigned long long k8_addr;
/* Get VRAM informations */
rdev->mc.vram_is_ddr = true;
@@ -1465,30 +1198,7 @@ static int r600_mc_init(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_IGP) {
rs690_pm_info(rdev);
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
-
- if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
- /* Use K8 direct mapping for fast fb access. */
- rdev->fastfb_working = false;
- h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
- l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
- k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
- if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
-#endif
- {
- /* FastFB shall be used with UMA memory. Here it is simply disabled when sideport
- * memory is present.
- */
- if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
- DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
- (unsigned long long)rdev->mc.aper_base, k8_addr);
- rdev->mc.aper_base = (resource_size_t)k8_addr;
- rdev->fastfb_working = true;
- }
- }
- }
}
-
radeon_update_bandwidth_info(rdev);
return 0;
}
@@ -1500,7 +1210,7 @@ int r600_vram_scratch_init(struct radeon_device *rdev)
if (rdev->vram_scratch.robj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- 0, NULL, NULL, &rdev->vram_scratch.robj);
+ NULL, &rdev->vram_scratch.robj);
if (r) {
return r;
}
@@ -1540,381 +1250,207 @@ void r600_vram_scratch_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->vram_scratch.robj);
}
-void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
+/* We don't check whether the GPU really needs a reset; we simply do the
+ * reset. It's up to the caller to determine if the GPU needs one. We
+ * might add a helper function to check that.
+ */
+static void r600_gpu_soft_reset_gfx(struct radeon_device *rdev)
{
- u32 tmp = RREG32(R600_BIOS_3_SCRATCH);
-
- if (hung)
- tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
- else
- tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
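+	/* every GFX block whose busy bit should force the block soft
+	 * reset below */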
+ u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
+ S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
+ S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
+ S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
+ S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
+ S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
+ S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
+ S_008010_GUI_ACTIVE(1);
+ u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
+ S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
+ S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
+ S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
+ S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
+ S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
+ S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
+ S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
+ u32 tmp;
- WREG32(R600_BIOS_3_SCRATCH, tmp);
-}
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ return;
-static void r600_print_gpu_status_regs(struct radeon_device *rdev)
-{
dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
- RREG32(R_008010_GRBM_STATUS));
+ RREG32(R_008010_GRBM_STATUS));
dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
- RREG32(R_008014_GRBM_STATUS2));
+ RREG32(R_008014_GRBM_STATUS2));
dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
- RREG32(R_000E50_SRBM_STATUS));
+ RREG32(R_000E50_SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
- RREG32(CP_STALLED_STAT1));
+ RREG32(CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
- RREG32(CP_STALLED_STAT2));
+ RREG32(CP_STALLED_STAT2));
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
- RREG32(CP_BUSY_STAT));
+ RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
- RREG32(CP_STAT));
- dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
- RREG32(DMA_STATUS_REG));
-}
-
-static bool r600_is_display_hung(struct radeon_device *rdev)
-{
- u32 crtc_hung = 0;
- u32 crtc_status[2];
- u32 i, j, tmp;
-
- for (i = 0; i < rdev->num_crtc; i++) {
- if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
- crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
- crtc_hung |= (1 << i);
- }
- }
-
- for (j = 0; j < 10; j++) {
- for (i = 0; i < rdev->num_crtc; i++) {
- if (crtc_hung & (1 << i)) {
- tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
- if (tmp != crtc_status[i])
- crtc_hung &= ~(1 << i);
- }
- }
- if (crtc_hung == 0)
- return false;
- udelay(100);
- }
+ RREG32(CP_STAT));
- return true;
-}
-
-u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
-{
- u32 reset_mask = 0;
- u32 tmp;
+ /* Disable CP parsing/prefetching */
+ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
- /* GRBM_STATUS */
- tmp = RREG32(R_008010_GRBM_STATUS);
- if (rdev->family >= CHIP_RV770) {
- if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
- G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
- G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
- G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
- G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
- reset_mask |= RADEON_RESET_GFX;
- } else {
- if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
- G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
- G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
- G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
- G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
- reset_mask |= RADEON_RESET_GFX;
+ /* Check if any of the rendering block is busy and reset it */
+ if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
+ (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
+ tmp = S_008020_SOFT_RESET_CR(1) |
+ S_008020_SOFT_RESET_DB(1) |
+ S_008020_SOFT_RESET_CB(1) |
+ S_008020_SOFT_RESET_PA(1) |
+ S_008020_SOFT_RESET_SC(1) |
+ S_008020_SOFT_RESET_SMX(1) |
+ S_008020_SOFT_RESET_SPI(1) |
+ S_008020_SOFT_RESET_SX(1) |
+ S_008020_SOFT_RESET_SH(1) |
+ S_008020_SOFT_RESET_TC(1) |
+ S_008020_SOFT_RESET_TA(1) |
+ S_008020_SOFT_RESET_VC(1) |
+ S_008020_SOFT_RESET_VGT(1);
+ dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(R_008020_GRBM_SOFT_RESET, tmp);
+ RREG32(R_008020_GRBM_SOFT_RESET);
+ mdelay(15);
+ WREG32(R_008020_GRBM_SOFT_RESET, 0);
}
+ /* Reset CP (we always reset CP) */
+ tmp = S_008020_SOFT_RESET_CP(1);
+ dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(R_008020_GRBM_SOFT_RESET, tmp);
+ RREG32(R_008020_GRBM_SOFT_RESET);
+ mdelay(15);
+ WREG32(R_008020_GRBM_SOFT_RESET, 0);
- if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
- G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
- reset_mask |= RADEON_RESET_CP;
-
- if (G_008010_GRBM_EE_BUSY(tmp))
- reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
-
- /* DMA_STATUS_REG */
- tmp = RREG32(DMA_STATUS_REG);
- if (!(tmp & DMA_IDLE))
- reset_mask |= RADEON_RESET_DMA;
-
- /* SRBM_STATUS */
- tmp = RREG32(R_000E50_SRBM_STATUS);
- if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
- reset_mask |= RADEON_RESET_RLC;
-
- if (G_000E50_IH_BUSY(tmp))
- reset_mask |= RADEON_RESET_IH;
-
- if (G_000E50_SEM_BUSY(tmp))
- reset_mask |= RADEON_RESET_SEM;
-
- if (G_000E50_GRBM_RQ_PENDING(tmp))
- reset_mask |= RADEON_RESET_GRBM;
-
- if (G_000E50_VMC_BUSY(tmp))
- reset_mask |= RADEON_RESET_VMC;
-
- if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
- G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
- G_000E50_MCDW_BUSY(tmp))
- reset_mask |= RADEON_RESET_MC;
-
- if (r600_is_display_hung(rdev))
- reset_mask |= RADEON_RESET_DISPLAY;
-
-	/* Skip MC reset as it's most likely not hung, just busy */
- if (reset_mask & RADEON_RESET_MC) {
- DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
- reset_mask &= ~RADEON_RESET_MC;
- }
+ dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
+ RREG32(R_008010_GRBM_STATUS));
+ dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
+ RREG32(R_008014_GRBM_STATUS2));
+ dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
+ RREG32(R_000E50_SRBM_STATUS));
+ dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT1));
+ dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+ RREG32(CP_STALLED_STAT2));
+ dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
+ RREG32(CP_BUSY_STAT));
+ dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
+ RREG32(CP_STAT));
- return reset_mask;
}
-static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+static void r600_gpu_soft_reset_dma(struct radeon_device *rdev)
{
- struct rv515_mc_save save;
- u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
u32 tmp;
- if (reset_mask == 0)
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
return;
- dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
- r600_print_gpu_status_regs(rdev);
+ /* Disable DMA */
+ tmp = RREG32(DMA_RB_CNTL);
+ tmp &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, tmp);
- /* Disable CP parsing/prefetching */
+ /* Reset dma */
if (rdev->family >= CHIP_RV770)
- WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
+ WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
else
- WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
-
- /* disable the RLC */
- WREG32(RLC_CNTL, 0);
-
- if (reset_mask & RADEON_RESET_DMA) {
- /* Disable DMA */
- tmp = RREG32(DMA_RB_CNTL);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL, tmp);
- }
-
- mdelay(50);
-
- rv515_mc_stop(rdev, &save);
- if (r600_mc_wait_for_idle(rdev)) {
- dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
- }
-
- if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
- if (rdev->family >= CHIP_RV770)
- grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
- S_008020_SOFT_RESET_CB(1) |
- S_008020_SOFT_RESET_PA(1) |
- S_008020_SOFT_RESET_SC(1) |
- S_008020_SOFT_RESET_SPI(1) |
- S_008020_SOFT_RESET_SX(1) |
- S_008020_SOFT_RESET_SH(1) |
- S_008020_SOFT_RESET_TC(1) |
- S_008020_SOFT_RESET_TA(1) |
- S_008020_SOFT_RESET_VC(1) |
- S_008020_SOFT_RESET_VGT(1);
- else
- grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
- S_008020_SOFT_RESET_DB(1) |
- S_008020_SOFT_RESET_CB(1) |
- S_008020_SOFT_RESET_PA(1) |
- S_008020_SOFT_RESET_SC(1) |
- S_008020_SOFT_RESET_SMX(1) |
- S_008020_SOFT_RESET_SPI(1) |
- S_008020_SOFT_RESET_SX(1) |
- S_008020_SOFT_RESET_SH(1) |
- S_008020_SOFT_RESET_TC(1) |
- S_008020_SOFT_RESET_TA(1) |
- S_008020_SOFT_RESET_VC(1) |
- S_008020_SOFT_RESET_VGT(1);
- }
-
- if (reset_mask & RADEON_RESET_CP) {
- grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
- S_008020_SOFT_RESET_VGT(1);
-
- srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
- }
-
- if (reset_mask & RADEON_RESET_DMA) {
- if (rdev->family >= CHIP_RV770)
- srbm_soft_reset |= RV770_SOFT_RESET_DMA;
- else
- srbm_soft_reset |= SOFT_RESET_DMA;
- }
-
- if (reset_mask & RADEON_RESET_RLC)
- srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);
-
- if (reset_mask & RADEON_RESET_SEM)
- srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);
-
- if (reset_mask & RADEON_RESET_IH)
- srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);
-
- if (reset_mask & RADEON_RESET_GRBM)
- srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
-
- if (!(rdev->flags & RADEON_IS_IGP)) {
- if (reset_mask & RADEON_RESET_MC)
- srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
- }
-
- if (reset_mask & RADEON_RESET_VMC)
- srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);
-
- if (grbm_soft_reset) {
- tmp = RREG32(R_008020_GRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(R_008020_GRBM_SOFT_RESET, tmp);
- tmp = RREG32(R_008020_GRBM_SOFT_RESET);
-
- udelay(50);
-
- tmp &= ~grbm_soft_reset;
- WREG32(R_008020_GRBM_SOFT_RESET, tmp);
- tmp = RREG32(R_008020_GRBM_SOFT_RESET);
- }
-
- if (srbm_soft_reset) {
- tmp = RREG32(SRBM_SOFT_RESET);
- tmp |= srbm_soft_reset;
- dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
-
- udelay(50);
-
- tmp &= ~srbm_soft_reset;
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
- }
-
- /* Wait a little for things to settle down */
- mdelay(1);
-
- rv515_mc_resume(rdev, &save);
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+ RREG32(SRBM_SOFT_RESET);
udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
- r600_print_gpu_status_regs(rdev);
+ dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
}
-static void r600_gpu_pci_config_reset(struct radeon_device *rdev)
+static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
struct rv515_mc_save save;
- u32 tmp, i;
- dev_info(rdev->dev, "GPU pci config reset\n");
-
- /* disable dpm? */
-
- /* Disable CP parsing/prefetching */
- if (rdev->family >= CHIP_RV770)
- WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
- else
- WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
- /* disable the RLC */
- WREG32(RLC_CNTL, 0);
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ reset_mask &= ~RADEON_RESET_DMA;
- /* Disable DMA */
- tmp = RREG32(DMA_RB_CNTL);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL, tmp);
+ if (reset_mask == 0)
+ return 0;
- mdelay(50);
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
- /* set mclk/sclk to bypass */
- if (rdev->family >= CHIP_RV770)
- rv770_set_clk_bypass_mode(rdev);
- /* disable BM */
- pci_clear_master(rdev->pdev);
- /* disable mem access */
rv515_mc_stop(rdev, &save);
if (r600_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
- /* BIF reset workaround. Not sure if this is needed on 6xx */
- tmp = RREG32(BUS_CNTL);
- tmp |= VGA_COHE_SPEC_TIMER_DIS;
- WREG32(BUS_CNTL, tmp);
-
- tmp = RREG32(BIF_SCRATCH0);
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+ r600_gpu_soft_reset_gfx(rdev);
- /* reset */
- radeon_pci_config_reset(rdev);
- mdelay(1);
+ if (reset_mask & RADEON_RESET_DMA)
+ r600_gpu_soft_reset_dma(rdev);
- /* BIF reset workaround. Not sure if this is needed on 6xx */
- tmp = SOFT_RESET_BIF;
- WREG32(SRBM_SOFT_RESET, tmp);
+ /* Wait a little for things to settle down */
mdelay(1);
- WREG32(SRBM_SOFT_RESET, 0);
- /* wait for asic to come out of reset */
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
- break;
- udelay(1);
- }
+ rv515_mc_resume(rdev, &save);
+ return 0;
}
-int r600_asic_reset(struct radeon_device *rdev)
+bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
- u32 reset_mask;
-
- reset_mask = r600_gpu_check_soft_reset(rdev);
-
- if (reset_mask)
- r600_set_bios_scratch_engine_hung(rdev, true);
-
- /* try soft reset */
- r600_gpu_soft_reset(rdev, reset_mask);
-
- reset_mask = r600_gpu_check_soft_reset(rdev);
-
- /* try pci config reset */
- if (reset_mask && radeon_hard_reset)
- r600_gpu_pci_config_reset(rdev);
-
- reset_mask = r600_gpu_check_soft_reset(rdev);
-
- if (!reset_mask)
- r600_set_bios_scratch_engine_hung(rdev, false);
-
- return 0;
+ u32 srbm_status;
+ u32 grbm_status;
+ u32 grbm_status2;
+
+ srbm_status = RREG32(R_000E50_SRBM_STATUS);
+ grbm_status = RREG32(R_008010_GRBM_STATUS);
+ grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
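+	/* if the GFX engine reports idle, refresh the lockup tracking
+	 * and report no hang; otherwise poke the ring and let the
+	 * generic lockup test decide */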
+ if (!G_008010_GUI_ACTIVE(grbm_status)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force CP activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
/**
- * r600_gfx_is_lockup - Check if the GFX engine is locked up
+ * r600_dma_is_lockup - Check if the DMA engine is locked up
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
- * Check if the GFX engine is locked up.
+ * Check if the async DMA engine is locked up (r6xx-evergreen).
* Returns true if the engine appears to be locked up, false if not.
*/
-bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
- u32 reset_mask = r600_gpu_check_soft_reset(rdev);
+ u32 dma_status_reg;
- if (!(reset_mask & (RADEON_RESET_GFX |
- RADEON_RESET_COMPUTE |
- RADEON_RESET_CP))) {
- radeon_ring_lockup_update(rdev, ring);
+ dma_status_reg = RREG32(DMA_STATUS_REG);
+ if (dma_status_reg & DMA_IDLE) {
+ radeon_ring_lockup_update(ring);
return false;
}
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
+int r600_asic_reset(struct radeon_device *rdev)
+{
+ return r600_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_DMA));
+}
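+/*
+ * Usage sketch: the lockup checks above only report state, recovery
+ * is driven by the caller, roughly
+ *
+ *	if (r600_gpu_is_lockup(rdev, ring))
+ *		r600_asic_reset(rdev);
+ *
+ * with the full reset path living elsewhere in the driver.
+ */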
+
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 tiling_pipe_num,
u32 max_rb_num,
@@ -1974,6 +1510,7 @@ static void r600_gpu_init(struct radeon_device *rdev)
{
u32 tiling_config;
u32 ramcfg;
+ u32 cc_rb_backend_disable;
u32 cc_gc_shader_pipe_config;
u32 tmp;
int i, j;
@@ -2100,20 +1637,26 @@ static void r600_gpu_init(struct radeon_device *rdev)
}
tiling_config |= BANK_SWAPS(1);
+ cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+ tmp = R6XX_MAX_BACKENDS -
+ r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
+ if (tmp < rdev->config.r600.max_backends) {
+ rdev->config.r600.max_backends = tmp;
+ }
+
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
- tmp = rdev->config.r600.max_simds -
+ tmp = R6XX_MAX_PIPES -
+ r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
+ if (tmp < rdev->config.r600.max_pipes) {
+ rdev->config.r600.max_pipes = tmp;
+ }
+ tmp = R6XX_MAX_SIMDS -
r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
- rdev->config.r600.active_simds = tmp;
+ if (tmp < rdev->config.r600.max_simds) {
+ rdev->config.r600.max_simds = tmp;
+ }
disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
- tmp = 0;
- for (i = 0; i < rdev->config.r600.max_backends; i++)
- tmp |= (1 << i);
- /* if all the backends are disabled, fix it up here */
- if ((disabled_rb_mask & tmp) == tmp) {
- for (i = 0; i < rdev->config.r600.max_backends; i++)
- disabled_rb_mask &= ~(1 << i);
- }
tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
R6XX_MAX_BACKENDS, disabled_rb_mask);
@@ -2378,27 +1921,20 @@ static void r600_gpu_init(struct radeon_device *rdev)
*/
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
- unsigned long flags;
u32 r;
- spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
(void)RREG32(PCIE_PORT_INDEX);
r = RREG32(PCIE_PORT_DATA);
- spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
return r;
}
void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
- unsigned long flags;
-
- spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
(void)RREG32(PCIE_PORT_INDEX);
WREG32(PCIE_PORT_DATA, (v));
(void)RREG32(PCIE_PORT_DATA);
- spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
}
/*
@@ -2406,8 +1942,7 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
*/
void r600_cp_stop(struct radeon_device *rdev)
{
- if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
WREG32(SCRATCH_UMSK, 0);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
@@ -2417,103 +1952,92 @@ int r600_init_microcode(struct radeon_device *rdev)
{
const char *chip_name;
const char *rlc_chip_name;
- const char *smc_chip_name = "RV770";
- size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0;
+ size_t pfp_req_size, me_req_size, rlc_req_size;
char fw_name[30];
int err;
DRM_DEBUG("\n");
+#if 0
+ pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
+ err = IS_ERR(pdev);
+ if (err) {
+ printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
+ return -EINVAL;
+ }
+#endif
+
switch (rdev->family) {
case CHIP_R600:
- chip_name = "R600";
- rlc_chip_name = "R600";
+ chip_name = "r600";
+ rlc_chip_name = "r600";
break;
case CHIP_RV610:
- chip_name = "RV610";
- rlc_chip_name = "R600";
+ chip_name = "rv610";
+ rlc_chip_name = "r600";
break;
case CHIP_RV630:
- chip_name = "RV630";
- rlc_chip_name = "R600";
+ chip_name = "rv630";
+ rlc_chip_name = "r600";
break;
case CHIP_RV620:
- chip_name = "RV620";
- rlc_chip_name = "R600";
+ chip_name = "rv620";
+ rlc_chip_name = "r600";
break;
case CHIP_RV635:
- chip_name = "RV635";
- rlc_chip_name = "R600";
+ chip_name = "rv635";
+ rlc_chip_name = "r600";
break;
case CHIP_RV670:
- chip_name = "RV670";
- rlc_chip_name = "R600";
+ chip_name = "rv670";
+ rlc_chip_name = "r600";
break;
case CHIP_RS780:
case CHIP_RS880:
- chip_name = "RS780";
- rlc_chip_name = "R600";
+ chip_name = "rs780";
+ rlc_chip_name = "r600";
break;
case CHIP_RV770:
- chip_name = "RV770";
- rlc_chip_name = "R700";
- smc_chip_name = "RV770";
- smc_req_size = roundup2(RV770_SMC_UCODE_SIZE, 4);
+ chip_name = "rv770";
+ rlc_chip_name = "r700";
break;
case CHIP_RV730:
- chip_name = "RV730";
- rlc_chip_name = "R700";
- smc_chip_name = "RV730";
- smc_req_size = roundup2(RV730_SMC_UCODE_SIZE, 4);
+ case CHIP_RV740:
+ chip_name = "rv730";
+ rlc_chip_name = "r700";
break;
case CHIP_RV710:
- chip_name = "RV710";
- rlc_chip_name = "R700";
- smc_chip_name = "RV710";
- smc_req_size = roundup2(RV710_SMC_UCODE_SIZE, 4);
- break;
- case CHIP_RV740:
- chip_name = "RV730";
- rlc_chip_name = "R700";
- smc_chip_name = "RV740";
- smc_req_size = roundup2(RV740_SMC_UCODE_SIZE, 4);
+ chip_name = "rv710";
+ rlc_chip_name = "r700";
break;
case CHIP_CEDAR:
- chip_name = "CEDAR";
- rlc_chip_name = "CEDAR";
- smc_chip_name = "CEDAR";
- smc_req_size = roundup2(CEDAR_SMC_UCODE_SIZE, 4);
+ chip_name = "cedar";
+ rlc_chip_name = "cedar";
break;
case CHIP_REDWOOD:
- chip_name = "REDWOOD";
- rlc_chip_name = "REDWOOD";
- smc_chip_name = "REDWOOD";
- smc_req_size = roundup2(REDWOOD_SMC_UCODE_SIZE, 4);
+ chip_name = "redwood";
+ rlc_chip_name = "redwood";
break;
case CHIP_JUNIPER:
- chip_name = "JUNIPER";
- rlc_chip_name = "JUNIPER";
- smc_chip_name = "JUNIPER";
- smc_req_size = roundup2(JUNIPER_SMC_UCODE_SIZE, 4);
+ chip_name = "juniper";
+ rlc_chip_name = "juniper";
break;
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
- chip_name = "CYPRESS";
- rlc_chip_name = "CYPRESS";
- smc_chip_name = "CYPRESS";
- smc_req_size = roundup2(CYPRESS_SMC_UCODE_SIZE, 4);
+ chip_name = "cypress";
+ rlc_chip_name = "cypress";
break;
case CHIP_PALM:
- chip_name = "PALM";
- rlc_chip_name = "SUMO";
+ chip_name = "palm";
+ rlc_chip_name = "sumo";
break;
case CHIP_SUMO:
- chip_name = "SUMO";
- rlc_chip_name = "SUMO";
+ chip_name = "sumo";
+ rlc_chip_name = "sumo";
break;
case CHIP_SUMO2:
- chip_name = "SUMO2";
- rlc_chip_name = "SUMO";
+ chip_name = "sumo2";
+ rlc_chip_name = "sumo";
break;
default: BUG();
}
@@ -2527,113 +2051,69 @@ int r600_init_microcode(struct radeon_device *rdev)
me_req_size = R700_PM4_UCODE_SIZE * 4;
rlc_req_size = R700_RLC_UCODE_SIZE * 4;
} else {
- pfp_req_size = R600_PFP_UCODE_SIZE * 4;
- me_req_size = R600_PM4_UCODE_SIZE * 12;
- rlc_req_size = R600_RLC_UCODE_SIZE * 4;
+ pfp_req_size = PFP_UCODE_SIZE * 4;
+ me_req_size = PM4_UCODE_SIZE * 12;
+ rlc_req_size = RLC_UCODE_SIZE * 4;
}
DRM_INFO("Loading %s Microcode\n", chip_name);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
- err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
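+	/* loadfirmware(9) pulls the image from /etc/firmware and
+	 * malloc(9)s the buffer, hence the free(9) calls on the
+	 * error path below */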
+ snprintf(fw_name, sizeof(fw_name), "radeon-%s_pfp", chip_name);
+ err = loadfirmware(fw_name, &rdev->pfp_fw, &rdev->pfp_fw_size);
if (err)
goto out;
- if (rdev->pfp_fw->size != pfp_req_size) {
- printk(KERN_ERR
+ if (rdev->pfp_fw_size != pfp_req_size) {
+ DRM_ERROR(
"r600_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->pfp_fw->size, fw_name);
+ rdev->pfp_fw_size, fw_name);
err = -EINVAL;
goto out;
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
- err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
+ snprintf(fw_name, sizeof(fw_name), "radeon-%s_me", chip_name);
+ err = loadfirmware(fw_name, &rdev->me_fw, &rdev->me_fw_size);
if (err)
goto out;
- if (rdev->me_fw->size != me_req_size) {
- printk(KERN_ERR
+ if (rdev->me_fw_size != me_req_size) {
+ DRM_ERROR(
"r600_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->me_fw->size, fw_name);
+ rdev->me_fw_size, fw_name);
err = -EINVAL;
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
- err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
+ snprintf(fw_name, sizeof(fw_name), "radeon-%s_rlc", rlc_chip_name);
+ err = loadfirmware(fw_name, &rdev->rlc_fw, &rdev->rlc_fw_size);
if (err)
goto out;
- if (rdev->rlc_fw->size != rlc_req_size) {
- printk(KERN_ERR
+ if (rdev->rlc_fw_size != rlc_req_size) {
+ DRM_ERROR(
"r600_rlc: Bogus length %zu in firmware \"%s\"\n",
- rdev->rlc_fw->size, fw_name);
+ rdev->rlc_fw_size, fw_name);
err = -EINVAL;
}
- if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
- err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
- if (err) {
- printk(KERN_ERR
- "smc: error loading firmware \"%s\"\n",
- fw_name);
- release_firmware(rdev->smc_fw);
- rdev->smc_fw = NULL;
- err = 0;
- } else if (rdev->smc_fw->size != smc_req_size) {
- printk(KERN_ERR
- "smc: Bogus length %zu in firmware \"%s\"\n",
- rdev->smc_fw->size, fw_name);
- err = -EINVAL;
- }
- }
-
out:
if (err) {
if (err != -EINVAL)
printk(KERN_ERR
"r600_cp: Failed to load firmware \"%s\"\n",
fw_name);
- release_firmware(rdev->pfp_fw);
- rdev->pfp_fw = NULL;
- release_firmware(rdev->me_fw);
- rdev->me_fw = NULL;
- release_firmware(rdev->rlc_fw);
- rdev->rlc_fw = NULL;
- release_firmware(rdev->smc_fw);
- rdev->smc_fw = NULL;
+ if (rdev->pfp_fw) {
+ free(rdev->pfp_fw, M_DEVBUF, 0);
+ rdev->pfp_fw = NULL;
+ }
+ if (rdev->me_fw) {
+ free(rdev->me_fw, M_DEVBUF, 0);
+ rdev->me_fw = NULL;
+ }
+ if (rdev->rlc_fw) {
+ free(rdev->rlc_fw, M_DEVBUF, 0);
+ rdev->rlc_fw = NULL;
+ }
}
return err;
}
-u32 r600_gfx_get_rptr(struct radeon_device *rdev,
- struct radeon_ring *ring)
-{
- u32 rptr;
-
- if (rdev->wb.enabled)
- rptr = rdev->wb.wb[ring->rptr_offs/4];
- else
- rptr = RREG32(R600_CP_RB_RPTR);
-
- return rptr;
-}
-
-u32 r600_gfx_get_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring)
-{
- u32 wptr;
-
- wptr = RREG32(R600_CP_RB_WPTR);
-
- return wptr;
-}
-
-void r600_gfx_set_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring)
-{
- WREG32(R600_CP_RB_WPTR, ring->wptr);
- (void)RREG32(R600_CP_RB_WPTR);
-}
-
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
@@ -2658,15 +2138,15 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
WREG32(CP_ME_RAM_WADDR, 0);
- fw_data = (const __be32 *)rdev->me_fw->data;
+ fw_data = (const __be32 *)rdev->me_fw;
WREG32(CP_ME_RAM_WADDR, 0);
- for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++)
+ for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
WREG32(CP_ME_RAM_DATA,
be32_to_cpup(fw_data++));
- fw_data = (const __be32 *)rdev->pfp_fw->data;
+ fw_data = (const __be32 *)rdev->pfp_fw;
WREG32(CP_PFP_UCODE_ADDR, 0);
- for (i = 0; i < R600_PFP_UCODE_SIZE; i++)
+ for (i = 0; i < PFP_UCODE_SIZE; i++)
WREG32(CP_PFP_UCODE_DATA,
be32_to_cpup(fw_data++));
@@ -2699,7 +2179,7 @@ int r600_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
- radeon_ring_unlock_commit(rdev, ring, false);
+ radeon_ring_unlock_commit(rdev, ring);
cp_me = 0xff;
WREG32(R_0086D8_CP_ME_CNTL, cp_me);
@@ -2720,8 +2200,8 @@ int r600_cp_resume(struct radeon_device *rdev)
WREG32(GRBM_SOFT_RESET, 0);
/* Set ring buffer size */
- rb_bufsz = order_base_2(ring->ring_size / 8);
- tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = drm_order(ring->ring_size / 8);
+ tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -2756,6 +2236,8 @@ int r600_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
+ ring->rptr = RREG32(CP_RB_RPTR);
+
r600_cp_start(rdev);
ring->ready = true;
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
@@ -2763,10 +2245,6 @@ int r600_cp_resume(struct radeon_device *rdev)
ring->ready = false;
return r;
}
-
- if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
-
return 0;
}
@@ -2776,7 +2254,7 @@ void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsign
int r;
/* Align ring size */
- rb_bufsz = order_base_2(ring_size / 8);
+ rb_bufsz = drm_order(ring_size / 8);
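+	/* drm_order() is the legacy log2 helper: it returns the
+	 * smallest order with (1 << order) >= its argument, rounding
+	 * the ring size up to a power of two */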
ring_size = (1 << (rb_bufsz + 1)) * 4;
ring->ring_size = ring_size;
ring->align_mask = 16 - 1;
@@ -2799,6 +2277,133 @@ void r600_cp_fini(struct radeon_device *rdev)
}
/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine. The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things. It also
+ * has support for tiling/detiling of buffers.
+ */
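+/*
+ * For illustration (mirroring r600_dma_ring_test() further down), a
+ * minimal DMA write packet stream looks like:
+ *
+ *	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ *	radeon_ring_write(ring, dst_gpu_addr & 0xfffffffc);
+ *	radeon_ring_write(ring, upper_32_bits(dst_gpu_addr) & 0xff);
+ *	radeon_ring_write(ring, 0xDEADBEEF);
+ *
+ * where the header dword carries the opcode and dword count, and
+ * dst_gpu_addr stands in for any dword-aligned GPU destination address.
+ */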
+/**
+ * r600_dma_stop - stop the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine (r6xx-evergreen).
+ */
+void r600_dma_stop(struct radeon_device *rdev)
+{
+ u32 rb_cntl = RREG32(DMA_RB_CNTL);
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, rb_cntl);
+
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+}
+
+/**
+ * r600_dma_resume - setup and start the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffer and enable it (r6xx-evergreen).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ u32 rb_cntl, dma_cntl, ib_cntl;
+ u32 rb_bufsz;
+ int r;
+
+ /* Reset dma */
+ if (rdev->family >= CHIP_RV770)
+ WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+ else
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
+ WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+
+ /* Set ring buffer size in dwords */
+ rb_bufsz = drm_order(ring->ring_size / 4);
+ rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+ rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(DMA_RB_CNTL, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(DMA_RB_RPTR, 0);
+ WREG32(DMA_RB_WPTR, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(DMA_RB_RPTR_ADDR_HI,
+ upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
+ WREG32(DMA_RB_RPTR_ADDR_LO,
+ ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
+
+ if (rdev->wb.enabled)
+ rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+ WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
+
+ /* enable DMA IBs */
+ ib_cntl = DMA_IB_ENABLE;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+ WREG32(DMA_IB_CNTL, ib_cntl);
+
+ dma_cntl = RREG32(DMA_CNTL);
+ dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+ WREG32(DMA_CNTL, dma_cntl);
+
+ if (rdev->family >= CHIP_RV770)
+ WREG32(DMA_MODE, 1);
+
+ ring->wptr = 0;
+ WREG32(DMA_RB_WPTR, ring->wptr << 2);
+
+ ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
+
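+	/* the ring is fully programmed, flip the enable bit to start it */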
+ WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
+
+ ring->ready = true;
+
+ r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+ return 0;
+}
+
+/**
+ * r600_dma_fini - tear down the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine and free the ring (r6xx-evergreen).
+ */
+void r600_dma_fini(struct radeon_device *rdev)
+{
+ r600_dma_stop(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+}
+
+/*
* GPU scratch registers helpers function.
*/
void r600_scratch_init(struct radeon_device *rdev)
@@ -2835,7 +2440,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring, false);
+ radeon_ring_unlock_commit(rdev, ring);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
@@ -2853,6 +2458,60 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
+/**
+ * r600_dma_ring_test - simple async dma engine test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a
+ * value to memory (r6xx-SI).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_ring_test(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ unsigned i;
+ int r;
+ volatile uint32_t *ptr = rdev->vram_scratch.ptr;
+ u32 tmp;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ *ptr = tmp;
+
+ r = radeon_ring_lock(rdev, ring, 4);
+ if (r) {
+ DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
+ return r;
+ }
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = *ptr;
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ } else {
+ DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+ return r;
+}
+
/*
* CP fences/semaphores
*/
@@ -2878,7 +2537,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
/* EVENT_WRITE_EOP - flush caches, send int */
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
- radeon_ring_write(ring, lower_32_bits(addr));
+ radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
@@ -2905,18 +2564,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
}
}
-/**
- * r600_semaphore_ring_emit - emit a semaphore on the CP ring
- *
- * @rdev: radeon_device pointer
- * @ring: radeon ring buffer object
- * @semaphore: radeon semaphore object
- * @emit_wait: Is this a sempahore wait?
- *
- * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
- * from running ahead of semaphore waits.
- */
-bool r600_semaphore_ring_emit(struct radeon_device *rdev,
+void r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
@@ -2928,21 +2576,84 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
- radeon_ring_write(ring, lower_32_bits(addr));
+ radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
+}
- /* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */
- if (emit_wait && (rdev->family >= CHIP_CEDAR)) {
- /* Prevent the PFP from running ahead of the semaphore wait */
- radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
- radeon_ring_write(ring, 0x0);
- }
+/*
+ * DMA fences/semaphores
+ */
- return true;
+/**
+ * r600_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write the fence
+ * seq number, followed by a DMA trap packet to generate an
+ * interrupt if needed (r6xx-r7xx).
+ */
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+ /* write the fence */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+ radeon_ring_write(ring, lower_32_bits(fence->seq));
+ /* generate an interrupt */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
}
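From a caller's point of view, the seq number written by DMA_PACKET_FENCE is what the fence wait polls for, and the trap packet raises the IH interrupt (handled as "DMA trap" later in this diff) that kicks radeon_fence_process(). A hedged caller-side sketch, using only functions that appear elsewhere in this diff:

	/* Emit a fence on the DMA ring and block until the seq lands;
	 * the trap IRQ wakes the waiter. */
	struct radeon_fence *fence = NULL;
	int r = radeon_fence_emit(rdev, &fence, R600_RING_TYPE_DMA_INDEX);
	if (r == 0)
		r = radeon_fence_wait(fence, false);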
/**
- * r600_copy_cpdma - copy pages using the CP DMA engine
+ * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @semaphore: radeon semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring to wait on or signal
+ * other rings (r6xx-SI).
+ */
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ u64 addr = semaphore->gpu_addr;
+ u32 s = emit_wait ? 0 : 1;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+}
+
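Note the selector: s is 1 for a signal and 0 for a wait (emit_wait ? 0 : 1). Pairing the two operations on the same semaphore address orders work across rings; in practice one side is usually emitted by the CP-ring variant above. A hypothetical pairing, with gfx_ring/dma_ring standing in for the caller's ring pointers:

	/* dma_ring's subsequent packets run only after gfx_ring signals. */
	r600_semaphore_ring_emit(rdev, gfx_ring, sem, false);		/* signal */
	r600_dma_semaphore_ring_emit(rdev, dma_ring, sem, true);	/* wait */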
+int r600_copy_blit(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ struct radeon_sa_bo *vb = NULL;
+ int r;
+
+ r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
+ if (r) {
+ return r;
+ }
+ r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
+ r600_blit_done_copy(rdev, fence, vb, sem);
+ return 0;
+}
+
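r600_copy_blit is what the asic table exposes as the copy callback; the r600_startup hunk below nulls rdev->asic->copy.copy out when blitter init fails, so callers must expect the callback to be absent. A hedged dispatch sketch (member names follow the asic-struct accesses visible elsewhere in this diff):

	if (rdev->asic->copy.copy)
		r = rdev->asic->copy.copy(rdev, src_offset, dst_offset,
					  num_gpu_pages, &fence);
	else
		r = -ENOSYS;	/* ttm falls back to a CPU memcpy path */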
+/**
+ * r600_copy_dma - copy pages using the DMA engine
*
* @rdev: radeon_device pointer
* @src_offset: src GPU address
@@ -2950,72 +2661,69 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
* @num_gpu_pages: number of GPU pages to xfer
* @fence: radeon fence object
*
- * Copy GPU paging using the CP DMA engine (r6xx+).
+ * Copy GPU paging using the DMA engine (r6xx).
* Used by the radeon ttm implementation to move pages if
* registered as the asic copy callback.
*/
-struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct reservation_object *resv)
+int r600_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
{
- struct radeon_fence *fence;
- struct radeon_sync sync;
- int ring_index = rdev->asic->copy.blit_ring_index;
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
struct radeon_ring *ring = &rdev->ring[ring_index];
- u32 size_in_bytes, cur_size_in_bytes, tmp;
+ u32 size_in_dw, cur_size_in_dw;
int i, num_loops;
int r = 0;
- radeon_sync_create(&sync);
-
- size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
- num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
- r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
+ r = radeon_semaphore_create(rdev, &sem);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_sync_free(rdev, &sync, NULL);
- return ERR_PTR(r);
+ return r;
}
- radeon_sync_resv(rdev, &sync, resv, false);
- radeon_sync_rings(rdev, &sync, ring->idx);
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
+ r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(ring, WAIT_3D_IDLE_bit);
- for (i = 0; i < num_loops; i++) {
- cur_size_in_bytes = size_in_bytes;
- if (cur_size_in_bytes > 0x1fffff)
- cur_size_in_bytes = 0x1fffff;
- size_in_bytes -= cur_size_in_bytes;
- tmp = upper_32_bits(src_offset) & 0xff;
- if (size_in_bytes == 0)
- tmp |= PACKET3_CP_DMA_CP_SYNC;
- radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
- radeon_ring_write(ring, lower_32_bits(src_offset));
- radeon_ring_write(ring, tmp);
- radeon_ring_write(ring, lower_32_bits(dst_offset));
- radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
- radeon_ring_write(ring, cur_size_in_bytes);
- src_offset += cur_size_in_bytes;
- dst_offset += cur_size_in_bytes;
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
}
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
- r = radeon_fence_emit(rdev, &fence, ring->idx);
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFE)
+ cur_size_in_dw = 0xFFFE;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
+ (upper_32_bits(src_offset) & 0xff)));
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
- radeon_sync_free(rdev, &sync, NULL);
- return ERR_PTR(r);
+ return r;
}
- radeon_ring_unlock_commit(rdev, ring, false);
- radeon_sync_free(rdev, &sync, fence);
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
- return fence;
+ return r;
}
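A worked example of the sizing above, assuming the usual RADEON_GPU_PAGE_SHIFT of 12 (4 KiB GPU pages): copying 16 pages is 65536 bytes, i.e. 16384 dwords; each COPY packet moves at most 0xFFFE dwords, so one loop suffices and the ring lock reserves 4 dwords per copy packet plus fixed headroom for the semaphore and fence packets.

	u32 size_in_dw = (16 << 12) / 4;			/* 16384 */
	int num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);	/* 1 */
	int ring_dw = num_loops * 4 + 8;			/* 12 */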
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
@@ -3039,13 +2747,20 @@ static int r600_startup(struct radeon_device *rdev)
/* enable pcie gen2 link */
r600_pcie_gen2_enable(rdev);
- /* scratch needs to be initialized before MC */
+ r600_mc_program(rdev);
+
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+
r = r600_vram_scratch_init(rdev);
if (r)
return r;
- r600_mc_program(rdev);
-
if (rdev->flags & RADEON_IS_AGP) {
r600_agp_enable(rdev);
} else {
@@ -3054,6 +2769,12 @@ static int r600_startup(struct radeon_device *rdev)
return r;
}
r600_gpu_init(rdev);
+ r = r600_blit_init(rdev);
+ if (r) {
+ r600_blit_fini(rdev);
+ rdev->asic->copy.copy = NULL;
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ }
/* allocate wb buffer */
r = radeon_wb_init(rdev);
@@ -3066,16 +2787,10 @@ static int r600_startup(struct radeon_device *rdev)
return r;
}
- if (rdev->has_uvd) {
- r = uvd_v1_0_resume(rdev);
- if (!r) {
- r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
- if (r) {
- dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
- }
- }
- if (r)
- rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+ r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+ return r;
}
/* Enable IRQ */
@@ -3095,7 +2810,15 @@ static int r600_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- RADEON_CP_PACKET2);
+ R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+ if (r)
+ return r;
+
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+ DMA_RB_RPTR, DMA_RB_WPTR,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
@@ -3106,17 +2829,9 @@ static int r600_startup(struct radeon_device *rdev)
if (r)
return r;
- if (rdev->has_uvd) {
- ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- RADEON_CP_PACKET2);
- if (!r)
- r = uvd_v1_0_init(rdev);
- if (r)
- DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
- }
- }
+ r = r600_dma_resume(rdev);
+ if (r)
+ return r;
r = radeon_ib_pool_init(rdev);
if (r) {
@@ -3124,7 +2839,7 @@ static int r600_startup(struct radeon_device *rdev)
return r;
}
- r = radeon_audio_init(rdev);
+ r = r600_audio_init(rdev);
if (r) {
DRM_ERROR("radeon: audio init failed\n");
return r;
@@ -3158,9 +2873,6 @@ int r600_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
- if (rdev->pm.pm_method == PM_METHOD_DPM)
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = r600_startup(rdev);
if (r) {
@@ -3174,13 +2886,9 @@ int r600_resume(struct radeon_device *rdev)
int r600_suspend(struct radeon_device *rdev)
{
- radeon_pm_suspend(rdev);
- radeon_audio_fini(rdev);
+ r600_audio_fini(rdev);
r600_cp_stop(rdev);
- if (rdev->has_uvd) {
- uvd_v1_0_fini(rdev);
- radeon_uvd_suspend(rdev);
- }
+ r600_dma_stop(rdev);
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
@@ -3246,27 +2954,11 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
- r = r600_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
-
- /* Initialize power management */
- radeon_pm_init(rdev);
-
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
- if (rdev->has_uvd) {
- r = radeon_uvd_init(rdev);
- if (!r) {
- rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
- r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
- }
- }
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -3280,6 +2972,7 @@ int r600_init(struct radeon_device *rdev)
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r600_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -3293,14 +2986,11 @@ int r600_init(struct radeon_device *rdev)
void r600_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
- radeon_audio_fini(rdev);
+ r600_audio_fini(rdev);
+ r600_blit_fini(rdev);
r600_cp_fini(rdev);
+ r600_dma_fini(rdev);
r600_irq_fini(rdev);
- if (rdev->has_uvd) {
- uvd_v1_0_fini(rdev);
- radeon_uvd_fini(rdev);
- }
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
@@ -3372,7 +3062,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;
- r = radeon_ib_schedule(rdev, &ib, NULL, false);
+ r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
goto free_ib;
@@ -3402,6 +3092,104 @@ free_scratch:
return r;
}
+/**
+ * r600_dma_ib_test - test an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (r6xx-SI).
+ * Returns 0 on success, error on failure.
+ */
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ struct radeon_ib ib;
+ unsigned i;
+ int r;
+ volatile uint32_t *ptr = rdev->vram_scratch.ptr;
+ u32 tmp = 0;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ *ptr = tmp;
+
+ r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+ if (r) {
+ DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+ return r;
+ }
+
+ ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
+ ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
+ ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
+ ib.ptr[3] = 0xDEADBEEF;
+ ib.length_dw = 4;
+
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ radeon_ib_free(rdev, &ib);
+ DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+ return r;
+ }
+ r = radeon_fence_wait(ib.fence, false);
+ if (r) {
+ DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ return r;
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = *ptr;
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+ } else {
+ DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
+ r = -EINVAL;
+ }
+ radeon_ib_free(rdev, &ib);
+ return r;
+}
+
+/**
+ * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (r6xx-r7xx).
+ */
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
+
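A worked example of the alignment rule above: the INDIRECT_BUFFER packet is 3 dwords and must end on an 8-dword boundary, so its header has to start where (wptr & 7) == 5. Starting from wptr == 16:

	/* slots 16..20 get five NOPs, so (wptr & 7) becomes 5;
	 * slots 21..23 hold the IB header, address and size;
	 * the packet ends at 24, a multiple of 8. */
	while ((wptr & 7) != 5)
		wptr++;		/* one NOP per skipped slot */

The next_rptr computation at the top of the function applies the same rule to predict where wptr will land after the IB packet, so the write-back value stays in step.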
/*
* Interrupts
*
@@ -3418,7 +3206,7 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
u32 rb_bufsz;
/* Align ring size */
- rb_bufsz = order_base_2(ring_size / 4);
+ rb_bufsz = drm_order(ring_size / 4);
ring_size = (1 << rb_bufsz) * 4;
rdev->ih.ring_size = ring_size;
rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
@@ -3433,8 +3221,8 @@ int r600_ih_ring_alloc(struct radeon_device *rdev)
if (rdev->ih.ring_obj == NULL) {
r = radeon_bo_create(rdev, rdev->ih.ring_size,
PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, 0,
- NULL, NULL, &rdev->ih.ring_obj);
+ RADEON_GEM_DOMAIN_GTT,
+ NULL, &rdev->ih.ring_obj);
if (r) {
DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
return r;
@@ -3498,7 +3286,7 @@ static void r600_rlc_start(struct radeon_device *rdev)
WREG32(RLC_CNTL, RLC_ENABLE);
}
-static int r600_rlc_resume(struct radeon_device *rdev)
+static int r600_rlc_init(struct radeon_device *rdev)
{
u32 i;
const __be32 *fw_data;
@@ -3510,22 +3298,45 @@ static int r600_rlc_resume(struct radeon_device *rdev)
WREG32(RLC_HB_CNTL, 0);
- WREG32(RLC_HB_BASE, 0);
- WREG32(RLC_HB_RPTR, 0);
- WREG32(RLC_HB_WPTR, 0);
- WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
- WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
+ if (rdev->family == CHIP_ARUBA) {
+ WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+ WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
+ }
+ if (rdev->family <= CHIP_CAYMAN) {
+ WREG32(RLC_HB_BASE, 0);
+ WREG32(RLC_HB_RPTR, 0);
+ WREG32(RLC_HB_WPTR, 0);
+ }
+ if (rdev->family <= CHIP_CAICOS) {
+ WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
+ WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
+ }
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
- fw_data = (const __be32 *)rdev->rlc_fw->data;
- if (rdev->family >= CHIP_RV770) {
+ fw_data = (const __be32 *)rdev->rlc_fw;
+ if (rdev->family >= CHIP_ARUBA) {
+ for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ } else if (rdev->family >= CHIP_CAYMAN) {
+ for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ } else if (rdev->family >= CHIP_CEDAR) {
+ for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ } else if (rdev->family >= CHIP_RV770) {
for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
}
} else {
- for (i = 0; i < R600_RLC_UCODE_SIZE; i++) {
+ for (i = 0; i < RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
}
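Every branch of the family ladder above runs the same two-register loop and differs only in the dword count, so it could be collapsed; a minimal consolidation sketch (the helper name is invented; the *_RLC_UCODE_SIZE constants are the ones the ladder already uses):

	static void rlc_load_ucode_sketch(const __be32 *fw_data, unsigned dwords)
	{
		unsigned i;

		for (i = 0; i < dwords; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}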
@@ -3633,10 +3444,7 @@ int r600_irq_init(struct radeon_device *rdev)
r600_disable_interrupts(rdev);
/* init rlc */
- if (rdev->family >= CHIP_CEDAR)
- ret = evergreen_rlc_resume(rdev);
- else
- ret = r600_rlc_resume(rdev);
+ ret = r600_rlc_init(rdev);
if (ret) {
r600_ih_ring_fini(rdev);
return ret;
@@ -3655,7 +3463,7 @@ int r600_irq_init(struct radeon_device *rdev)
WREG32(INTERRUPT_CNTL, interrupt_cntl);
WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
- rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
+ rb_bufsz = drm_order(rdev->ih.ring_size / 4);
ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
IH_WPTR_OVERFLOW_CLEAR |
@@ -3688,7 +3496,9 @@ int r600_irq_init(struct radeon_device *rdev)
r600_disable_interrupt_state(rdev);
/* at this point everything should be setup correctly to enable master */
+#ifdef notyet
pci_set_master(rdev->pdev);
+#endif
/* enable irqs */
r600_enable_interrupts(rdev);
@@ -3715,8 +3525,8 @@ int r600_irq_set(struct radeon_device *rdev)
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
u32 grbm_int_cntl = 0;
u32 hdmi0, hdmi1;
+ u32 d1grph = 0, d2grph = 0;
u32 dma_cntl;
- u32 thermal_int = 0;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3751,21 +3561,8 @@ int r600_irq_set(struct radeon_device *rdev)
hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
}
-
dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
- if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
- thermal_int = RREG32(CG_THERMAL_INT) &
- ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
- } else if (rdev->family >= CHIP_RV770) {
- thermal_int = RREG32(RV770_CG_THERMAL_INT) &
- ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
- }
- if (rdev->irq.dpm_thermal) {
- DRM_DEBUG("dpm thermal\n");
- thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
- }
-
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("r600_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE;
@@ -3823,8 +3620,8 @@ int r600_irq_set(struct radeon_device *rdev)
WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(DMA_CNTL, dma_cntl);
WREG32(DxMODE_INT_MASK, mode_int);
- WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
- WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
+ WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
+ WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
if (ASIC_IS_DCE3(rdev)) {
WREG32(DC_HPD1_INT_CONTROL, hpd1);
@@ -3847,14 +3644,6 @@ int r600_irq_set(struct radeon_device *rdev)
WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
}
- if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
- WREG32(CG_THERMAL_INT, thermal_int);
- } else if (rdev->family >= CHIP_RV770) {
- WREG32(RV770_CG_THERMAL_INT, thermal_int);
- }
-
- /* posting read */
- RREG32(R_000E50_SRBM_STATUS);
return 0;
}
@@ -3941,7 +3730,7 @@ static void r600_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
- tmp = RREG32(DC_HPD6_INT_CONTROL);
+ tmp = RREG32(DC_HPD5_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
@@ -3994,13 +3783,12 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
wptr = RREG32(IH_RB_WPTR);
if (wptr & RB_OVERFLOW) {
- wptr &= ~RB_OVERFLOW;
/* When a ring buffer overflow happens, start parsing interrupts
* from the last not overwritten vector (wptr + 16). Hopefully
* this should allow us to catch up.
*/
- dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
- wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+ wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
@@ -4047,7 +3835,6 @@ int r600_irq_process(struct radeon_device *rdev)
u32 ring_index;
bool queue_hotplug = false;
bool queue_hdmi = false;
- bool queue_thermal = false;
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
@@ -4058,6 +3845,8 @@ int r600_irq_process(struct radeon_device *rdev)
wptr = r600_get_ih_wptr(rdev);
+ if (wptr == rdev->ih.rptr)
+ return IRQ_NONE;
restart_ih:
/* is somebody else already processing irqs? */
if (atomic_xchg(&rdev->ih.lock, 1))
@@ -4082,27 +3871,23 @@ restart_ih:
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))
- DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
-
- if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_flip(rdev, 0);
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
}
- if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_vblank(rdev, 0);
- rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D1 vblank\n");
-
break;
case 1: /* D1 vline */
- if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))
- DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D1 vline\n");
-
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+ }
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4112,93 +3897,72 @@ restart_ih:
case 5: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))
- DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
-
- if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_flip(rdev, 1);
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
}
- if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_vblank(rdev, 1);
- rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D2 vblank\n");
-
break;
case 1: /* D1 vline */
- if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))
- DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D2 vline\n");
-
+ if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+ }
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
- case 9: /* D1 pflip */
- DRM_DEBUG("IH: D1 flip\n");
- if (radeon_use_pflipirq > 0)
- radeon_crtc_handle_flip(rdev, 0);
- break;
- case 11: /* D2 pflip */
- DRM_DEBUG("IH: D2 flip\n");
- if (radeon_use_pflipirq > 0)
- radeon_crtc_handle_flip(rdev, 1);
- break;
case 19: /* HPD/DAC hotplug */
switch (src_data) {
case 0:
- if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))
- DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD1\n");
+ if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
+ }
break;
case 1:
- if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))
- DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD2\n");
+ if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
+ }
break;
case 4:
- if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))
- DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD3\n");
+ if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
+ }
break;
case 5:
- if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))
- DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD4\n");
+ if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
+ }
break;
case 10:
- if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT))
- DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD5\n");
+ if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
+ }
break;
case 12:
- if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT))
- DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD6\n");
-
+ if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
+ rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+ }
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4208,32 +3972,24 @@ restart_ih:
case 21: /* hdmi */
switch (src_data) {
case 4:
- if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG))
- DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI0\n");
-
+ if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI0\n");
+ }
break;
case 5:
- if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG))
- DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
- queue_hdmi = true;
- DRM_DEBUG("IH: HDMI1\n");
-
+ if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
+ rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+ queue_hdmi = true;
+ DRM_DEBUG("IH: HDMI1\n");
+ }
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
- case 124: /* UVD */
- DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
- radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
- break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
@@ -4248,16 +4004,6 @@ restart_ih:
DRM_DEBUG("IH: DMA trap\n");
radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
break;
- case 230: /* thermal low to high */
- DRM_DEBUG("IH: thermal low to high\n");
- rdev->pm.dpm.thermal.high_to_low = false;
- queue_thermal = true;
- break;
- case 231: /* thermal high to low */
- DRM_DEBUG("IH: thermal high to low\n");
- rdev->pm.dpm.thermal.high_to_low = true;
- queue_thermal = true;
- break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
break;
@@ -4269,15 +4015,13 @@ restart_ih:
/* wptr/rptr are in bytes! */
rptr += 16;
rptr &= rdev->ih.ptr_mask;
- WREG32(IH_RB_RPTR, rptr);
}
if (queue_hotplug)
- schedule_delayed_work(&rdev->hotplug_work, 0);
+ task_add(systq, &rdev->hotplug_task);
if (queue_hdmi)
- schedule_work(&rdev->audio_work);
- if (queue_thermal && rdev->pm.dpm_enabled)
- schedule_work(&rdev->pm.dpm.thermal.work);
+ task_add(systq, &rdev->audio_task);
rdev->ih.rptr = rptr;
+ WREG32(IH_RB_RPTR, rdev->ih.rptr);
atomic_set(&rdev->ih.lock, 0);
/* make sure wptr hasn't changed while processing */
@@ -4319,15 +4063,16 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
}
/**
- * r600_mmio_hdp_flush - flush Host Data Path cache via MMIO
+ * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
* rdev: radeon device structure
+ * bo: buffer object struct which userspace is waiting for idle
*
- * Some R6XX/R7XX don't seem to take into account HDP flushes performed
- * through the ring buffer. This leads to corruption in rendering, see
- * http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we
- * directly perform the HDP flush by writing the register through MMIO.
+ * Some R6XX/R7XX don't seem to take into account the HDP flush performed
+ * through the ring buffer; this leads to corruption in rendering, see
+ * http://bugzilla.kernel.org/show_bug.cgi?id=15186; to avoid this we
+ * directly perform the HDP flush by writing the register through MMIO.
*/
-void r600_mmio_hdp_flush(struct radeon_device *rdev)
+void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
* rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
@@ -4336,18 +4081,18 @@ void r600_mmio_hdp_flush(struct radeon_device *rdev)
*/
if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ volatile uint32_t *ptr = rdev->vram_scratch.ptr;
u32 tmp;
WREG32(HDP_DEBUG1, 0);
- tmp = readl((void __iomem *)ptr);
+ tmp = *ptr;
} else
WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
- u32 link_width_cntl, mask;
+ u32 link_width_cntl, mask, target_reg;
if (rdev->flags & RADEON_IS_IGP)
return;
@@ -4359,7 +4104,7 @@ void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
if (ASIC_IS_X2(rdev))
return;
- radeon_gui_idle(rdev);
+ /* FIXME wait for idle */
switch (lanes) {
case 0:
@@ -4378,24 +4123,53 @@ void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
break;
case 12:
- /* not actually supported */
mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
break;
case 16:
+ default:
mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
break;
- default:
- DRM_ERROR("invalid pcie lane request: %d\n", lanes);
- return;
}
- link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
- link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
- link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
- link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
- R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
+ link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+ if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
+ (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
+ return;
+
+ if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
+ return;
+
+ link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
+ RADEON_PCIE_LC_RECONFIG_NOW |
+ R600_PCIE_LC_RENEGOTIATE_EN |
+ R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
+ link_width_cntl |= mask;
+
+ WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+
+ /* some northbridges can renegotiate the link rather than requiring
+ * a complete re-config.
+ * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
+ */
+ if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
+ link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
+ else
+ link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;
+
+ WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
+ RADEON_PCIE_LC_RECONFIG_NOW));
+
+ if (rdev->family >= CHIP_RV770)
+ target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
+ else
+ target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;
+
+ /* wait for lane set to complete */
+ link_width_cntl = RREG32(target_reg);
+ while (link_width_cntl == 0xffffffff)
+ link_width_cntl = RREG32(target_reg);
- WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
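A hypothetical caller, e.g. a power-management path narrowing the link: request the width, then read it back, since the reconfiguration completes asynchronously and the final loop above only waits until the bus reads back sanely (non-0xffffffff):

	r600_set_pcie_lanes(rdev, 8);
	if (r600_get_pcie_lanes(rdev) != 8)
		DRM_DEBUG("lane reconfiguration still pending\n");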
int r600_get_pcie_lanes(struct radeon_device *rdev)
@@ -4412,11 +4186,13 @@ int r600_get_pcie_lanes(struct radeon_device *rdev)
if (ASIC_IS_X2(rdev))
return 0;
- radeon_gui_idle(rdev);
+ /* FIXME wait for idle */
- link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
+ case RADEON_PCIE_LC_LINK_WIDTH_X0:
+ return 0;
case RADEON_PCIE_LC_LINK_WIDTH_X1:
return 1;
case RADEON_PCIE_LC_LINK_WIDTH_X2:
@@ -4425,10 +4201,6 @@ int r600_get_pcie_lanes(struct radeon_device *rdev)
return 4;
case RADEON_PCIE_LC_LINK_WIDTH_X8:
return 8;
- case RADEON_PCIE_LC_LINK_WIDTH_X12:
- /* not actually supported */
- return 12;
- case RADEON_PCIE_LC_LINK_WIDTH_X0:
case RADEON_PCIE_LC_LINK_WIDTH_X16:
default:
return 16;
@@ -4440,6 +4212,7 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
u16 link_cntl2;
u32 mask;
+ int ret;
if (radeon_pcie_gen2 == 0)
return;
@@ -4458,13 +4231,14 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
if (rdev->family <= CHIP_R600)
return;
- if (drm_pcie_get_speed_cap_mask(rdev->ddev, &mask))
+ ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
+ if (ret != 0)
return;
- if (!(mask & (DRM_PCIE_SPEED_50|DRM_PCIE_SPEED_80)))
+ if (!(mask & DRM_PCIE_SPEED_50))
return;
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
if (speed_cntl & LC_CURRENT_DATA_RATE) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
@@ -4477,23 +4251,23 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
(rdev->family == CHIP_RV620) ||
(rdev->family == CHIP_RV635)) {
/* advertise upconfig capability */
- link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
- link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
LC_RECONFIG_ARC_MISSING_ESCAPE);
link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
} else {
link_width_cntl |= LC_UPCONFIGURE_DIS;
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
}
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
(speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
@@ -4514,7 +4288,7 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
- WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
tmp = RREG32(0x541c);
WREG32(0x541c, tmp | 0x8);
@@ -4528,39 +4302,39 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
if ((rdev->family == CHIP_RV670) ||
(rdev->family == CHIP_RV620) ||
(rdev->family == CHIP_RV635)) {
- training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
+ training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
training_cntl &= ~LC_POINT_7_PLUS_EN;
- WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
+ WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
} else {
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
- WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
}
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_GEN2_EN_STRAP;
- WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
} else {
- link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
if (1)
link_width_cntl |= LC_UPCONFIGURE_DIS;
else
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
}
/**
- * r600_get_gpu_clock_counter - return GPU clock counter snapshot
+ * r600_get_gpu_clock - return GPU clock counter snapshot
*
* @rdev: radeon_device pointer
*
* Fetches a GPU clock counter snapshot (R6xx-cayman).
* Returns the 64 bit clock counter snapshot.
*/
-uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
+uint64_t r600_get_gpu_clock(struct radeon_device *rdev)
{
uint64_t clock;
diff --git a/sys/dev/pci/drm/radeon/r600_blit_shaders.c b/sys/dev/pci/drm/radeon/r600_blit_shaders.c
index 2497bad5fec..96abf766810 100644
--- a/sys/dev/pci/drm/radeon/r600_blit_shaders.c
+++ b/sys/dev/pci/drm/radeon/r600_blit_shaders.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: r600_blit_shaders.c,v 1.4 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2009 Advanced Micro Devices, Inc.
*
@@ -24,13 +25,14 @@
* Alex Deucher <alexander.deucher@amd.com>
*/
-#include <dev/pci/drm/drm_linux.h>
+#include <sys/param.h>
+#include <dev/pci/drm/drmP.h>
/*
* R6xx+ cards need to use the 3D engine to blit data which requires
* quite a bit of hw state setup. Rather than pull the whole 3D driver
* (which normally generates the 3D state) into the DRM, we opt to use
- * statically generated state tables. The register state and shaders
+ * statically generated state tables. The register state and shaders
* were hand generated to support blitting functionality. See the 3D
* driver or documentation for descriptions of the registers and
* shader instructions.
diff --git a/sys/dev/pci/drm/radeon/r600_blit_shaders.h b/sys/dev/pci/drm/radeon/r600_blit_shaders.h
index f437d36dd98..7cb1ac4cf7e 100644
--- a/sys/dev/pci/drm/radeon/r600_blit_shaders.h
+++ b/sys/dev/pci/drm/radeon/r600_blit_shaders.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: r600_blit_shaders.h,v 1.3 2018/04/20 16:09:36 deraadt Exp $ */
/*
* Copyright 2009 Advanced Micro Devices, Inc.
* Copyright 2009 Red Hat Inc.
@@ -35,4 +36,5 @@ extern const u32 r6xx_default_state[];
extern const u32 r6xx_ps_size, r6xx_vs_size;
extern const u32 r6xx_default_size, r7xx_default_size;
+__pure uint32_t int2float(uint32_t x);
#endif
diff --git a/sys/dev/pci/drm/radeon/r600_cs.c b/sys/dev/pci/drm/radeon/r600_cs.c
index 8004b49f542..4fb1557d05c 100644
--- a/sys/dev/pci/drm/radeon/r600_cs.c
+++ b/sys/dev/pci/drm/radeon/r600_cs.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: r600_cs.c,v 1.9 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -30,7 +31,12 @@
#include "r600d.h"
#include "r600_reg_safe.h"
-static int r600_nomm;
+static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc);
+static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc);
+typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
+static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
@@ -781,29 +787,170 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
}
/**
- * r600_cs_packet_parse_vline() - parse userspace VLINE packet
+ * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
+ * @parser: parser structure holding parsing context.
+ * @pkt: where to store packet information
+ *
+ * Assume that chunk_ib_index is properly set. Will return -EINVAL
+ * if the packet is bigger than the remaining ib size or if the packet type is unknown.
+ **/
+static int r600_cs_packet_parse(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx)
+{
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ uint32_t header;
+
+ if (idx >= ib_chunk->length_dw) {
+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+ idx, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ header = radeon_get_ib_value(p, idx);
+ pkt->idx = idx;
+ pkt->type = CP_PACKET_GET_TYPE(header);
+ pkt->count = CP_PACKET_GET_COUNT(header);
+ pkt->one_reg_wr = 0;
+ switch (pkt->type) {
+ case PACKET_TYPE0:
+ pkt->reg = CP_PACKET0_GET_REG(header);
+ break;
+ case PACKET_TYPE3:
+ pkt->opcode = CP_PACKET3_GET_OPCODE(header);
+ break;
+ case PACKET_TYPE2:
+ pkt->count = -1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
+ return -EINVAL;
+ }
+ if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
+ DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
+ pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ return 0;
+}
+
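A worked decode, assuming the usual r600d.h field layout (type in bits 31:30, count in 29:16, type-3 opcode in 15:8): the PACKET3 NOP that carries a relocation index is 0xC0001000.

	uint32_t header = 0xC0001000;	/* PACKET3(PACKET3_NOP, 0) */
	/* CP_PACKET_GET_TYPE(header)    == 3    (PACKET_TYPE3)          */
	/* CP_PACKET_GET_COUNT(header)   == 0    (body dwords minus 1)   */
	/* CP_PACKET3_GET_OPCODE(header) == 0x10 (PACKET3_NOP)           */

With count == 0 the packet body is one dword (the reloc index), which is why the reloc parsers below advance p->idx by count + 2.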
+/**
+ * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3
* @parser: parser structure holding parsing context.
+ * @data: pointer to relocation data
+ * @offset_start: starting offset
+ * @offset_mask: offset mask (to align start offset on)
+ * @reloc: reloc information
*
- * This is an R600-specific function for parsing VLINE packets.
- * Real work is done by r600_cs_common_vline_parse function.
- * Here we just set up ASIC-specific register table and call
- * the common implementation function.
- */
-static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
+ * Check that the next packet is a relocation packet3, do bo validation and
+ * compute the GPU offset using the provided start.
+ **/
+static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc)
+{
+ struct radeon_cs_chunk *relocs_chunk;
+ struct radeon_cs_packet p3reloc;
+ unsigned idx;
+ int r;
+
+ if (p->chunk_relocs_idx == -1) {
+ DRM_ERROR("No relocation chunk !\n");
+ return -EINVAL;
+ }
+ *cs_reloc = NULL;
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ r = r600_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r) {
+ return r;
+ }
+ p->idx += p3reloc.count + 2;
+ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+ DRM_ERROR("No packet3 for relocation for packet at %d.\n",
+ p3reloc.idx);
+ return -EINVAL;
+ }
+ idx = radeon_get_ib_value(p, p3reloc.idx + 1);
+ if (idx >= relocs_chunk->length_dw) {
+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+ idx, relocs_chunk->length_dw);
+ return -EINVAL;
+ }
+ /* FIXME: we assume reloc size is 4 dwords */
+ *cs_reloc = p->relocs_ptr[(idx / 4)];
+ return 0;
+}
+
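The idx / 4 at the end encodes the assumption flagged by the FIXME: relocation entries are 4 dwords each, so the dword index read from the NOP payload maps to a table entry as follows.

	/* dword index -> reloc table entry, assuming 4-dword entries;
	 * e.g. idx == 8 selects entry 2. */
	unsigned entry = idx / 4;
	struct radeon_cs_reloc *reloc = p->relocs_ptr[entry];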
+/**
+ * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3
+ * @parser: parser structure holding parsing context.
+ * @data: pointer to relocation data
+ * @offset_start: starting offset
+ * @offset_mask: offset mask (to align start offset on)
+ * @reloc: reloc information
+ *
+ * Check that the next packet is a relocation packet3, do bo validation and
+ * compute the GPU offset using the provided start.
+ **/
+static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc)
+{
+ struct radeon_cs_chunk *relocs_chunk;
+ struct radeon_cs_packet p3reloc;
+ unsigned idx;
+ int r;
+
+ if (p->chunk_relocs_idx == -1) {
+ DRM_ERROR("No relocation chunk !\n");
+ return -EINVAL;
+ }
+ *cs_reloc = NULL;
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ r = r600_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r) {
+ return r;
+ }
+ p->idx += p3reloc.count + 2;
+ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+ DRM_ERROR("No packet3 for relocation for packet at %d.\n",
+ p3reloc.idx);
+ return -EINVAL;
+ }
+ idx = radeon_get_ib_value(p, p3reloc.idx + 1);
+ if (idx >= relocs_chunk->length_dw) {
+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+ idx, relocs_chunk->length_dw);
+ return -EINVAL;
+ }
+ *cs_reloc = p->relocs;
+ (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
+ (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
+ return 0;
+}
+
+/**
+ * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
+ * @parser: parser structure holding parsing context.
+ *
+ * Check whether the next packet is a relocation packet3 (a NOP carrying
+ * the relocation index).
+ **/
+static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
- static uint32_t vline_start_end[2] = {AVIVO_D1MODE_VLINE_START_END,
- AVIVO_D2MODE_VLINE_START_END};
- static uint32_t vline_status[2] = {AVIVO_D1MODE_VLINE_STATUS,
- AVIVO_D2MODE_VLINE_STATUS};
+ struct radeon_cs_packet p3reloc;
+ int r;
- return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
+ r = r600_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r) {
+ return 0;
+ }
+ if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
+ return 0;
+ }
+ return 1;
}
/**
- * r600_cs_common_vline_parse() - common vline parser
+ * r600_cs_packet_next_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
- * @vline_start_end: table of vline_start_end registers
- * @vline_status: table of vline_status registers
*
* Userspace sends a special sequence for VLINE waits.
* PACKET0 - VLINE_START_END + value
@@ -813,17 +960,11 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
* This function parses this and relocates the VLINE START END
* and WAIT_REG_MEM packets to the correct crtc.
* It also detects a switched off crtc and nulls out the
- * wait in that case. This function is common for all ASICs that
- * are R600 and newer. The parsing algorithm is the same, and only
- * differs in which registers are used.
- *
- * Caller is the ASIC-specific function which passes the parser
- * context and ASIC-specific register table
+ * wait in that case.
*/
-int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
- uint32_t *vline_start_end,
- uint32_t *vline_status)
+static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
+ struct drm_mode_object *obj;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
struct radeon_cs_packet p3reloc, wait_reg_mem;
@@ -835,12 +976,12 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
ib = p->ib.ptr;
/* parse the WAIT_REG_MEM */
- r = radeon_cs_packet_parse(p, &wait_reg_mem, p->idx);
+ r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
if (r)
return r;
/* check its a WAIT_REG_MEM */
- if (wait_reg_mem.type != RADEON_PACKET_TYPE3 ||
+ if (wait_reg_mem.type != PACKET_TYPE3 ||
wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
return -EINVAL;
@@ -849,12 +990,7 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
/* bit 4 is reg (0) or mem (1) */
if (wait_reg_mem_info & 0x10) {
- DRM_ERROR("vline WAIT_REG_MEM waiting on MEM instead of REG\n");
- return -EINVAL;
- }
- /* bit 8 is me (0) or pfp (1) */
- if (wait_reg_mem_info & 0x100) {
- DRM_ERROR("vline WAIT_REG_MEM waiting on PFP instead of ME\n");
+ DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
return -EINVAL;
}
/* waiting for value to be equal */
@@ -862,18 +998,18 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
return -EINVAL;
}
- if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) {
+ if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
return -EINVAL;
}
- if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) {
+ if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
return -EINVAL;
}
/* jump over the NOP */
- r = radeon_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
+ r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
if (r)
return r;
@@ -883,18 +1019,19 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
- reg = R600_CP_PACKET0_GET_REG(header);
+ reg = CP_PACKET0_GET_REG(header);
- crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
- if (!crtc) {
+ obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
- return -ENOENT;
+ return -EINVAL;
}
+ crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
crtc_id = radeon_crtc->crtc_id;
if (!crtc->enabled) {
- /* CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
+ /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
ib[h_idx + 2] = PACKET2(0);
ib[h_idx + 3] = PACKET2(0);
ib[h_idx + 4] = PACKET2(0);
@@ -902,15 +1039,20 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
ib[h_idx + 6] = PACKET2(0);
ib[h_idx + 7] = PACKET2(0);
ib[h_idx + 8] = PACKET2(0);
- } else if (reg == vline_start_end[0]) {
- header &= ~R600_CP_PACKET0_REG_MASK;
- header |= vline_start_end[crtc_id] >> 2;
+ } else if (crtc_id == 1) {
+ switch (reg) {
+ case AVIVO_D1MODE_VLINE_START_END:
+ header &= ~R600_CP_PACKET0_REG_MASK;
+ header |= AVIVO_D2MODE_VLINE_START_END >> 2;
+ break;
+ default:
+ DRM_ERROR("unknown crtc reloc\n");
+ return -EINVAL;
+ }
ib[h_idx] = header;
- ib[h_idx + 4] = vline_status[crtc_id] >> 2;
- } else {
- DRM_ERROR("unknown crtc reloc\n");
- return -EINVAL;
+ ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
}
+
return 0;
}
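Putting the offsets used above together, the userspace sequence this parser walks appears to be the following (a hedged reconstruction; only the offsets are taken from the code):

	/* h_idx + 0:  PACKET0(AVIVO_D1MODE_VLINE_START_END, 0)   */
	/* h_idx + 1:  start/end value                            */
	/* h_idx + 2:  WAIT_REG_MEM header, 7 dwords total;       */
	/*             the polled register sits at h_idx + 4      */
	/* h_idx + 9:  PACKET3 NOP reloc header                   */
	/* h_idx + 10: crtc_id (read as h_idx + 2 + 7 + 1)        */

which is why a disabled crtc is handled by turning dwords h_idx + 2 through h_idx + 8 into PACKET2(0) no-ops, and crtc 1 by patching the PACKET0 register and the polled status register to their D2 equivalents.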
@@ -968,7 +1110,7 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
struct r600_cs_track *track = (struct r600_cs_track *)p->track;
- struct radeon_bo_list *reloc;
+ struct radeon_cs_reloc *reloc;
u32 m, i, tmp, *ib;
int r;
@@ -1013,13 +1155,13 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_GSTMP_RING_BASE:
case SQ_PSTMP_RING_BASE:
case SQ_VSTMP_RING_BASE:
- r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
case SQ_CONFIG:
track->sq_config = radeon_get_ib_value(p, idx);
@@ -1030,8 +1172,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case R_028010_DB_DEPTH_INFO:
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
- radeon_cs_packet_next_is_pkt3_nop(p)) {
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r600_cs_packet_next_is_pkt3_nop(p)) {
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1040,7 +1182,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_depth_info = radeon_get_ib_value(p, idx);
ib[idx] &= C_028010_ARRAY_MODE;
track->db_depth_info &= C_028010_ARRAY_MODE;
- if (reloc->tiling_flags & RADEON_TILING_MACRO) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
} else {
@@ -1073,7 +1215,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case VGT_STRMOUT_BUFFER_BASE_1:
case VGT_STRMOUT_BUFFER_BASE_2:
case VGT_STRMOUT_BUFFER_BASE_3:
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1081,9 +1223,9 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->vgt_strmout_bo[tmp] = reloc->robj;
- track->vgt_strmout_bo_mc[tmp] = reloc->gpu_offset;
+ track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset;
track->streamout_dirty = true;
break;
case VGT_STRMOUT_BUFFER_SIZE_0:
@@ -1096,13 +1238,13 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->streamout_dirty = true;
break;
case CP_COHER_BASE:
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
"0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
case R_028238_CB_TARGET_MASK:
track->cb_target_mask = radeon_get_ib_value(p, idx);
@@ -1131,18 +1273,18 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280B8_CB_COLOR6_INFO:
case R_0280BC_CB_COLOR7_INFO:
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
- radeon_cs_packet_next_is_pkt3_nop(p)) {
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r600_cs_packet_next_is_pkt3_nop(p)) {
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
}
tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
- if (reloc->tiling_flags & RADEON_TILING_MACRO) {
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
- } else if (reloc->tiling_flags & RADEON_TILING_MICRO) {
+ } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
}
@@ -1195,7 +1337,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280F8_CB_COLOR6_FRAG:
case R_0280FC_CB_COLOR7_FRAG:
tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
- if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
+ if (!r600_cs_packet_next_is_pkt3_nop(p)) {
if (!track->cb_color_base_last[tmp]) {
dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
return -EINVAL;
@@ -1204,14 +1346,14 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
ib[idx] = track->cb_color_base_last[tmp];
} else {
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
}
track->cb_color_frag_bo[tmp] = reloc->robj;
track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
}
if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
track->cb_dirty = true;
@@ -1226,7 +1368,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280D8_CB_COLOR6_TILE:
case R_0280DC_CB_COLOR7_TILE:
tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
- if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
+ if (!r600_cs_packet_next_is_pkt3_nop(p)) {
if (!track->cb_color_base_last[tmp]) {
dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
return -EINVAL;
@@ -1235,14 +1377,14 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
ib[idx] = track->cb_color_base_last[tmp];
} else {
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
}
track->cb_color_tile_bo[tmp] = reloc->robj;
track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
}
if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
track->cb_dirty = true;
@@ -1270,7 +1412,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR5_BASE:
case CB_COLOR6_BASE:
case CB_COLOR7_BASE:
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1278,34 +1420,34 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
}
tmp = (reg - CB_COLOR0_BASE) / 4;
track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->cb_color_base_last[tmp] = ib[idx];
track->cb_color_bo[tmp] = reloc->robj;
- track->cb_color_bo_mc[tmp] = reloc->gpu_offset;
+ track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
track->cb_dirty = true;
break;
case DB_DEPTH_BASE:
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
track->db_offset = radeon_get_ib_value(p, idx) << 8;
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->db_bo = reloc->robj;
- track->db_bo_mc = reloc->gpu_offset;
+ track->db_bo_mc = reloc->lobj.gpu_offset;
track->db_dirty = true;
break;
case DB_HTILE_DATA_BASE:
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
track->htile_offset = radeon_get_ib_value(p, idx) << 8;
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->htile_bo = reloc->robj;
track->db_dirty = true;
break;
@@ -1368,22 +1510,22 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_ALU_CONST_CACHE_VS_13:
case SQ_ALU_CONST_CACHE_VS_14:
case SQ_ALU_CONST_CACHE_VS_15:
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
case SX_MEMORY_EXPORT_BASE:
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONFIG_REG "
"0x%04X\n", reg);
return -EINVAL;
}
- ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
case SX_MISC:
track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
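
A pattern worth noting before the packet3 checker begins: nearly every base-register case above ends in ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff). These registers hold 256-byte-aligned addresses, so the IB dword carries an offset in 256-byte units and the checker adds the buffer's GPU base shifted right by the same 8 bits. A toy illustration with made-up addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t gpu_offset = 0x40000000ULL; /* buffer base picked by the kernel */
	uint32_t ib_dword   = 0x00001000;    /* userspace offset, 256-byte units */

	/* the register stores address >> 8, so patch the base in the same way */
	ib_dword += (uint32_t)((gpu_offset >> 8) & 0xffffffff);

	printf("patched dword 0x%08x = byte address 0x%llx\n",
	    ib_dword, (unsigned long long)ib_dword << 8);
	return 0;
}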
@@ -1625,7 +1767,7 @@ static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
static int r600_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
- struct radeon_bo_list *reloc;
+ struct radeon_cs_reloc *reloc;
struct r600_cs_track *track;
volatile u32 *ib;
unsigned idx;
@@ -1663,13 +1805,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad SET PREDICATION\n");
return -EINVAL;
}
- offset = reloc->gpu_offset +
+ offset = reloc->lobj.gpu_offset +
(idx_value & 0xfffffff0) +
((u64)(tmp & 0xff) << 32);
@@ -1704,13 +1846,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
}
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
}
- offset = reloc->gpu_offset +
+ offset = reloc->lobj.gpu_offset +
idx_value +
((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
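
DRAW_INDEX and several cases below assemble a 40-bit GPU address from two IB dwords: the full low 32 bits, plus eight more bits taken from the next dword. After relocation, the checker writes the sum back split the same way (the low word into one dword, the upper byte into the next). A small sketch of the split and rejoin, assuming the 40-bit address space these masks imply:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t offset = 0x12abcd1230ULL; /* hypothetical 40-bit GPU address */
	uint32_t lo, hi;

	lo = (uint32_t)offset;                /* dword idx+0 */
	hi = (uint32_t)(offset >> 32) & 0xff; /* low byte of dword idx+1 */

	printf("rejoined: 0x%010llx\n",
	    (unsigned long long)(((uint64_t)hi << 32) | lo));
	return 0;
}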
@@ -1756,21 +1898,18 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x10) {
uint64_t offset;
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad WAIT_REG_MEM\n");
return -EINVAL;
}
- offset = reloc->gpu_offset +
+ offset = reloc->lobj.gpu_offset +
(radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
ib[idx+2] = upper_32_bits(offset) & 0xff;
- } else if (idx_value & 0x100) {
- DRM_ERROR("cannot use PFP on REG wait\n");
- return -EINVAL;
}
break;
case PACKET3_CP_DMA:
@@ -1793,7 +1932,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
/* src address space is memory */
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad CP DMA SRC\n");
return -EINVAL;
@@ -1802,7 +1941,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
tmp = radeon_get_ib_value(p, idx) +
((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
- offset = reloc->gpu_offset + tmp;
+ offset = reloc->lobj.gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
@@ -1823,7 +1962,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("CP DMA DAIC only supported for registers\n");
return -EINVAL;
}
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad CP DMA DST\n");
return -EINVAL;
@@ -1832,7 +1971,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
tmp = radeon_get_ib_value(p, idx+2) +
((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
- offset = reloc->gpu_offset + tmp;
+ offset = reloc->lobj.gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
@@ -1853,12 +1992,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* 0xffffffff/0x0 is flush all cache flag */
if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
radeon_get_ib_value(p, idx + 2) != 0) {
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad SURFACE_SYNC\n");
return -EINVAL;
}
- ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
}
break;
case PACKET3_EVENT_WRITE:
@@ -1869,12 +2008,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (pkt->count) {
uint64_t offset;
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
}
- offset = reloc->gpu_offset +
+ offset = reloc->lobj.gpu_offset +
(radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
@@ -1890,13 +2029,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE_EOP\n");
return -EINVAL;
}
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
}
- offset = reloc->gpu_offset +
+ offset = reloc->lobj.gpu_offset +
(radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
@@ -1956,32 +2095,32 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
case SQ_TEX_VTX_VALID_TEXTURE:
/* tex base */
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
}
- base_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
- if (reloc->tiling_flags & RADEON_TILING_MACRO)
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
- else if (reloc->tiling_flags & RADEON_TILING_MICRO)
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
}
texture = reloc->robj;
/* tex mip base */
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
}
- mip_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
mipmap = reloc->robj;
r = r600_check_texture_resource(p, idx+(i*7)+1,
texture, mipmap,
base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
- reloc->tiling_flags);
+ reloc->lobj.tiling_flags);
if (r)
return r;
ib[idx+1+(i*7)+2] += base_offset;
@@ -1991,7 +2130,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
{
uint64_t offset64;
/* vtx base */
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
@@ -2005,7 +2144,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
}
- offset64 = reloc->gpu_offset + offset;
+ offset64 = reloc->lobj.gpu_offset + offset;
ib[idx+1+(i*8)+0] = offset64;
ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
(upper_32_bits(offset64) & 0xff);
@@ -2092,7 +2231,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
{
u64 offset;
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
return -EINVAL;
@@ -2115,7 +2254,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
- ib[idx+1] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
}
break;
case PACKET3_SURFACE_BASE_UPDATE:
@@ -2136,7 +2275,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* Updating memory at DST_ADDRESS. */
if (idx_value & 0x1) {
u64 offset;
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
return -EINVAL;
@@ -2148,14 +2287,14 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
- offset += reloc->gpu_offset;
+ offset += reloc->lobj.gpu_offset;
ib[idx+1] = offset;
ib[idx+2] = upper_32_bits(offset) & 0xff;
}
/* Reading data from SRC_ADDRESS. */
if (((idx_value >> 1) & 0x3) == 2) {
u64 offset;
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
return -EINVAL;
@@ -2167,7 +2306,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
- offset += reloc->gpu_offset;
+ offset += reloc->lobj.gpu_offset;
ib[idx+3] = offset;
ib[idx+4] = upper_32_bits(offset) & 0xff;
}
@@ -2180,7 +2319,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad MEM_WRITE (invalid count)\n");
return -EINVAL;
}
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
return -EINVAL;
@@ -2196,7 +2335,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset + 8, radeon_bo_size(reloc->robj));
return -EINVAL;
}
- offset += reloc->gpu_offset;
+ offset += reloc->lobj.gpu_offset;
ib[idx+0] = offset;
ib[idx+1] = upper_32_bits(offset) & 0xff;
break;
@@ -2209,7 +2348,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x1) {
u64 offset;
/* SRC is memory. */
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad COPY_DW (missing src reloc)\n");
return -EINVAL;
@@ -2221,7 +2360,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
- offset += reloc->gpu_offset;
+ offset += reloc->lobj.gpu_offset;
ib[idx+1] = offset;
ib[idx+2] = upper_32_bits(offset) & 0xff;
} else {
@@ -2233,7 +2372,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x2) {
u64 offset;
/* DST is memory. */
- r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
+ r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
return -EINVAL;
@@ -2245,7 +2384,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
- offset += reloc->gpu_offset;
+ offset += reloc->lobj.gpu_offset;
ib[idx+3] = offset;
ib[idx+4] = upper_32_bits(offset) & 0xff;
} else {
@@ -2288,7 +2427,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
p->track = track;
}
do {
- r = radeon_cs_packet_parse(p, &pkt, p->idx);
+ r = r600_cs_packet_parse(p, &pkt, p->idx);
if (r) {
kfree(p->track);
p->track = NULL;
@@ -2296,12 +2435,12 @@ int r600_cs_parse(struct radeon_cs_parser *p)
}
p->idx += pkt.count + 2;
switch (pkt.type) {
- case RADEON_PACKET_TYPE0:
+ case PACKET_TYPE0:
r = r600_cs_parse_packet0(p, &pkt);
break;
- case RADEON_PACKET_TYPE2:
+ case PACKET_TYPE2:
break;
- case RADEON_PACKET_TYPE3:
+ case PACKET_TYPE3:
r = r600_packet3_check(p, &pkt);
break;
default:
@@ -2315,7 +2454,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
p->track = NULL;
return r;
}
- } while (p->idx < p->chunk_ib->length_dw);
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
for (r = 0; r < p->ib.length_dw; r++) {
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
@@ -2327,7 +2466,19 @@ int r600_cs_parse(struct radeon_cs_parser *p)
return 0;
}
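
For orientation: the loop above advances by pkt.count + 2 dwords because a packet is one header dword plus a body whose length is encoded as COUNT = body dwords minus one, with the packet type in the top two bits. A hedged sketch of the header layout as these parsers treat it; the field positions follow the usual radeon PKT3 encoding and the opcode value here is made up:

#include <stdint.h>
#include <stdio.h>

#define PKT_TYPE(h)  (((h) >> 30) & 0x3)
#define PKT_COUNT(h) (((h) >> 16) & 0x3fff) /* body dwords minus one */
#define PKT3_OP(h)   (((h) >> 8) & 0xff)

static uint32_t pkt3(uint32_t op, uint32_t count)
{
	return (3u << 30) | ((count & 0x3fff) << 16) | ((op & 0xff) << 8);
}

int main(void)
{
	uint32_t h = pkt3(0x27, 4); /* hypothetical opcode, 5 body dwords */

	printf("type %u op 0x%02x advance %u dwords\n",
	    PKT_TYPE(h), PKT3_OP(h), PKT_COUNT(h) + 2); /* advance 6 */
	return 0;
}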
-#ifdef CONFIG_DRM_RADEON_UMS
+/* don't we need these UMS functions? */
+#if 0
+static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
+{
+ if (p->chunk_relocs_idx == -1) {
+ return 0;
+ }
+ p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+ if (p->relocs == NULL) {
+ return -ENOMEM;
+ }
+ return 0;
+}
/**
* cs_parser_fini() - clean parser states
@@ -2342,24 +2493,17 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
unsigned i;
kfree(parser->relocs);
- for (i = 0; i < parser->nchunks; i++)
- drm_free_large(parser->chunks[i].kdata);
+ for (i = 0; i < parser->nchunks; i++) {
+ kfree(parser->chunks[i].kdata);
+ if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
+ kfree(parser->chunks[i].kpage[0]);
+ kfree(parser->chunks[i].kpage[1]);
+ }
+ }
kfree(parser->chunks);
kfree(parser->chunks_array);
}
-static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
-{
- if (p->chunk_relocs == NULL) {
- return 0;
- }
- p->relocs = kzalloc(sizeof(struct radeon_bo_list), GFP_KERNEL);
- if (p->relocs == NULL) {
- return -ENOMEM;
- }
- return 0;
-}
-
int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
unsigned family, u32 *ib, int *l)
{
@@ -2377,7 +2521,9 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
/* initialize parser */
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
- parser.dev = &dev->pdev->dev;
+#ifdef notyet
+ parser.dev = dev;
+#endif
parser.rdev = NULL;
parser.family = family;
parser.track = track;
@@ -2397,15 +2543,16 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
/* Copy the packet into the IB, the parser will read from the
* input memory (cached) and write to the IB (which can be
* uncached). */
- ib_chunk = parser.chunk_ib;
+ ib_chunk = &parser.chunks[parser.chunk_ib_idx];
parser.ib.length_dw = ib_chunk->length_dw;
*l = parser.ib.length_dw;
- if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
- r = -EFAULT;
+ r = r600_cs_parse(&parser);
+ if (r) {
+ DRM_ERROR("Invalid command stream !\n");
r600_cs_parser_fini(&parser, r);
return r;
}
- r = r600_cs_parse(&parser);
+ r = radeon_cs_finish_pages(&parser);
if (r) {
DRM_ERROR("Invalid command stream !\n");
r600_cs_parser_fini(&parser, r);
@@ -2414,14 +2561,13 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
r600_cs_parser_fini(&parser, r);
return r;
}
+#endif
void r600_cs_legacy_init(void)
{
- r600_nomm = 1;
+ r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
}
-#endif
-
/*
* DMA
*/
@@ -2434,24 +2580,24 @@ void r600_cs_legacy_init(void)
* GPU offset using the provided start.
**/
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
- struct radeon_bo_list **cs_reloc)
+ struct radeon_cs_reloc **cs_reloc)
{
struct radeon_cs_chunk *relocs_chunk;
unsigned idx;
*cs_reloc = NULL;
- if (p->chunk_relocs == NULL) {
+ if (p->chunk_relocs_idx == -1) {
DRM_ERROR("No relocation chunk !\n");
return -EINVAL;
}
- relocs_chunk = p->chunk_relocs;
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
idx = p->dma_reloc_idx;
if (idx >= p->nrelocs) {
DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
idx, p->nrelocs);
return -EINVAL;
}
- *cs_reloc = &p->relocs[idx];
+ *cs_reloc = p->relocs_ptr[idx];
p->dma_reloc_idx++;
return 0;
}
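
Unlike the GFX-ring checker, which pulls relocations out of NOP packets as it meets them, the DMA checker hands them out strictly in submission order through the p->dma_reloc_idx cursor. A tiny model of that cursor, with a made-up relocation table:

#include <stdio.h>

struct reloc { unsigned long gpu_offset; };

struct parser {
	struct reloc *relocs;
	int nrelocs;
	int dma_reloc_idx;
};

/* hand back relocations in submission order, like r600_dma_cs_next_reloc */
static int next_reloc(struct parser *p, struct reloc **out)
{
	if (p->dma_reloc_idx >= p->nrelocs)
		return -1; /* walked past the relocation chunk */
	*out = &p->relocs[p->dma_reloc_idx++];
	return 0;
}

int main(void)
{
	struct reloc table[2] = { { 0x1000 }, { 0x2000 } };
	struct parser p = { table, 2, 0 };
	struct reloc *r;

	while (next_reloc(&p, &r) == 0)
		printf("reloc at 0x%lx\n", r->gpu_offset);
	return 0;
}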
@@ -2471,8 +2617,8 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
**/
int r600_dma_cs_parse(struct radeon_cs_parser *p)
{
- struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
- struct radeon_bo_list *src_reloc, *dst_reloc;
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ struct radeon_cs_reloc *src_reloc, *dst_reloc;
u32 header, cmd, count, tiled;
volatile u32 *ib = p->ib.ptr;
u32 idx, idx_value;
@@ -2502,14 +2648,14 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset <<= 8;
- ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
p->idx += count + 5;
} else {
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
- ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
- ib[idx+2] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
p->idx += count + 3;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
@@ -2536,22 +2682,22 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
/* tiled src, linear dst */
src_offset = radeon_get_ib_value(p, idx+1);
src_offset <<= 8;
- ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
+ ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
dst_offset = radeon_get_ib_value(p, idx+5);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
- ib[idx+5] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
- ib[idx+6] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
+ ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
} else {
/* linear src, tiled dst */
src_offset = radeon_get_ib_value(p, idx+5);
src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
- ib[idx+5] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
- ib[idx+6] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
+ ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset <<= 8;
- ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
}
p->idx += 7;
} else {
@@ -2561,10 +2707,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
- ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
- ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
- ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
- ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
p->idx += 5;
} else {
src_offset = radeon_get_ib_value(p, idx+2);
@@ -2572,10 +2718,10 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
- ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
- ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
- ib[idx+3] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
- ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) & 0xff) << 16;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+ ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16;
p->idx += 4;
}
}
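
The L2T/T2L copy cases above patch the two sides differently: the tiled side's dword takes the address in 256-byte units (base >> 8), while the linear side gets a byte address split into a dword-aligned low word and an 8-bit high byte. A compact sketch of both fixups on an assumed 40-bit base:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t base = 0x0340001200ULL; /* hypothetical 40-bit GPU base */

	/* tiled side: one dword, address in 256-byte units */
	uint32_t tiled = (uint32_t)(base >> 8);

	/* linear side: dword-aligned low word plus the upper 8 bits */
	uint32_t lin_lo = (uint32_t)(base & 0xfffffffc);
	uint32_t lin_hi = (uint32_t)(base >> 32) & 0xff;

	printf("tiled 0x%08x, linear 0x%08x/0x%02x\n", tiled, lin_lo, lin_hi);
	return 0;
}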
@@ -2607,8 +2753,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
- ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
- ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) << 16) & 0x00ff0000;
+ ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+ ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
p->idx += 4;
break;
case DMA_PACKET_NOP:
@@ -2618,7 +2764,7 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
return -EINVAL;
}
- } while (p->idx < p->chunk_ib->length_dw);
+ } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
for (r = 0; r < p->ib->length_dw; r++) {
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
diff --git a/sys/dev/pci/drm/radeon/r600_hdmi.c b/sys/dev/pci/drm/radeon/r600_hdmi.c
index 07df68f80b8..9871a2e00f6 100644
--- a/sys/dev/pci/drm/radeon/r600_hdmi.c
+++ b/sys/dev/pci/drm/radeon/r600_hdmi.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: r600_hdmi.c,v 1.4 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -23,12 +24,10 @@
*
* Authors: Christian König
*/
-#include <dev/pci/drm/linux_hdmi.h>
#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
-#include "radeon_audio.h"
#include "r600d.h"
#include "atom.h"
@@ -55,168 +54,154 @@ enum r600_hdmi_iec_status_bits {
AUDIO_STATUS_LEVEL = 0x80
};
-static struct r600_audio_pin r600_audio_status(struct radeon_device *rdev)
-{
- struct r600_audio_pin status;
- uint32_t value;
-
- value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
-
- /* number of channels */
- status.channels = (value & 0x7) + 1;
-
- /* bits per sample */
- switch ((value & 0xF0) >> 4) {
- case 0x0:
- status.bits_per_sample = 8;
- break;
- case 0x1:
- status.bits_per_sample = 16;
- break;
- case 0x2:
- status.bits_per_sample = 20;
- break;
- case 0x3:
- status.bits_per_sample = 24;
- break;
- case 0x4:
- status.bits_per_sample = 32;
- break;
- default:
- dev_err(rdev->dev, "Unknown bits per sample 0x%x, using 16\n",
- (int)value);
- status.bits_per_sample = 16;
- }
-
- /* current sampling rate in HZ */
- if (value & 0x4000)
- status.rate = 44100;
- else
- status.rate = 48000;
- status.rate *= ((value >> 11) & 0x7) + 1;
- status.rate /= ((value >> 8) & 0x7) + 1;
-
- value = RREG32(R600_AUDIO_STATUS_BITS);
-
- /* iec 60958 status bits */
- status.status_bits = value & 0xff;
-
- /* iec 60958 category code */
- status.category_code = (value >> 8) & 0xff;
-
- return status;
-}
+static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
+ /* 32kHz 44.1kHz 48kHz */
+ /* Clock N CTS N CTS N CTS */
+ { 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */
+ { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */
+ { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */
+ { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */
+ { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */
+ { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */
+ { 74175, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */
+ { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */
+ { 148351, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */
+ { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */
+ { 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */
+};
/*
- * update all hdmi interfaces with current audio parameters
+ * calculate CTS value if it's not found in the table
*/
-void r600_audio_update_hdmi(struct work_struct *work)
+static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
{
- struct radeon_device *rdev = container_of(work, struct radeon_device,
- audio_work);
- struct drm_device *dev = rdev->ddev;
- struct r600_audio_pin audio_status = r600_audio_status(rdev);
- struct drm_encoder *encoder;
- bool changed = false;
-
- if (rdev->audio.pin[0].channels != audio_status.channels ||
- rdev->audio.pin[0].rate != audio_status.rate ||
- rdev->audio.pin[0].bits_per_sample != audio_status.bits_per_sample ||
- rdev->audio.pin[0].status_bits != audio_status.status_bits ||
- rdev->audio.pin[0].category_code != audio_status.category_code) {
- rdev->audio.pin[0] = audio_status;
- changed = true;
- }
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (!radeon_encoder_is_digital(encoder))
- continue;
- if (changed || r600_hdmi_buffer_status_changed(encoder))
- r600_hdmi_update_audio_settings(encoder);
- }
+ if (*CTS == 0)
+ *CTS = clock * N / (128 * freq) * 1000;
+ DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
+ N, *CTS, freq);
}
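
The fallback formula is the HDMI audio clock regeneration relation, 128 * fs = f_TMDS * N / CTS, rearranged to CTS = f_TMDS * N / (128 * fs). Since clock is in kHz here, the integer division runs before the final * 1000 and can truncate, which is presumably why the exact values are kept in the table above. A quick check against the table's 25.20 MHz row, computed in Hz where the division is exact:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tmds_hz = 25200000; /* 25.20 MHz pixel clock */
	uint64_t n = 4096, fs = 32000;

	/* CTS = f_TMDS * N / (128 * fs) */
	uint64_t cts = tmds_hz * n / (128 * fs);

	printf("CTS = %llu\n", (unsigned long long)cts); /* 25200, as tabled */
	return 0;
}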
-/* enable the audio stream */
-void r600_audio_enable(struct radeon_device *rdev,
- struct r600_audio_pin *pin,
- u8 enable_mask)
+struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
{
- u32 tmp = RREG32(AZ_HOT_PLUG_CONTROL);
+ struct radeon_hdmi_acr res;
+ u8 i;
- if (!pin)
- return;
+ for (i = 0; r600_hdmi_predefined_acr[i].clock != clock &&
+ r600_hdmi_predefined_acr[i].clock != 0; i++)
+ ;
+ res = r600_hdmi_predefined_acr[i];
- if (enable_mask) {
- tmp |= AUDIO_ENABLED;
- if (enable_mask & 1)
- tmp |= PIN0_AUDIO_ENABLED;
- if (enable_mask & 2)
- tmp |= PIN1_AUDIO_ENABLED;
- if (enable_mask & 4)
- tmp |= PIN2_AUDIO_ENABLED;
- if (enable_mask & 8)
- tmp |= PIN3_AUDIO_ENABLED;
- } else {
- tmp &= ~(AUDIO_ENABLED |
- PIN0_AUDIO_ENABLED |
- PIN1_AUDIO_ENABLED |
- PIN2_AUDIO_ENABLED |
- PIN3_AUDIO_ENABLED);
- }
-
- WREG32(AZ_HOT_PLUG_CONTROL, tmp);
-}
+ /* In case some CTS values are missing */
+ r600_hdmi_calc_cts(clock, &res.cts_32khz, res.n_32khz, 32000);
+ r600_hdmi_calc_cts(clock, &res.cts_44_1khz, res.n_44_1khz, 44100);
+ r600_hdmi_calc_cts(clock, &res.cts_48khz, res.n_48khz, 48000);
-struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev)
-{
- /* only one pin on 6xx-NI */
- return &rdev->audio.pin[0];
+ return res;
}
-void r600_hdmi_update_acr(struct drm_encoder *encoder, long offset,
- const struct radeon_hdmi_acr *acr)
+/*
+ * update the N and CTS parameters for a given pixel clock rate
+ */
+static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
+ struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+
+ WREG32(HDMI0_ACR_32_0 + offset, HDMI0_ACR_CTS_32(acr.cts_32khz));
+ WREG32(HDMI0_ACR_32_1 + offset, acr.n_32khz);
+
+ WREG32(HDMI0_ACR_44_0 + offset, HDMI0_ACR_CTS_44(acr.cts_44_1khz));
+ WREG32(HDMI0_ACR_44_1 + offset, acr.n_44_1khz);
+
+ WREG32(HDMI0_ACR_48_0 + offset, HDMI0_ACR_CTS_48(acr.cts_48khz));
+ WREG32(HDMI0_ACR_48_1 + offset, acr.n_48khz);
+}
- /* DCE 3.0 uses register that's normally for CRC_CONTROL */
- uint32_t acr_ctl = ASIC_IS_DCE3(rdev) ? DCE3_HDMI0_ACR_PACKET_CONTROL :
- HDMI0_ACR_PACKET_CONTROL;
- WREG32_P(acr_ctl + offset,
- HDMI0_ACR_SOURCE | /* select SW CTS value */
- HDMI0_ACR_AUTO_SEND, /* allow hw to sent ACR packets when required */
- ~(HDMI0_ACR_SOURCE |
- HDMI0_ACR_AUTO_SEND));
-
- WREG32_P(HDMI0_ACR_32_0 + offset,
- HDMI0_ACR_CTS_32(acr->cts_32khz),
- ~HDMI0_ACR_CTS_32_MASK);
- WREG32_P(HDMI0_ACR_32_1 + offset,
- HDMI0_ACR_N_32(acr->n_32khz),
- ~HDMI0_ACR_N_32_MASK);
-
- WREG32_P(HDMI0_ACR_44_0 + offset,
- HDMI0_ACR_CTS_44(acr->cts_44_1khz),
- ~HDMI0_ACR_CTS_44_MASK);
- WREG32_P(HDMI0_ACR_44_1 + offset,
- HDMI0_ACR_N_44(acr->n_44_1khz),
- ~HDMI0_ACR_N_44_MASK);
-
- WREG32_P(HDMI0_ACR_48_0 + offset,
- HDMI0_ACR_CTS_48(acr->cts_48khz),
- ~HDMI0_ACR_CTS_48_MASK);
- WREG32_P(HDMI0_ACR_48_1 + offset,
- HDMI0_ACR_N_48(acr->n_48khz),
- ~HDMI0_ACR_N_48_MASK);
+/*
+ * calculate the crc for a given info frame
+ */
+static void r600_hdmi_infoframe_checksum(uint8_t packetType,
+ uint8_t versionNumber,
+ uint8_t length,
+ uint8_t *frame)
+{
+ int i;
+ frame[0] = packetType + versionNumber + length;
+ for (i = 1; i <= length; i++)
+ frame[0] += frame[i];
+ frame[0] = 0x100 - frame[0];
}
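
The checksum byte is chosen so that the three header bytes plus the entire payload sum to zero modulo 256, which is how HDMI/CEA infoframe checksums are defined. A short sketch mirroring the routine above that verifies the property on an arbitrary payload:

#include <stdint.h>
#include <stdio.h>

static void checksum(uint8_t type, uint8_t ver, uint8_t len, uint8_t *frame)
{
	int i;

	frame[0] = type + ver + len;
	for (i = 1; i <= len; i++)
		frame[0] += frame[i];
	frame[0] = 0x100 - frame[0]; /* make the running total wrap to zero */
}

int main(void)
{
	uint8_t f[11] = { 0, 0x11, 0x22, 0x33 }; /* arbitrary payload bytes */
	uint8_t sum = 0x84 + 0x01 + 0x0a;        /* audio infoframe header */
	int i;

	checksum(0x84, 0x01, 0x0a, f);
	for (i = 0; i <= 0x0a; i++)
		sum += f[i];
	printf("total mod 256 = %u\n", sum); /* 0 */
	return 0;
}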
/*
* build a HDMI Video Info Frame
*/
-void r600_set_avi_packet(struct radeon_device *rdev, u32 offset,
- unsigned char *buffer, size_t size)
+static void r600_hdmi_videoinfoframe(
+ struct drm_encoder *encoder,
+ enum r600_hdmi_color_format color_format,
+ int active_information_present,
+ uint8_t active_format_aspect_ratio,
+ uint8_t scan_information,
+ uint8_t colorimetry,
+ uint8_t ex_colorimetry,
+ uint8_t quantization,
+ int ITC,
+ uint8_t picture_aspect_ratio,
+ uint8_t video_format_identification,
+ uint8_t pixel_repetition,
+ uint8_t non_uniform_picture_scaling,
+ uint8_t bar_info_data_valid,
+ uint16_t top_bar,
+ uint16_t bottom_bar,
+ uint16_t left_bar,
+ uint16_t right_bar
+)
{
- uint8_t *frame = buffer + 3;
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset = dig->afmt->offset;
+
+ uint8_t frame[14];
+
+ frame[0x0] = 0;
+ frame[0x1] =
+ (scan_information & 0x3) |
+ ((bar_info_data_valid & 0x3) << 2) |
+ ((active_information_present & 0x1) << 4) |
+ ((color_format & 0x3) << 5);
+ frame[0x2] =
+ (active_format_aspect_ratio & 0xF) |
+ ((picture_aspect_ratio & 0x3) << 4) |
+ ((colorimetry & 0x3) << 6);
+ frame[0x3] =
+ (non_uniform_picture_scaling & 0x3) |
+ ((quantization & 0x3) << 2) |
+ ((ex_colorimetry & 0x7) << 4) |
+ ((ITC & 0x1) << 7);
+ frame[0x4] = (video_format_identification & 0x7F);
+ frame[0x5] = (pixel_repetition & 0xF);
+ frame[0x6] = (top_bar & 0xFF);
+ frame[0x7] = (top_bar >> 8);
+ frame[0x8] = (bottom_bar & 0xFF);
+ frame[0x9] = (bottom_bar >> 8);
+ frame[0xA] = (left_bar & 0xFF);
+ frame[0xB] = (left_bar >> 8);
+ frame[0xC] = (right_bar & 0xFF);
+ frame[0xD] = (right_bar >> 8);
+
+ r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
+ /* Our header values (type, version, length) should be alright, Intel
+ * uses the same ones. The checksum function also seems to be OK; it
+ * works fine for the audio infoframe. However, the calculated value is
+ * always lower by 2 than what fglrx produces, which breaks display on
+ * TVs that strictly check the checksum. Hack it manually here to work
+ * around the issue. */
+ frame[0x0] += 2;
WREG32(HDMI0_AVI_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
@@ -225,29 +210,45 @@ void r600_set_avi_packet(struct radeon_device *rdev, u32 offset,
WREG32(HDMI0_AVI_INFO2 + offset,
frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
WREG32(HDMI0_AVI_INFO3 + offset,
- frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24));
-
- WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset,
- HDMI0_AVI_INFO_LINE(2)); /* anything other than 0 */
-
- WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
- HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
- HDMI0_AVI_INFO_CONT); /* send AVI info frames every frame/field */
-
+ frame[0xC] | (frame[0xD] << 8));
}
/*
* build a Audio Info Frame
*/
-static void r600_hdmi_update_audio_infoframe(struct drm_encoder *encoder,
- const void *buffer, size_t size)
+static void r600_hdmi_audioinfoframe(
+ struct drm_encoder *encoder,
+ uint8_t channel_count,
+ uint8_t coding_type,
+ uint8_t sample_size,
+ uint8_t sample_frequency,
+ uint8_t format,
+ uint8_t channel_allocation,
+ uint8_t level_shift,
+ int downmix_inhibit
+)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
- const u8 *frame = buffer + 3;
+
+ uint8_t frame[11];
+
+ frame[0x0] = 0;
+ frame[0x1] = (channel_count & 0x7) | ((coding_type & 0xF) << 4);
+ frame[0x2] = (sample_size & 0x3) | ((sample_frequency & 0x7) << 2);
+ frame[0x3] = format;
+ frame[0x4] = channel_allocation;
+ frame[0x5] = ((level_shift & 0xF) << 3) | ((downmix_inhibit & 0x1) << 7);
+ frame[0x6] = 0;
+ frame[0x7] = 0;
+ frame[0x8] = 0;
+ frame[0x9] = 0;
+ frame[0xA] = 0;
+
+ r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame);
WREG32(HDMI0_AUDIO_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
@@ -291,7 +292,7 @@ int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
/*
* write the audio workaround status to the hardware
*/
-void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
+static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -310,102 +311,86 @@ void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
value, ~HDMI0_AUDIO_TEST_EN);
}
-void r600_hdmi_audio_set_dto(struct radeon_device *rdev,
- struct radeon_crtc *crtc, unsigned int clock)
+
+/*
+ * update the info frames with the data from the current display mode
+ */
+void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
- struct radeon_encoder *radeon_encoder;
- struct radeon_encoder_atom_dig *dig;
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset;
- if (!crtc)
+ if (!dig || !dig->afmt)
return;
- radeon_encoder = to_radeon_encoder(crtc->encoder);
- dig = radeon_encoder->enc_priv;
-
- if (!dig)
+ /* Silent, r600_hdmi_enable will raise WARN for us */
+ if (!dig->afmt->enabled)
return;
+ offset = dig->afmt->offset;
+
+ r600_audio_set_clock(encoder, mode->clock);
- if (dig->dig_encoder == 0) {
- WREG32(DCCG_AUDIO_DTO0_PHASE, 24000 * 100);
- WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
- WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+ WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
+ HDMI0_NULL_SEND); /* send null packets when required */
+
+ WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000);
+
+ if (ASIC_IS_DCE32(rdev)) {
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+ HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+ WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
+ AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
} else {
- WREG32(DCCG_AUDIO_DTO1_PHASE, 24000 * 100);
- WREG32(DCCG_AUDIO_DTO1_MODULE, clock * 100);
- WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+ WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
+ HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
+ HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
+ HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
+ HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
}
-}
-void r600_set_vbi_packet(struct drm_encoder *encoder, u32 offset)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
+ WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
+ HDMI0_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
+ HDMI0_ACR_SOURCE); /* select SW CTS value */
- WREG32_OR(HDMI0_VBI_PACKET_CONTROL + offset,
- HDMI0_NULL_SEND | /* send null packets when required */
- HDMI0_GC_SEND | /* send general control packets */
- HDMI0_GC_CONT); /* send general control packets every frame */
-}
+ WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
+ HDMI0_NULL_SEND | /* send null packets when required */
+ HDMI0_GC_SEND | /* send general control packets */
+ HDMI0_GC_CONT); /* send general control packets every frame */
-void r600_set_audio_packet(struct drm_encoder *encoder, u32 offset)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
+ /* TODO: HDMI0_AUDIO_INFO_UPDATE */
+ WREG32(HDMI0_INFOFRAME_CONTROL0 + offset,
+ HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */
+ HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+ HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */
- WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
- HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
- HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
- HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be suffient for all audio modes and small enough for all hblanks */
- HDMI0_60958_CS_UPDATE, /* allow 60958 channel status fields to be updated */
- ~(HDMI0_AUDIO_SAMPLE_SEND |
- HDMI0_AUDIO_DELAY_EN_MASK |
- HDMI0_AUDIO_PACKETS_PER_LINE_MASK |
- HDMI0_60958_CS_UPDATE));
-
- WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
- HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
- HDMI0_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
-
- WREG32_P(HDMI0_INFOFRAME_CONTROL1 + offset,
- HDMI0_AUDIO_INFO_LINE(2), /* anything other than 0 */
- ~HDMI0_AUDIO_INFO_LINE_MASK);
-
- WREG32_AND(HDMI0_GENERIC_PACKET_CONTROL + offset,
- ~(HDMI0_GENERIC0_SEND |
- HDMI0_GENERIC0_CONT |
- HDMI0_GENERIC0_UPDATE |
- HDMI0_GENERIC1_SEND |
- HDMI0_GENERIC1_CONT |
- HDMI0_GENERIC0_LINE_MASK |
- HDMI0_GENERIC1_LINE_MASK));
-
- WREG32_P(HDMI0_60958_0 + offset,
- HDMI0_60958_CS_CHANNEL_NUMBER_L(1),
- ~(HDMI0_60958_CS_CHANNEL_NUMBER_L_MASK |
- HDMI0_60958_CS_CLOCK_ACCURACY_MASK));
-
- WREG32_P(HDMI0_60958_1 + offset,
- HDMI0_60958_CS_CHANNEL_NUMBER_R(2),
- ~HDMI0_60958_CS_CHANNEL_NUMBER_R_MASK);
-}
+ WREG32(HDMI0_INFOFRAME_CONTROL1 + offset,
+ HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */
+ HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */
-void r600_set_mute(struct drm_encoder *encoder, u32 offset, bool mute)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
+ WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
- if (mute)
- WREG32_OR(HDMI0_GC + offset, HDMI0_GC_AVMUTE);
- else
- WREG32_AND(HDMI0_GC + offset, ~HDMI0_GC_AVMUTE);
+ r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+
+ r600_hdmi_update_ACR(encoder, mode->clock);
+
+ /* it's unknown what these bits do exactly, but they're quite useful for debugging */
+ WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF);
+ WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF);
+ WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001);
+ WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
+
+ r600_hdmi_audio_workaround(encoder);
}
-/**
- * r600_hdmi_update_audio_settings - Update audio infoframe
- *
- * @encoder: drm encoder
- *
- * Gets info about current audio stream and updates audio infoframe.
+/*
+ * update settings with current parameters from audio engine
*/
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
{
@@ -413,12 +398,9 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct r600_audio_pin audio = r600_audio_status(rdev);
- uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
- struct hdmi_audio_infoframe frame;
+ struct r600_audio audio = r600_audio_status(rdev);
uint32_t offset;
- uint32_t value;
- ssize_t err;
+ uint32_t iec;
if (!dig->afmt || !dig->afmt->enabled)
return;
@@ -430,105 +412,180 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
(int)audio.status_bits, (int)audio.category_code);
- err = hdmi_audio_infoframe_init(&frame);
- if (err < 0) {
- DRM_ERROR("failed to setup audio infoframe\n");
- return;
+ iec = 0;
+ if (audio.status_bits & AUDIO_STATUS_PROFESSIONAL)
+ iec |= 1 << 0;
+ if (audio.status_bits & AUDIO_STATUS_NONAUDIO)
+ iec |= 1 << 1;
+ if (audio.status_bits & AUDIO_STATUS_COPYRIGHT)
+ iec |= 1 << 2;
+ if (audio.status_bits & AUDIO_STATUS_EMPHASIS)
+ iec |= 1 << 3;
+
+ iec |= HDMI0_60958_CS_CATEGORY_CODE(audio.category_code);
+
+ switch (audio.rate) {
+ case 32000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x3);
+ break;
+ case 44100:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x0);
+ break;
+ case 48000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x2);
+ break;
+ case 88200:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x8);
+ break;
+ case 96000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xa);
+ break;
+ case 176400:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xc);
+ break;
+ case 192000:
+ iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xe);
+ break;
}
- frame.channels = audio.channels;
+ WREG32(HDMI0_60958_0 + offset, iec);
- err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
- if (err < 0) {
- DRM_ERROR("failed to pack audio infoframe\n");
- return;
+ iec = 0;
+ switch (audio.bits_per_sample) {
+ case 16:
+ iec |= HDMI0_60958_CS_WORD_LENGTH(0x2);
+ break;
+ case 20:
+ iec |= HDMI0_60958_CS_WORD_LENGTH(0x3);
+ break;
+ case 24:
+ iec |= HDMI0_60958_CS_WORD_LENGTH(0xb);
+ break;
}
+ if (audio.status_bits & AUDIO_STATUS_V)
+ iec |= 0x5 << 16;
+ WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f);
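
The two iec words just written pack IEC 60958 channel-status fields: a 4-bit sampling-frequency code into HDMI0_60958_0 and a 4-bit word-length code into HDMI0_60958_1 (field positions per the HDMI0_60958_CS_* defines later in this diff). A small sketch of just the code lookups, mirroring the two switches above:

#include <stdint.h>
#include <stdio.h>

static uint32_t fs_code(int rate)   /* IEC 60958 sampling-frequency code */
{
	switch (rate) {
	case 32000: return 0x3;
	case 44100: return 0x0;
	case 48000: return 0x2;
	default:    return 0x0;
	}
}

static uint32_t wl_code(int bits)   /* IEC 60958 word-length code */
{
	switch (bits) {
	case 16: return 0x2;
	case 20: return 0x3;
	case 24: return 0xb;
	default: return 0x0;
	}
}

int main(void)
{
	/* fs lands at bits 27:24 of 60958_0, word length at bits 3:0 of 60958_1 */
	printf("fs field 0x%08x, wl field 0x%x\n",
	    fs_code(48000) << 24, wl_code(16));
	return 0;
}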
- value = RREG32(HDMI0_AUDIO_PACKET_CONTROL + offset);
- if (value & HDMI0_AUDIO_TEST_EN)
- WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
- value & ~HDMI0_AUDIO_TEST_EN);
-
- WREG32_OR(HDMI0_CONTROL + offset,
- HDMI0_ERROR_ACK);
-
- WREG32_AND(HDMI0_INFOFRAME_CONTROL0 + offset,
- ~HDMI0_AUDIO_INFO_SOURCE);
+ r600_hdmi_audioinfoframe(encoder, audio.channels - 1, 0, 0, 0, 0, 0, 0,
+ 0);
- r600_hdmi_update_audio_infoframe(encoder, buffer, sizeof(buffer));
-
- WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
- HDMI0_AUDIO_INFO_CONT |
- HDMI0_AUDIO_INFO_UPDATE);
+ r600_hdmi_audio_workaround(encoder);
}
/*
* enable the HDMI engine
*/
-void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
+void r600_hdmi_enable(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- u32 hdmi = HDMI0_ERROR_ACK;
+ uint32_t offset;
+ u32 hdmi;
if (!dig || !dig->afmt)
return;
+ if (ASIC_IS_DCE6(rdev))
+ return;
+
+ /* Silent, r600_hdmi_enable will raise WARN for us */
+ if (dig->afmt->enabled)
+ return;
+ offset = dig->afmt->offset;
+
/* Older chipsets require setting HDMI and routing manually */
- if (!ASIC_IS_DCE3(rdev)) {
- if (enable)
- hdmi |= HDMI0_ENABLE;
+ if (ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE3(rdev)) {
+ hdmi = HDMI0_ERROR_ACK | HDMI0_ENABLE;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- if (enable) {
- WREG32_OR(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN);
- hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
- } else {
- WREG32_AND(AVIVO_TMDSA_CNTL, ~AVIVO_TMDSA_CNTL_HDMI_EN);
- }
+ WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN,
+ ~AVIVO_TMDSA_CNTL_HDMI_EN);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- if (enable) {
- WREG32_OR(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN);
- hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
- } else {
- WREG32_AND(AVIVO_LVTMA_CNTL, ~AVIVO_LVTMA_CNTL_HDMI_EN);
- }
+ WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN,
+ ~AVIVO_LVTMA_CNTL_HDMI_EN);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
- if (enable) {
- WREG32_OR(DDIA_CNTL, DDIA_HDMI_EN);
- hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
- } else {
- WREG32_AND(DDIA_CNTL, ~DDIA_HDMI_EN);
- }
+ WREG32_P(DDIA_CNTL, DDIA_HDMI_EN, ~DDIA_HDMI_EN);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- if (enable)
- hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
+ hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
break;
default:
dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
radeon_encoder->encoder_id);
break;
}
- WREG32(HDMI0_CONTROL + dig->afmt->offset, hdmi);
+ WREG32(HDMI0_CONTROL + offset, hdmi);
}
if (rdev->irq.installed) {
/* if irq is available use it */
- /* XXX: shouldn't need this on any asics. Double check DCE2/3 */
- if (enable)
- radeon_irq_kms_enable_afmt(rdev, dig->afmt->id);
- else
- radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
+ radeon_irq_kms_enable_afmt(rdev, dig->afmt->id);
}
- dig->afmt->enabled = enable;
+ dig->afmt->enabled = true;
- DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
- enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
+ DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
+ offset, radeon_encoder->encoder_id);
}
+/*
+ * disable the HDMI engine
+ */
+void r600_hdmi_disable(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ uint32_t offset;
+
+ if (ASIC_IS_DCE6(rdev))
+ return;
+
+ /* Called for ATOM_ENCODER_MODE_HDMI only */
+ if (!dig || !dig->afmt) {
+ return;
+ }
+ if (!dig->afmt->enabled)
+ return;
+ offset = dig->afmt->offset;
+
+ DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
+ offset, radeon_encoder->encoder_id);
+
+ /* disable irq */
+ radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
+
+ /* Older chipsets not handled by AtomBIOS */
+ if (ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE3(rdev)) {
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ WREG32_P(AVIVO_TMDSA_CNTL, 0,
+ ~AVIVO_TMDSA_CNTL_HDMI_EN);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ WREG32_P(AVIVO_LVTMA_CNTL, 0,
+ ~AVIVO_LVTMA_CNTL_HDMI_EN);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ WREG32_P(DDIA_CNTL, 0, ~DDIA_HDMI_EN);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+ break;
+ default:
+ dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
+ radeon_encoder->encoder_id);
+ break;
+ }
+ WREG32(HDMI0_CONTROL + offset, HDMI0_ERROR_ACK);
+ }
+
+ dig->afmt->enabled = false;
+}
diff --git a/sys/dev/pci/drm/radeon/r600_reg.h b/sys/dev/pci/drm/radeon/r600_reg.h
index 3ef202629e7..afce6e25e43 100644
--- a/sys/dev/pci/drm/radeon/r600_reg.h
+++ b/sys/dev/pci/drm/radeon/r600_reg.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: r600_reg.h,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -31,12 +32,6 @@
#define R600_PCIE_PORT_INDEX 0x0038
#define R600_PCIE_PORT_DATA 0x003c
-#define R600_RCU_INDEX 0x0100
-#define R600_RCU_DATA 0x0104
-
-#define R600_UVD_CTX_INDEX 0xf4a0
-#define R600_UVD_CTX_DATA 0xf4a4
-
#define R600_MC_VM_FB_LOCATION 0x2180
#define R600_MC_FB_BASE_MASK 0x0000FFFF
#define R600_MC_FB_BASE_SHIFT 0
diff --git a/sys/dev/pci/drm/radeon/r600d.h b/sys/dev/pci/drm/radeon/r600d.h
index 1e8495cca41..c35244263cf 100644
--- a/sys/dev/pci/drm/radeon/r600d.h
+++ b/sys/dev/pci/drm/radeon/r600d.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: r600d.h,v 1.5 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2009 Advanced Micro Devices, Inc.
* Copyright 2009 Red Hat Inc.
@@ -44,6 +45,13 @@
#define R6XX_MAX_PIPES 8
#define R6XX_MAX_PIPES_MASK 0xff
+/* PTE flags */
+#define PTE_VALID (1 << 0)
+#define PTE_SYSTEM (1 << 1)
+#define PTE_SNOOPED (1 << 2)
+#define PTE_READABLE (1 << 5)
+#define PTE_WRITEABLE (1 << 6)
+
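
These are the bits a GART page-table entry carries; a system page the GPU may both read and write would set all five, with the page-aligned address in the upper bits. A hypothetical composition (the address and the assumption of a 4 KiB page mask are illustrative):

#include <stdint.h>
#include <stdio.h>

#define PTE_VALID     (1 << 0)
#define PTE_SYSTEM    (1 << 1)
#define PTE_SNOOPED   (1 << 2)
#define PTE_READABLE  (1 << 5)
#define PTE_WRITEABLE (1 << 6)

int main(void)
{
	uint64_t addr = 0x12345000ULL; /* made-up page-aligned system address */
	uint64_t pte = (addr & ~0xfffULL) | PTE_VALID | PTE_SYSTEM |
	    PTE_SNOOPED | PTE_READABLE | PTE_WRITEABLE;

	printf("pte = 0x%016llx\n", (unsigned long long)pte); /* ...345067 */
	return 0;
}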
/* tiling bits */
#define ARRAY_LINEAR_GENERAL 0x00000000
#define ARRAY_LINEAR_ALIGNED 0x00000001
@@ -175,8 +183,6 @@
#define CP_COHER_BASE 0x85F8
#define CP_DEBUG 0xC1FC
#define R_0086D8_CP_ME_CNTL 0x86D8
-#define S_0086D8_CP_PFP_HALT(x) (((x) & 1)<<26)
-#define C_0086D8_CP_PFP_HALT(x) ((x) & 0xFBFFFFFF)
#define S_0086D8_CP_ME_HALT(x) (((x) & 1)<<28)
#define C_0086D8_CP_ME_HALT(x) ((x) & 0xEFFFFFFF)
#define CP_ME_RAM_DATA 0xC160
@@ -295,25 +301,10 @@
#define GRBM_SOFT_RESET 0x8020
#define SOFT_RESET_CP (1<<0)
-#define CG_THERMAL_CTRL 0x7F0
-#define DIG_THERM_DPM(x) ((x) << 12)
-#define DIG_THERM_DPM_MASK 0x000FF000
-#define DIG_THERM_DPM_SHIFT 12
#define CG_THERMAL_STATUS 0x7F4
#define ASIC_T(x) ((x) << 0)
#define ASIC_T_MASK 0x1FF
#define ASIC_T_SHIFT 0
-#define CG_THERMAL_INT 0x7F8
-#define DIG_THERM_INTH(x) ((x) << 8)
-#define DIG_THERM_INTH_MASK 0x0000FF00
-#define DIG_THERM_INTH_SHIFT 8
-#define DIG_THERM_INTL(x) ((x) << 16)
-#define DIG_THERM_INTL_MASK 0x00FF0000
-#define DIG_THERM_INTL_SHIFT 16
-#define THERM_INT_MASK_HIGH (1 << 24)
-#define THERM_INT_MASK_LOW (1 << 25)
-
-#define RV770_CG_THERMAL_INT 0x734
#define HDP_HOST_PATH_CNTL 0x2C00
#define HDP_NONSURFACE_BASE 0x2C04
@@ -323,12 +314,11 @@
#define HDP_TILING_CONFIG 0x2F3C
#define HDP_DEBUG1 0x2F34
-#define MC_CONFIG 0x2000
#define MC_VM_AGP_TOP 0x2184
#define MC_VM_AGP_BOT 0x2188
#define MC_VM_AGP_BASE 0x218C
#define MC_VM_FB_LOCATION 0x2180
-#define MC_VM_L1_TLB_MCB_RD_UVD_CNTL 0x2124
+#define MC_VM_L1_TLB_MCD_RD_A_CNTL 0x219C
#define ENABLE_L1_TLB (1 << 0)
#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
#define ENABLE_L1_STRICT_ORDERING (1 << 2)
@@ -348,14 +338,12 @@
#define EFFECTIVE_L1_QUEUE_SIZE(x) (((x) & 7) << 15)
#define EFFECTIVE_L1_QUEUE_SIZE_MASK 0x00038000
#define EFFECTIVE_L1_QUEUE_SIZE_SHIFT 15
-#define MC_VM_L1_TLB_MCD_RD_A_CNTL 0x219C
#define MC_VM_L1_TLB_MCD_RD_B_CNTL 0x21A0
#define MC_VM_L1_TLB_MCB_RD_GFX_CNTL 0x21FC
#define MC_VM_L1_TLB_MCB_RD_HDP_CNTL 0x2204
#define MC_VM_L1_TLB_MCB_RD_PDMA_CNTL 0x2208
#define MC_VM_L1_TLB_MCB_RD_SEM_CNTL 0x220C
#define MC_VM_L1_TLB_MCB_RD_SYS_CNTL 0x2200
-#define MC_VM_L1_TLB_MCB_WR_UVD_CNTL 0x212c
#define MC_VM_L1_TLB_MCD_WR_A_CNTL 0x21A4
#define MC_VM_L1_TLB_MCD_WR_B_CNTL 0x21A8
#define MC_VM_L1_TLB_MCB_WR_GFX_CNTL 0x2210
@@ -369,8 +357,6 @@
#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2194
#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x2198
-#define RS_DQ_RD_RET_CONF 0x2348
-
#define PA_CL_ENHANCE 0x8A14
#define CLIP_VTX_REORDER_ENA (1 << 0)
#define NUM_CLIP_SEQ(x) ((x) << 1)
@@ -600,7 +586,6 @@
#define L2_BUSY (1 << 0)
#define WAIT_UNTIL 0x8040
-#define WAIT_CP_DMA_IDLE_bit (1 << 8)
#define WAIT_2D_IDLE_bit (1 << 14)
#define WAIT_3D_IDLE_bit (1 << 15)
#define WAIT_2D_IDLECLEAN_bit (1 << 16)
@@ -698,19 +683,15 @@
#define RLC_UCODE_ADDR 0x3f2c
#define RLC_UCODE_DATA 0x3f30
+/* new for TN */
+#define TN_RLC_SAVE_AND_RESTORE_BASE 0x3f10
+#define TN_RLC_CLEAR_STATE_RESTORE_BASE 0x3f20
+
#define SRBM_SOFT_RESET 0xe60
-# define SOFT_RESET_BIF (1 << 1)
# define SOFT_RESET_DMA (1 << 12)
# define SOFT_RESET_RLC (1 << 13)
-# define SOFT_RESET_UVD (1 << 18)
# define RV770_SOFT_RESET_DMA (1 << 20)
-#define BIF_SCRATCH0 0x5438
-
-#define BUS_CNTL 0x5420
-# define BIOS_ROM_DIS (1 << 1)
-# define VGA_COHE_SPEC_TIMER_DIS (1 << 9)
-
#define CP_INT_CNTL 0xc124
# define CNTX_BUSY_INT_ENABLE (1 << 19)
# define CNTX_EMPTY_INT_ENABLE (1 << 20)
@@ -927,37 +908,12 @@
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
-/* Audio */
-#define AZ_HOT_PLUG_CONTROL 0x7300
-# define AZ_FORCE_CODEC_WAKE (1 << 0)
-# define JACK_DETECTION_ENABLE (1 << 4)
-# define UNSOLICITED_RESPONSE_ENABLE (1 << 8)
-# define CODEC_HOT_PLUG_ENABLE (1 << 12)
-# define AUDIO_ENABLED (1 << 31)
-/* DCE3 adds */
-# define PIN0_JACK_DETECTION_ENABLE (1 << 4)
-# define PIN1_JACK_DETECTION_ENABLE (1 << 5)
-# define PIN2_JACK_DETECTION_ENABLE (1 << 6)
-# define PIN3_JACK_DETECTION_ENABLE (1 << 7)
-# define PIN0_AUDIO_ENABLED (1 << 24)
-# define PIN1_AUDIO_ENABLED (1 << 25)
-# define PIN2_AUDIO_ENABLED (1 << 26)
-# define PIN3_AUDIO_ENABLED (1 << 27)
-
-/* Audio clocks DCE 2.0/3.0 */
-#define AUDIO_DTO 0x7340
-# define AUDIO_DTO_PHASE(x) (((x) & 0xffff) << 0)
-# define AUDIO_DTO_MODULE(x) (((x) & 0xffff) << 16)
-
-/* Audio clocks DCE 3.2 */
+/* Audio clocks */
#define DCCG_AUDIO_DTO0_PHASE 0x0514
#define DCCG_AUDIO_DTO0_MODULE 0x0518
#define DCCG_AUDIO_DTO0_LOAD 0x051c
# define DTO_LOAD (1 << 31)
#define DCCG_AUDIO_DTO0_CNTL 0x0520
-# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0)
-# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7
-# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0
#define DCCG_AUDIO_DTO1_PHASE 0x0524
#define DCCG_AUDIO_DTO1_MODULE 0x0528
@@ -982,42 +938,6 @@
# define DIG_MODE_SDVO 4
#define DIG1_CNTL 0x79a0
-#define AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER 0x71bc
-#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0)
-#define SPEAKER_ALLOCATION_MASK (0x7f << 0)
-#define SPEAKER_ALLOCATION_SHIFT 0
-#define HDMI_CONNECTION (1 << 16)
-#define DP_CONNECTION (1 << 17)
-
-#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x71c8 /* LPCM */
-#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x71cc /* AC3 */
-#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x71d0 /* MPEG1 */
-#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x71d4 /* MP3 */
-#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x71d8 /* MPEG2 */
-#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x71dc /* AAC */
-#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x71e0 /* DTS */
-#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x71e4 /* ATRAC */
-#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x71e8 /* one bit audio - leave at 0 (default) */
-#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x71ec /* Dolby Digital */
-#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x71f0 /* DTS-HD */
-#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x71f4 /* MAT-MLP */
-#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x71f8 /* DTS */
-#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x71fc /* WMA Pro */
-# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
-/* max channels minus one. 7 = 8 channels */
-# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
-# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
-# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
-/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
- * bit0 = 32 kHz
- * bit1 = 44.1 kHz
- * bit2 = 48 kHz
- * bit3 = 88.2 kHz
- * bit4 = 96 kHz
- * bit5 = 176.4 kHz
- * bit6 = 192 kHz
- */
-
/* rs6xx/rs740 and r6xx share the same HDMI blocks, however, rs6xx has only one
* instance of the blocks while r6xx has 2. DCE 3.0 cards are slightly
* different due to the new DIG blocks, but also have 2 instances.
@@ -1044,18 +964,15 @@
#define HDMI0_AUDIO_PACKET_CONTROL 0x7408
# define HDMI0_AUDIO_SAMPLE_SEND (1 << 0)
# define HDMI0_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
-# define HDMI0_AUDIO_DELAY_EN_MASK (3 << 4)
# define HDMI0_AUDIO_SEND_MAX_PACKETS (1 << 8)
# define HDMI0_AUDIO_TEST_EN (1 << 12)
# define HDMI0_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
-# define HDMI0_AUDIO_PACKETS_PER_LINE_MASK (0x1f << 16)
# define HDMI0_AUDIO_CHANNEL_SWAP (1 << 24)
# define HDMI0_60958_CS_UPDATE (1 << 26)
# define HDMI0_AZ_FORMAT_WTRIG_MASK (1 << 28)
# define HDMI0_AZ_FORMAT_WTRIG_ACK (1 << 29)
#define HDMI0_AUDIO_CRC_CONTROL 0x740c
# define HDMI0_AUDIO_CRC_EN (1 << 0)
-#define DCE3_HDMI0_ACR_PACKET_CONTROL 0x740c
#define HDMI0_VBI_PACKET_CONTROL 0x7410
# define HDMI0_NULL_SEND (1 << 0)
# define HDMI0_GC_SEND (1 << 4)
@@ -1065,16 +982,14 @@
# define HDMI0_AVI_INFO_CONT (1 << 1)
# define HDMI0_AUDIO_INFO_SEND (1 << 4)
# define HDMI0_AUDIO_INFO_CONT (1 << 5)
-# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
+# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
# define HDMI0_AUDIO_INFO_UPDATE (1 << 7)
# define HDMI0_MPEG_INFO_SEND (1 << 8)
# define HDMI0_MPEG_INFO_CONT (1 << 9)
# define HDMI0_MPEG_INFO_UPDATE (1 << 10)
#define HDMI0_INFOFRAME_CONTROL1 0x7418
# define HDMI0_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
-# define HDMI0_AVI_INFO_LINE_MASK (0x3f << 0)
# define HDMI0_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
-# define HDMI0_AUDIO_INFO_LINE_MASK (0x3f << 8)
# define HDMI0_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
#define HDMI0_GENERIC_PACKET_CONTROL 0x741c
# define HDMI0_GENERIC0_SEND (1 << 0)
@@ -1083,9 +998,7 @@
# define HDMI0_GENERIC1_SEND (1 << 4)
# define HDMI0_GENERIC1_CONT (1 << 5)
# define HDMI0_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
-# define HDMI0_GENERIC0_LINE_MASK (0x3f << 16)
# define HDMI0_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
-# define HDMI0_GENERIC1_LINE_MASK (0x3f << 24)
#define HDMI0_GC 0x7428
# define HDMI0_GC_AVMUTE (1 << 0)
#define HDMI0_AVI_INFO0 0x7454
@@ -1141,22 +1054,16 @@
#define HDMI0_GENERIC1_6 0x74a8
#define HDMI0_ACR_32_0 0x74ac
# define HDMI0_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
-# define HDMI0_ACR_CTS_32_MASK (0xfffff << 12)
#define HDMI0_ACR_32_1 0x74b0
# define HDMI0_ACR_N_32(x) (((x) & 0xfffff) << 0)
-# define HDMI0_ACR_N_32_MASK (0xfffff << 0)
#define HDMI0_ACR_44_0 0x74b4
# define HDMI0_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
-# define HDMI0_ACR_CTS_44_MASK (0xfffff << 12)
#define HDMI0_ACR_44_1 0x74b8
# define HDMI0_ACR_N_44(x) (((x) & 0xfffff) << 0)
-# define HDMI0_ACR_N_44_MASK (0xfffff << 0)
#define HDMI0_ACR_48_0 0x74bc
# define HDMI0_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
-# define HDMI0_ACR_CTS_48_MASK (0xfffff << 12)
#define HDMI0_ACR_48_1 0x74c0
# define HDMI0_ACR_N_48(x) (((x) & 0xfffff) << 0)
-# define HDMI0_ACR_N_48_MASK (0xfffff << 0)
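/* The ACR pairs above carry the N/CTS values a sink uses to regenerate the
 * audio clock (128 * fs = f_TMDS * N / CTS). A minimal sketch for 48 kHz,
 * assuming the commonly recommended N and a hypothetical 25.2 MHz TMDS
 * clock; the helper name is invented for illustration:
 */
static inline void hdmi0_acr_48khz_sketch(struct radeon_device *rdev)
{
	u32 n   = 6144;                       /* recommended N for 48 kHz */
	u32 cts = 25200 * n / (128 * 48);     /* clocks in kHz; yields 25200 */
	WREG32(HDMI0_ACR_48_0, HDMI0_ACR_CTS_48(cts));
	WREG32(HDMI0_ACR_48_1, HDMI0_ACR_N_48(n));
}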
#define HDMI0_ACR_STATUS_0 0x74c4
#define HDMI0_ACR_STATUS_1 0x74c8
#define HDMI0_AUDIO_INFO0 0x74cc
@@ -1176,17 +1083,14 @@
# define HDMI0_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
# define HDMI0_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
# define HDMI0_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
-# define HDMI0_60958_CS_CHANNEL_NUMBER_L_MASK (0xf << 20)
# define HDMI0_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
# define HDMI0_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
-# define HDMI0_60958_CS_CLOCK_ACCURACY_MASK (3 << 28)
#define HDMI0_60958_1 0x74d8
# define HDMI0_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
# define HDMI0_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
# define HDMI0_60958_CS_VALID_L(x) (((x) & 1) << 16)
# define HDMI0_60958_CS_VALID_R(x) (((x) & 1) << 18)
# define HDMI0_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
-# define HDMI0_60958_CS_CHANNEL_NUMBER_R_MASK (0xf << 20)
#define HDMI0_ACR_PACKET_CONTROL 0x74dc
# define HDMI0_ACR_SEND (1 << 0)
# define HDMI0_ACR_CONT (1 << 1)
@@ -1197,7 +1101,6 @@
# define HDMI0_ACR_48 3
# define HDMI0_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
# define HDMI0_ACR_AUTO_SEND (1 << 12)
-#define DCE3_HDMI0_AUDIO_CRC_CONTROL 0x74dc
#define HDMI0_RAMP_CONTROL0 0x74e0
# define HDMI0_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
#define HDMI0_RAMP_CONTROL1 0x74e4
@@ -1238,352 +1141,22 @@
# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
-/* DCE3 FMT blocks */
-#define FMT_CONTROL 0x6700
-# define FMT_PIXEL_ENCODING (1 << 16)
- /* 0 = RGB 4:4:4 or YCbCr 4:4:4, 1 = YCbCr 4:2:2 */
-#define FMT_BIT_DEPTH_CONTROL 0x6710
-# define FMT_TRUNCATE_EN (1 << 0)
-# define FMT_TRUNCATE_DEPTH (1 << 4)
-# define FMT_SPATIAL_DITHER_EN (1 << 8)
-# define FMT_SPATIAL_DITHER_MODE(x) ((x) << 9)
-# define FMT_SPATIAL_DITHER_DEPTH (1 << 12)
-# define FMT_FRAME_RANDOM_ENABLE (1 << 13)
-# define FMT_RGB_RANDOM_ENABLE (1 << 14)
-# define FMT_HIGHPASS_RANDOM_ENABLE (1 << 15)
-# define FMT_TEMPORAL_DITHER_EN (1 << 16)
-# define FMT_TEMPORAL_DITHER_DEPTH (1 << 20)
-# define FMT_TEMPORAL_DITHER_OFFSET(x) ((x) << 21)
-# define FMT_TEMPORAL_LEVEL (1 << 24)
-# define FMT_TEMPORAL_DITHER_RESET (1 << 25)
-# define FMT_25FRC_SEL(x) ((x) << 26)
-# define FMT_50FRC_SEL(x) ((x) << 28)
-# define FMT_75FRC_SEL(x) ((x) << 30)
-#define FMT_CLAMP_CONTROL 0x672c
-# define FMT_CLAMP_DATA_EN (1 << 0)
-# define FMT_CLAMP_COLOR_FORMAT(x) ((x) << 16)
-# define FMT_CLAMP_6BPC 0
-# define FMT_CLAMP_8BPC 1
-# define FMT_CLAMP_10BPC 2
-
-/* Power management */
-#define CG_SPLL_FUNC_CNTL 0x600
-# define SPLL_RESET (1 << 0)
-# define SPLL_SLEEP (1 << 1)
-# define SPLL_REF_DIV(x) ((x) << 2)
-# define SPLL_REF_DIV_MASK (7 << 2)
-# define SPLL_FB_DIV(x) ((x) << 5)
-# define SPLL_FB_DIV_MASK (0xff << 5)
-# define SPLL_PULSEEN (1 << 13)
-# define SPLL_PULSENUM(x) ((x) << 14)
-# define SPLL_PULSENUM_MASK (3 << 14)
-# define SPLL_SW_HILEN(x) ((x) << 16)
-# define SPLL_SW_HILEN_MASK (0xf << 16)
-# define SPLL_SW_LOLEN(x) ((x) << 20)
-# define SPLL_SW_LOLEN_MASK (0xf << 20)
-# define SPLL_DIVEN (1 << 24)
-# define SPLL_BYPASS_EN (1 << 25)
-# define SPLL_CHG_STATUS (1 << 29)
-# define SPLL_CTLREQ (1 << 30)
-# define SPLL_CTLACK (1 << 31)
-
-#define GENERAL_PWRMGT 0x618
-# define GLOBAL_PWRMGT_EN (1 << 0)
-# define STATIC_PM_EN (1 << 1)
-# define MOBILE_SU (1 << 2)
-# define THERMAL_PROTECTION_DIS (1 << 3)
-# define THERMAL_PROTECTION_TYPE (1 << 4)
-# define ENABLE_GEN2PCIE (1 << 5)
-# define SW_GPIO_INDEX(x) ((x) << 6)
-# define SW_GPIO_INDEX_MASK (3 << 6)
-# define LOW_VOLT_D2_ACPI (1 << 8)
-# define LOW_VOLT_D3_ACPI (1 << 9)
-# define VOLT_PWRMGT_EN (1 << 10)
-#define CG_TPC 0x61c
-# define TPCC(x) ((x) << 0)
-# define TPCC_MASK (0x7fffff << 0)
-# define TPU(x) ((x) << 23)
-# define TPU_MASK (0x1f << 23)
-#define SCLK_PWRMGT_CNTL 0x620
-# define SCLK_PWRMGT_OFF (1 << 0)
-# define SCLK_TURNOFF (1 << 1)
-# define SPLL_TURNOFF (1 << 2)
-# define SU_SCLK_USE_BCLK (1 << 3)
-# define DYNAMIC_GFX_ISLAND_PWR_DOWN (1 << 4)
-# define DYNAMIC_GFX_ISLAND_PWR_LP (1 << 5)
-# define CLK_TURN_ON_STAGGER (1 << 6)
-# define CLK_TURN_OFF_STAGGER (1 << 7)
-# define FIR_FORCE_TREND_SEL (1 << 8)
-# define FIR_TREND_MODE (1 << 9)
-# define DYN_GFX_CLK_OFF_EN (1 << 10)
-# define VDDC3D_TURNOFF_D1 (1 << 11)
-# define VDDC3D_TURNOFF_D2 (1 << 12)
-# define VDDC3D_TURNOFF_D3 (1 << 13)
-# define SPLL_TURNOFF_D2 (1 << 14)
-# define SCLK_LOW_D1 (1 << 15)
-# define DYN_GFX_CLK_OFF_MC_EN (1 << 16)
-#define MCLK_PWRMGT_CNTL 0x624
-# define MPLL_PWRMGT_OFF (1 << 0)
-# define YCLK_TURNOFF (1 << 1)
-# define MPLL_TURNOFF (1 << 2)
-# define SU_MCLK_USE_BCLK (1 << 3)
-# define DLL_READY (1 << 4)
-# define MC_BUSY (1 << 5)
-# define MC_INT_CNTL (1 << 7)
-# define MRDCKA_SLEEP (1 << 8)
-# define MRDCKB_SLEEP (1 << 9)
-# define MRDCKC_SLEEP (1 << 10)
-# define MRDCKD_SLEEP (1 << 11)
-# define MRDCKE_SLEEP (1 << 12)
-# define MRDCKF_SLEEP (1 << 13)
-# define MRDCKG_SLEEP (1 << 14)
-# define MRDCKH_SLEEP (1 << 15)
-# define MRDCKA_RESET (1 << 16)
-# define MRDCKB_RESET (1 << 17)
-# define MRDCKC_RESET (1 << 18)
-# define MRDCKD_RESET (1 << 19)
-# define MRDCKE_RESET (1 << 20)
-# define MRDCKF_RESET (1 << 21)
-# define MRDCKG_RESET (1 << 22)
-# define MRDCKH_RESET (1 << 23)
-# define DLL_READY_READ (1 << 24)
-# define USE_DISPLAY_GAP (1 << 25)
-# define USE_DISPLAY_URGENT_NORMAL (1 << 26)
-# define USE_DISPLAY_GAP_CTXSW (1 << 27)
-# define MPLL_TURNOFF_D2 (1 << 28)
-# define USE_DISPLAY_URGENT_CTXSW (1 << 29)
-
-#define MPLL_TIME 0x634
-# define MPLL_LOCK_TIME(x) ((x) << 0)
-# define MPLL_LOCK_TIME_MASK (0xffff << 0)
-# define MPLL_RESET_TIME(x) ((x) << 16)
-# define MPLL_RESET_TIME_MASK (0xffff << 16)
-
-#define SCLK_FREQ_SETTING_STEP_0_PART1 0x648
-# define STEP_0_SPLL_POST_DIV(x) ((x) << 0)
-# define STEP_0_SPLL_POST_DIV_MASK (0xff << 0)
-# define STEP_0_SPLL_FB_DIV(x) ((x) << 8)
-# define STEP_0_SPLL_FB_DIV_MASK (0xff << 8)
-# define STEP_0_SPLL_REF_DIV(x) ((x) << 16)
-# define STEP_0_SPLL_REF_DIV_MASK (7 << 16)
-# define STEP_0_SPLL_STEP_TIME(x) ((x) << 19)
-# define STEP_0_SPLL_STEP_TIME_MASK (0x1fff << 19)
-#define SCLK_FREQ_SETTING_STEP_0_PART2 0x64c
-# define STEP_0_PULSE_HIGH_CNT(x) ((x) << 0)
-# define STEP_0_PULSE_HIGH_CNT_MASK (0x1ff << 0)
-# define STEP_0_POST_DIV_EN (1 << 9)
-# define STEP_0_SPLL_STEP_ENABLE (1 << 30)
-# define STEP_0_SPLL_ENTRY_VALID (1 << 31)
-
-#define VID_RT 0x6f8
-# define VID_CRT(x) ((x) << 0)
-# define VID_CRT_MASK (0x1fff << 0)
-# define VID_CRTU(x) ((x) << 13)
-# define VID_CRTU_MASK (7 << 13)
-# define SSTU(x) ((x) << 16)
-# define SSTU_MASK (7 << 16)
-#define CTXSW_PROFILE_INDEX 0x6fc
-# define CTXSW_FREQ_VIDS_CFG_INDEX(x) ((x) << 0)
-# define CTXSW_FREQ_VIDS_CFG_INDEX_MASK (3 << 0)
-# define CTXSW_FREQ_VIDS_CFG_INDEX_SHIFT 0
-# define CTXSW_FREQ_MCLK_CFG_INDEX(x) ((x) << 2)
-# define CTXSW_FREQ_MCLK_CFG_INDEX_MASK (3 << 2)
-# define CTXSW_FREQ_MCLK_CFG_INDEX_SHIFT 2
-# define CTXSW_FREQ_SCLK_CFG_INDEX(x) ((x) << 4)
-# define CTXSW_FREQ_SCLK_CFG_INDEX_MASK (0x1f << 4)
-# define CTXSW_FREQ_SCLK_CFG_INDEX_SHIFT 4
-# define CTXSW_FREQ_STATE_SPLL_RESET_EN (1 << 9)
-# define CTXSW_FREQ_STATE_ENABLE (1 << 10)
-# define CTXSW_FREQ_DISPLAY_WATERMARK (1 << 11)
-# define CTXSW_FREQ_GEN2PCIE_VOLT (1 << 12)
-
-#define TARGET_AND_CURRENT_PROFILE_INDEX 0x70c
-# define TARGET_PROFILE_INDEX_MASK (3 << 0)
-# define TARGET_PROFILE_INDEX_SHIFT 0
-# define CURRENT_PROFILE_INDEX_MASK (3 << 2)
-# define CURRENT_PROFILE_INDEX_SHIFT 2
-# define DYN_PWR_ENTER_INDEX(x) ((x) << 4)
-# define DYN_PWR_ENTER_INDEX_MASK (3 << 4)
-# define DYN_PWR_ENTER_INDEX_SHIFT 4
-# define CURR_MCLK_INDEX_MASK (3 << 6)
-# define CURR_MCLK_INDEX_SHIFT 6
-# define CURR_SCLK_INDEX_MASK (0x1f << 8)
-# define CURR_SCLK_INDEX_SHIFT 8
-# define CURR_VID_INDEX_MASK (3 << 13)
-# define CURR_VID_INDEX_SHIFT 13
-
-#define LOWER_GPIO_ENABLE 0x710
-#define UPPER_GPIO_ENABLE 0x714
-#define CTXSW_VID_LOWER_GPIO_CNTL 0x718
-
-#define VID_UPPER_GPIO_CNTL 0x740
-#define CG_CTX_CGTT3D_R 0x744
-# define PHC(x) ((x) << 0)
-# define PHC_MASK (0x1ff << 0)
-# define SDC(x) ((x) << 9)
-# define SDC_MASK (0x3fff << 9)
-#define CG_VDDC3D_OOR 0x748
-# define SU(x) ((x) << 23)
-# define SU_MASK (0xf << 23)
-#define CG_FTV 0x74c
-#define CG_FFCT_0 0x750
-# define UTC_0(x) ((x) << 0)
-# define UTC_0_MASK (0x3ff << 0)
-# define DTC_0(x) ((x) << 10)
-# define DTC_0_MASK (0x3ff << 10)
-
-#define CG_BSP 0x78c
-# define BSP(x) ((x) << 0)
-# define BSP_MASK (0xffff << 0)
-# define BSU(x) ((x) << 16)
-# define BSU_MASK (0xf << 16)
-#define CG_RT 0x790
-# define FLS(x) ((x) << 0)
-# define FLS_MASK (0xffff << 0)
-# define FMS(x) ((x) << 16)
-# define FMS_MASK (0xffff << 16)
-#define CG_LT 0x794
-# define FHS(x) ((x) << 0)
-# define FHS_MASK (0xffff << 0)
-#define CG_GIT 0x798
-# define CG_GICST(x) ((x) << 0)
-# define CG_GICST_MASK (0xffff << 0)
-# define CG_GIPOT(x) ((x) << 16)
-# define CG_GIPOT_MASK (0xffff << 16)
-
-#define CG_SSP 0x7a8
-# define CG_SST(x) ((x) << 0)
-# define CG_SST_MASK (0xffff << 0)
-# define CG_SSTU(x) ((x) << 16)
-# define CG_SSTU_MASK (0xf << 16)
-
-#define CG_RLC_REQ_AND_RSP 0x7c4
-# define RLC_CG_REQ_TYPE_MASK 0xf
-# define RLC_CG_REQ_TYPE_SHIFT 0
-# define CG_RLC_RSP_TYPE_MASK 0xf0
-# define CG_RLC_RSP_TYPE_SHIFT 4
-
-#define CG_FC_T 0x7cc
-# define FC_T(x) ((x) << 0)
-# define FC_T_MASK (0xffff << 0)
-# define FC_TU(x) ((x) << 16)
-# define FC_TU_MASK (0x1f << 16)
-
-#define GPIOPAD_MASK 0x1798
-#define GPIOPAD_A 0x179c
-#define GPIOPAD_EN 0x17a0
-
-#define GRBM_PWR_CNTL 0x800c
-# define REQ_TYPE_MASK 0xf
-# define REQ_TYPE_SHIFT 0
-# define RSP_TYPE_MASK 0xf0
-# define RSP_TYPE_SHIFT 4
-
-/*
- * UVD
- */
-#define UVD_SEMA_ADDR_LOW 0xef00
-#define UVD_SEMA_ADDR_HIGH 0xef04
-#define UVD_SEMA_CMD 0xef08
-
-#define UVD_GPCOM_VCPU_CMD 0xef0c
-#define UVD_GPCOM_VCPU_DATA0 0xef10
-#define UVD_GPCOM_VCPU_DATA1 0xef14
-#define UVD_ENGINE_CNTL 0xef18
-
-#define UVD_SEMA_CNTL 0xf400
-#define UVD_RB_ARB_CTRL 0xf480
-
-#define UVD_LMI_EXT40_ADDR 0xf498
-#define UVD_CGC_GATE 0xf4a8
-#define UVD_LMI_CTRL2 0xf4f4
-#define UVD_MASTINT_EN 0xf500
-#define UVD_FW_START 0xf51C
-#define UVD_LMI_ADDR_EXT 0xf594
-#define UVD_LMI_CTRL 0xf598
-#define UVD_LMI_SWAP_CNTL 0xf5b4
-#define UVD_MP_SWAP_CNTL 0xf5bC
-#define UVD_MPC_CNTL 0xf5dC
-#define UVD_MPC_SET_MUXA0 0xf5e4
-#define UVD_MPC_SET_MUXA1 0xf5e8
-#define UVD_MPC_SET_MUXB0 0xf5eC
-#define UVD_MPC_SET_MUXB1 0xf5f0
-#define UVD_MPC_SET_MUX 0xf5f4
-#define UVD_MPC_SET_ALU 0xf5f8
-
-#define UVD_VCPU_CACHE_OFFSET0 0xf608
-#define UVD_VCPU_CACHE_SIZE0 0xf60c
-#define UVD_VCPU_CACHE_OFFSET1 0xf610
-#define UVD_VCPU_CACHE_SIZE1 0xf614
-#define UVD_VCPU_CACHE_OFFSET2 0xf618
-#define UVD_VCPU_CACHE_SIZE2 0xf61c
-
-#define UVD_VCPU_CNTL 0xf660
-#define UVD_SOFT_RESET 0xf680
-#define RBC_SOFT_RESET (1<<0)
-#define LBSI_SOFT_RESET (1<<1)
-#define LMI_SOFT_RESET (1<<2)
-#define VCPU_SOFT_RESET (1<<3)
-#define CSM_SOFT_RESET (1<<5)
-#define CXW_SOFT_RESET (1<<6)
-#define TAP_SOFT_RESET (1<<7)
-#define LMI_UMC_SOFT_RESET (1<<13)
-#define UVD_RBC_IB_BASE 0xf684
-#define UVD_RBC_IB_SIZE 0xf688
-#define UVD_RBC_RB_BASE 0xf68c
-#define UVD_RBC_RB_RPTR 0xf690
-#define UVD_RBC_RB_WPTR 0xf694
-#define UVD_RBC_RB_WPTR_CNTL 0xf698
-
-#define UVD_STATUS 0xf6bc
-
-#define UVD_SEMA_TIMEOUT_STATUS 0xf6c0
-#define UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL 0xf6c4
-#define UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL 0xf6c8
-#define UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL 0xf6cc
-
-#define UVD_RBC_RB_CNTL 0xf6a4
-#define UVD_RBC_RB_RPTR_ADDR 0xf6a8
-
-#define UVD_CONTEXT_ID 0xf6f4
-
-/* rs780 only */
-#define GFX_MACRO_BYPASS_CNTL 0x30c0
-#define SPLL_BYPASS_CNTL (1 << 0)
-#define UPLL_BYPASS_CNTL (1 << 1)
-
-#define CG_UPLL_FUNC_CNTL 0x7e0
-# define UPLL_RESET_MASK 0x00000001
-# define UPLL_SLEEP_MASK 0x00000002
-# define UPLL_BYPASS_EN_MASK 0x00000004
-# define UPLL_CTLREQ_MASK 0x00000008
-# define UPLL_FB_DIV(x) ((x) << 4)
-# define UPLL_FB_DIV_MASK 0x0000FFF0
-# define UPLL_REF_DIV(x) ((x) << 16)
-# define UPLL_REF_DIV_MASK 0x003F0000
-# define UPLL_REFCLK_SRC_SEL_MASK 0x20000000
-# define UPLL_CTLACK_MASK 0x40000000
-# define UPLL_CTLACK2_MASK 0x80000000
-#define CG_UPLL_FUNC_CNTL_2 0x7e4
-# define UPLL_SW_HILEN(x) ((x) << 0)
-# define UPLL_SW_LOLEN(x) ((x) << 4)
-# define UPLL_SW_HILEN2(x) ((x) << 8)
-# define UPLL_SW_LOLEN2(x) ((x) << 12)
-# define UPLL_DIVEN_MASK 0x00010000
-# define UPLL_DIVEN2_MASK 0x00020000
-# define UPLL_SW_MASK 0x0003FFFF
-# define VCLK_SRC_SEL(x) ((x) << 20)
-# define VCLK_SRC_SEL_MASK 0x01F00000
-# define DCLK_SRC_SEL(x) ((x) << 25)
-# define DCLK_SRC_SEL_MASK 0x3E000000
-
/*
* PM4
*/
-#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
+#define PACKET_TYPE0 0
+#define PACKET_TYPE1 1
+#define PACKET_TYPE2 2
+#define PACKET_TYPE3 3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
-#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
+#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
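/* A minimal sketch of how the PM4 macros above compose and crack a packet
 * header; the register offset and count are arbitrary example values and
 * the function name is invented for illustration:
 */
static inline void pm4_header_sketch(void)
{
	u32 hdr  = PACKET0(0x8040, 2);        /* header for 3 payload dwords at reg 0x8040 */
	u32 type = CP_PACKET_GET_TYPE(hdr);   /* PACKET_TYPE0 */
	u32 ndw  = CP_PACKET_GET_COUNT(hdr);  /* 2: payload length is count + 1 dwords */
	u32 reg  = CP_PACKET0_GET_REG(hdr);   /* 0x8040 again (dword index back to bytes) */
	(void)type; (void)ndw; (void)reg;
}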
@@ -1646,7 +1219,6 @@
*/
# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
-#define PACKET3_PFP_SYNC_ME 0x42 /* r7xx+ only */
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_FULL_CACHE_ENA (1 << 20) /* r7xx+ only */
@@ -1709,14 +1281,6 @@
#define PACKET3_STRMOUT_BASE_UPDATE 0x72 /* r7xx */
#define PACKET3_SURFACE_BASE_UPDATE 0x73
-#define R_000011_K8_FB_LOCATION 0x11
-#define R_000012_MC_MISC_UMA_CNTL 0x12
-#define G_000012_K8_ADDR_EXT(x) (((x) >> 0) & 0xFF)
-#define R_0028F8_MC_INDEX 0x28F8
-#define S_0028F8_MC_IND_ADDR(x) (((x) & 0x1FF) << 0)
-#define C_0028F8_MC_IND_ADDR 0xFFFFFE00
-#define S_0028F8_MC_IND_WR_EN(x) (((x) & 0x1) << 9)
-#define R_0028FC_MC_DATA 0x28FC
#define R_008020_GRBM_SOFT_RESET 0x8020
#define S_008020_SOFT_RESET_CP(x) (((x) & 1) << 0)
@@ -1766,7 +1330,6 @@
#define G_008010_VC_BUSY(x) (((x) >> 11) & 1)
#define G_008010_DB03_CLEAN(x) (((x) >> 12) & 1)
#define G_008010_CB03_CLEAN(x) (((x) >> 13) & 1)
-#define G_008010_TA_BUSY(x) (((x) >> 14) & 1)
#define G_008010_VGT_BUSY_NO_DMA(x) (((x) >> 16) & 1)
#define G_008010_VGT_BUSY(x) (((x) >> 17) & 1)
#define G_008010_TA03_BUSY(x) (((x) >> 18) & 1)
@@ -1834,7 +1397,6 @@
#define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1)
#define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1)
#define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1)
-#define G_000E50_IH_BUSY(x) (((x) >> 17) & 1)
#define G_000E50_BIF_BUSY(x) (((x) >> 29) & 1)
#define R_000E60_SRBM_SOFT_RESET 0x0E60
#define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1)
diff --git a/sys/dev/pci/drm/radeon/radeon.h b/sys/dev/pci/drm/radeon/radeon.h
index 2acad16f0e9..748cf91ce2c 100644
--- a/sys/dev/pci/drm/radeon/radeon.h
+++ b/sys/dev/pci/drm/radeon/radeon.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon.h,v 1.20 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -60,16 +61,12 @@
* are considered as fatal)
*/
-#include <dev/pci/drm/drm_linux.h>
-
#include <dev/pci/drm/ttm/ttm_bo_api.h>
#include <dev/pci/drm/ttm/ttm_bo_driver.h>
#include <dev/pci/drm/ttm/ttm_placement.h>
#include <dev/pci/drm/ttm/ttm_module.h>
#include <dev/pci/drm/ttm/ttm_execbuf_util.h>
-#include <dev/pci/drm/drmP.h>
-
#include <dev/wscons/wsconsio.h>
#include <dev/wscons/wsdisplayvar.h>
#include <dev/rasops/rasops.h>
@@ -102,32 +99,26 @@ extern int radeon_hw_i2c;
extern int radeon_pcie_gen2;
extern int radeon_msi;
extern int radeon_lockup_timeout;
-extern int radeon_fastfb;
-extern int radeon_dpm;
-extern int radeon_aspm;
-extern int radeon_runtime_pm;
-extern int radeon_hard_reset;
-extern int radeon_vm_size;
-extern int radeon_vm_block_size;
-extern int radeon_deep_color;
-extern int radeon_use_pflipirq;
-extern int radeon_bapm;
-extern int radeon_backlight;
extern int radeon_auxch;
-extern int radeon_mst;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
* symbols.
*/
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
-#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
+#define RADEON_FENCE_JIFFIES_TIMEOUT (hz / 2)
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16
#define RADEON_DEBUGFS_MAX_COMPONENTS 32
#define RADEONFB_CONN_LIMIT 4
#define RADEON_BIOS_NUM_SCRATCH 8
+/* max number of rings */
+#define RADEON_NUM_RINGS 5
+
+/* fence seq values are set to this number when signaled */
+#define RADEON_FENCE_SIGNALED_SEQ 0LL
+
/* internal ring indices */
/* r1xx+ has gfx CP ring */
#define RADEON_RING_TYPE_GFX_INDEX 0
@@ -141,88 +132,15 @@ extern int radeon_mst;
/* cayman add a second async dma ring */
#define CAYMAN_RING_TYPE_DMA1_INDEX 4
-/* R600+ */
-#define R600_RING_TYPE_UVD_INDEX 5
-
-/* TN+ */
-#define TN_RING_TYPE_VCE1_INDEX 6
-#define TN_RING_TYPE_VCE2_INDEX 7
-
-/* max number of rings */
-#define RADEON_NUM_RINGS 8
-
-/* number of hw syncs before falling back on blocking */
-#define RADEON_NUM_SYNCS 4
-
/* hardcode these limits for now */
#define RADEON_VA_IB_OFFSET (1 << 20)
#define RADEON_VA_RESERVED_SIZE (8 << 20)
#define RADEON_IB_VM_MAX_SIZE (64 << 10)
-/* hard reset data */
-#define RADEON_ASIC_RESET_DATA 0x39d5e86b
-
/* reset flags */
#define RADEON_RESET_GFX (1 << 0)
#define RADEON_RESET_COMPUTE (1 << 1)
#define RADEON_RESET_DMA (1 << 2)
-#define RADEON_RESET_CP (1 << 3)
-#define RADEON_RESET_GRBM (1 << 4)
-#define RADEON_RESET_DMA1 (1 << 5)
-#define RADEON_RESET_RLC (1 << 6)
-#define RADEON_RESET_SEM (1 << 7)
-#define RADEON_RESET_IH (1 << 8)
-#define RADEON_RESET_VMC (1 << 9)
-#define RADEON_RESET_MC (1 << 10)
-#define RADEON_RESET_DISPLAY (1 << 11)
-
-/* CG block flags */
-#define RADEON_CG_BLOCK_GFX (1 << 0)
-#define RADEON_CG_BLOCK_MC (1 << 1)
-#define RADEON_CG_BLOCK_SDMA (1 << 2)
-#define RADEON_CG_BLOCK_UVD (1 << 3)
-#define RADEON_CG_BLOCK_VCE (1 << 4)
-#define RADEON_CG_BLOCK_HDP (1 << 5)
-#define RADEON_CG_BLOCK_BIF (1 << 6)
-
-/* CG flags */
-#define RADEON_CG_SUPPORT_GFX_MGCG (1 << 0)
-#define RADEON_CG_SUPPORT_GFX_MGLS (1 << 1)
-#define RADEON_CG_SUPPORT_GFX_CGCG (1 << 2)
-#define RADEON_CG_SUPPORT_GFX_CGLS (1 << 3)
-#define RADEON_CG_SUPPORT_GFX_CGTS (1 << 4)
-#define RADEON_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
-#define RADEON_CG_SUPPORT_GFX_CP_LS (1 << 6)
-#define RADEON_CG_SUPPORT_GFX_RLC_LS (1 << 7)
-#define RADEON_CG_SUPPORT_MC_LS (1 << 8)
-#define RADEON_CG_SUPPORT_MC_MGCG (1 << 9)
-#define RADEON_CG_SUPPORT_SDMA_LS (1 << 10)
-#define RADEON_CG_SUPPORT_SDMA_MGCG (1 << 11)
-#define RADEON_CG_SUPPORT_BIF_LS (1 << 12)
-#define RADEON_CG_SUPPORT_UVD_MGCG (1 << 13)
-#define RADEON_CG_SUPPORT_VCE_MGCG (1 << 14)
-#define RADEON_CG_SUPPORT_HDP_LS (1 << 15)
-#define RADEON_CG_SUPPORT_HDP_MGCG (1 << 16)
-
-/* PG flags */
-#define RADEON_PG_SUPPORT_GFX_PG (1 << 0)
-#define RADEON_PG_SUPPORT_GFX_SMG (1 << 1)
-#define RADEON_PG_SUPPORT_GFX_DMG (1 << 2)
-#define RADEON_PG_SUPPORT_UVD (1 << 3)
-#define RADEON_PG_SUPPORT_VCE (1 << 4)
-#define RADEON_PG_SUPPORT_CP (1 << 5)
-#define RADEON_PG_SUPPORT_GDS (1 << 6)
-#define RADEON_PG_SUPPORT_RLC_SMU_HS (1 << 7)
-#define RADEON_PG_SUPPORT_SDMA (1 << 8)
-#define RADEON_PG_SUPPORT_ACP (1 << 9)
-#define RADEON_PG_SUPPORT_SAMU (1 << 10)
-
-/* max cursor sizes (in pixels) */
-#define CURSOR_WIDTH 64
-#define CURSOR_HEIGHT 64
-
-#define CIK_CURSOR_WIDTH 128
-#define CIK_CURSOR_HEIGHT 128
/*
* Errata workarounds.
@@ -246,9 +164,8 @@ bool radeon_get_bios(struct radeon_device *rdev);
* Dummy page
*/
struct radeon_dummy_page {
- uint64_t entry;
struct drm_dmamem *dmah;
- dma_addr_t addr;
+ bus_addr_t addr;
};
int radeon_dummy_page_init(struct radeon_device *rdev);
void radeon_dummy_page_fini(struct radeon_device *rdev);
@@ -267,85 +184,27 @@ struct radeon_clock {
uint32_t default_mclk;
uint32_t default_sclk;
uint32_t default_dispclk;
- uint32_t current_dispclk;
uint32_t dp_extclk;
uint32_t max_pixel_clock;
- uint32_t vco_freq;
};
/*
* Power management
*/
int radeon_pm_init(struct radeon_device *rdev);
-int radeon_pm_late_init(struct radeon_device *rdev);
void radeon_pm_fini(struct radeon_device *rdev);
void radeon_pm_compute_clocks(struct radeon_device *rdev);
void radeon_pm_suspend(struct radeon_device *rdev);
void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
-int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
- u8 clock_type,
- u32 clock,
- bool strobe_mode,
- struct atom_clock_dividers *dividers);
-int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
- u32 clock,
- bool strobe_mode,
- struct atom_mpll_param *mpll_param);
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
-int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
- u16 voltage_level, u8 voltage_type,
- u32 *gpio_value, u32 *gpio_mask);
-void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev,
- u32 eng_clock, u32 mem_clock);
-int radeon_atom_get_voltage_step(struct radeon_device *rdev,
- u8 voltage_type, u16 *voltage_step);
-int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
- u16 voltage_id, u16 *voltage);
-int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev,
- u16 *voltage,
- u16 leakage_idx);
-int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
- u16 *leakage_id);
-int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev,
- u16 *vddc, u16 *vddci,
- u16 virtual_voltage_id,
- u16 vbios_voltage_id);
-int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
- u16 virtual_voltage_id,
- u16 *voltage);
-int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
- u8 voltage_type,
- u16 nominal_voltage,
- u16 *true_voltage);
-int radeon_atom_get_min_voltage(struct radeon_device *rdev,
- u8 voltage_type, u16 *min_voltage);
-int radeon_atom_get_max_voltage(struct radeon_device *rdev,
- u8 voltage_type, u16 *max_voltage);
-int radeon_atom_get_voltage_table(struct radeon_device *rdev,
- u8 voltage_type, u8 voltage_mode,
- struct atom_voltage_table *voltage_table);
-bool radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
- u8 voltage_type, u8 voltage_mode);
-int radeon_atom_get_svi2_info(struct radeon_device *rdev,
- u8 voltage_type,
- u8 *svd_gpio_id, u8 *svc_gpio_id);
-void radeon_atom_update_memory_dll(struct radeon_device *rdev,
- u32 mem_clock);
-void radeon_atom_set_ac_timing(struct radeon_device *rdev,
- u32 mem_clock);
-int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
- u8 module_index,
- struct atom_mc_reg_table *reg_table);
-int radeon_atom_get_memory_info(struct radeon_device *rdev,
- u8 module_index, struct atom_memory_info *mem_info);
-int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
- bool gddr5, u8 module_index,
- struct atom_memory_clock_range_table *mclk_range_table);
-int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
- u16 voltage_id, u16 *voltage);
void rs690_pm_info(struct radeon_device *rdev);
+extern int rv6xx_get_temp(struct radeon_device *rdev);
+extern int rv770_get_temp(struct radeon_device *rdev);
+extern int evergreen_get_temp(struct radeon_device *rdev);
+extern int sumo_get_temp(struct radeon_device *rdev);
+extern int si_get_temp(struct radeon_device *rdev);
extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
unsigned *bankh, unsigned *mtaspect,
unsigned *tile_split);
@@ -354,39 +213,35 @@ extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
* Fences.
*/
struct radeon_fence_driver {
- struct radeon_device *rdev;
uint32_t scratch_reg;
uint64_t gpu_addr;
volatile uint32_t *cpu_addr;
/* sync_seq is protected by ring emission lock */
uint64_t sync_seq[RADEON_NUM_RINGS];
atomic64_t last_seq;
- bool initialized, delayed_irq;
- struct delayed_work lockup_work;
+ unsigned long last_activity;
+ bool initialized;
};
struct radeon_fence {
- struct fence base;
-
- struct radeon_device *rdev;
- uint64_t seq;
+ struct radeon_device *rdev;
+ struct kref kref;
+ /* protected by radeon_fence.lock */
+ uint64_t seq;
/* RB, DMA, etc. */
- unsigned ring;
- bool is_vm_update;
-
- wait_queue_t fence_wake;
+ unsigned ring;
};
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
-void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring);
+void radeon_fence_driver_force_completion(struct radeon_device *rdev);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
-int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
struct radeon_fence **fences,
bool intr);
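/* A minimal sketch of the fence flow declared above: emit on a ring after
 * queueing work, wait, then drop the reference. radeon_fence_unref is
 * assumed from elsewhere in the driver; error handling is trimmed:
 */
static inline int fence_roundtrip_sketch(struct radeon_device *rdev)
{
	struct radeon_fence *fence = NULL;
	int r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
	if (r)
		return r;
	r = radeon_fence_wait(fence, true);   /* interruptible wait */
	radeon_fence_unref(&fence);           /* drop our kref */
	return r;
}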
@@ -449,33 +304,20 @@ struct radeon_mman {
struct ttm_bo_device bdev;
bool mem_global_referenced;
bool initialized;
-
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *vram;
- struct dentry *gtt;
-#endif
-};
-
-struct radeon_bo_list {
- struct radeon_bo *robj;
- struct ttm_validate_buffer tv;
- uint64_t gpu_offset;
- unsigned prefered_domains;
- unsigned allowed_domains;
- uint32_t tiling_flags;
};
/* bo virtual address in a specific vm */
struct radeon_bo_va {
/* protected by bo being reserved */
struct list_head bo_list;
+ uint64_t soffset;
+ uint64_t eoffset;
uint32_t flags;
- struct radeon_fence *last_pt_update;
+ bool valid;
unsigned ref_count;
- /* protected by vm mutex */
- struct interval_tree_node it;
- struct list_head vm_status;
+ /* protected by vm rwlock */
+ struct list_head vm_list;
/* constant after initialization */
struct radeon_vm *vm;
@@ -483,15 +325,14 @@ struct radeon_bo_va {
};
struct radeon_bo {
- /* Protected by gem.mutex */
+ struct drm_gem_object gem_base;
+ /* Protected by gem.rwlock */
struct list_head list;
/* Protected by tbo.reserved */
- u32 initial_domain;
- struct ttm_place placements[4];
+ u32 placements[3];
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
- u32 flags;
unsigned pin_count;
void *kptr;
u32 tiling_flags;
@@ -503,17 +344,20 @@ struct radeon_bo {
struct list_head va;
/* Constant after initialization */
struct radeon_device *rdev;
- struct drm_gem_object gem_base;
-
- struct ttm_bo_kmap_obj dma_buf_vmap;
- pid_t pid;
- struct radeon_mn *mn;
- struct list_head mn_list;
+ struct ttm_bo_kmap_obj dma_buf_vmap;
+ int vmapping_count;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
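/* For example, code holding a struct drm_gem_object can recover the wrapping
 * radeon_bo via container_of; the helper name is invented for illustration:
 */
static inline struct radeon_bo *bo_from_gem_sketch(struct drm_gem_object *gobj)
{
	return gem_to_radeon_bo(gobj);
}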
-int radeon_gem_debugfs_init(struct radeon_device *rdev);
+struct radeon_bo_list {
+ struct ttm_validate_buffer tv;
+ struct radeon_bo *bo;
+ uint64_t gpu_offset;
+ unsigned rdomain;
+ unsigned wdomain;
+ u32 tiling_flags;
+};
/* sub-allocation manager; it has to be protected by another lock.
* By design this is a helper for other parts of the driver
@@ -573,9 +417,9 @@ struct radeon_gem {
int radeon_gem_init(struct radeon_device *rdev);
void radeon_gem_fini(struct radeon_device *rdev);
-int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
+int radeon_gem_object_create(struct radeon_device *rdev, int size,
int alignment, int initial_domain,
- u32 flags, bool kernel,
+ bool discardable, bool kernel,
struct drm_gem_object **obj);
int radeon_mode_dumb_create(struct drm_file *file_priv,
@@ -584,49 +428,34 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
int radeon_mode_dumb_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
+int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
/*
* Semaphores.
*/
+/* everything here is constant */
struct radeon_semaphore {
- struct radeon_sa_bo *sa_bo;
- signed waiters;
- uint64_t gpu_addr;
+ struct radeon_sa_bo *sa_bo;
+ signed waiters;
+ uint64_t gpu_addr;
};
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore);
-bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
-bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
+int radeon_semaphore_sync_rings(struct radeon_device *rdev,
+ struct radeon_semaphore *semaphore,
+ int signaler, int waiter);
void radeon_semaphore_free(struct radeon_device *rdev,
struct radeon_semaphore **semaphore,
struct radeon_fence *fence);
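/* A minimal sketch of cross-ring ordering with the semaphore API above:
 * make one ring wait until another signals. Both rings must be locked by
 * the caller, as radeon_semaphore_sync_rings requires; the waiter index is
 * left as a parameter rather than assuming a particular DMA ring:
 */
static inline int ring_order_sketch(struct radeon_device *rdev, int waiter)
{
	struct radeon_semaphore *sem = NULL;
	int r = radeon_semaphore_create(rdev, &sem);
	if (r)
		return r;
	r = radeon_semaphore_sync_rings(rdev, sem,
					RADEON_RING_TYPE_GFX_INDEX, /* signaler */
					waiter);
	radeon_semaphore_free(rdev, &sem, NULL); /* NULL fence: free immediately */
	return r;
}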
/*
- * Synchronization
- */
-struct radeon_sync {
- struct radeon_semaphore *semaphores[RADEON_NUM_SYNCS];
- struct radeon_fence *sync_to[RADEON_NUM_RINGS];
- struct radeon_fence *last_vm_update;
-};
-
-void radeon_sync_create(struct radeon_sync *sync);
-void radeon_sync_fence(struct radeon_sync *sync,
- struct radeon_fence *fence);
-int radeon_sync_resv(struct radeon_device *rdev,
- struct radeon_sync *sync,
- struct reservation_object *resv,
- bool shared);
-int radeon_sync_rings(struct radeon_device *rdev,
- struct radeon_sync *sync,
- int waiting_ring);
-void radeon_sync_free(struct radeon_device *rdev, struct radeon_sync *sync,
- struct radeon_fence *fence);
-
-/*
* GART structures, functions & helpers
*/
struct radeon_mc;
@@ -636,14 +465,8 @@ struct radeon_mc;
#define RADEON_GPU_PAGE_SHIFT 12
#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
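/* For example, RADEON_GPU_PAGE_ALIGN rounds a byte size up to the next 4KB
 * GPU page: RADEON_GPU_PAGE_ALIGN(0x1001) == 0x2000, while an already
 * aligned size is unchanged: RADEON_GPU_PAGE_ALIGN(0x2000) == 0x2000.
 */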
-#define RADEON_GART_PAGE_DUMMY 0
-#define RADEON_GART_PAGE_VALID (1 << 0)
-#define RADEON_GART_PAGE_READ (1 << 1)
-#define RADEON_GART_PAGE_WRITE (1 << 2)
-#define RADEON_GART_PAGE_SNOOP (1 << 3)
-
struct radeon_gart {
- dma_addr_t table_addr;
+ bus_addr_t table_addr;
struct drm_dmamem *dmah;
struct radeon_bo *robj;
void *ptr;
@@ -651,7 +474,7 @@ struct radeon_gart {
unsigned num_cpu_pages;
unsigned table_size;
struct vm_page **pages;
- uint64_t *pages_entry;
+ bus_addr_t *pages_addr;
bool ready;
};
@@ -667,16 +490,17 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int pages, struct vm_page **pagelist,
- dma_addr_t *dma_addr, uint32_t flags);
+ bus_addr_t *dma_addr);
+void radeon_gart_restore(struct radeon_device *rdev);
/*
* GPU MC structures, functions & helpers
*/
struct radeon_mc {
- resource_size_t aper_size;
- resource_size_t aper_base;
- resource_size_t agp_base;
+ bus_size_t aper_size;
+ bus_addr_t aper_base;
+ bus_addr_t agp_base;
/* for some chips with <= 32MB we need to lie
* about vram size near mc fb location */
u64 mc_vram_size;
@@ -692,7 +516,6 @@ struct radeon_mc {
bool vram_is_ddr;
bool igp_sideport_enabled;
u64 gtt_base_align;
- u64 mc_mask;
};
bool radeon_combios_sideport_present(struct radeon_device *rdev);
@@ -711,40 +534,19 @@ struct radeon_scratch {
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
-/*
- * GPU doorbell structures, functions & helpers
- */
-#define RADEON_MAX_DOORBELLS 1024 /* Reserve at most 1024 doorbell slots for radeon-owned rings. */
-
-struct radeon_doorbell {
- /* doorbell mmio */
- resource_size_t base;
- resource_size_t size;
- bus_space_handle_t bsh;
- u32 num_doorbells; /* Number of doorbells actually reserved for radeon. */
- DECLARE_BITMAP(used, RADEON_MAX_DOORBELLS);
-};
-
-int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
-void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell);
-void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
- phys_addr_t *aperture_base,
- size_t *aperture_size,
- size_t *start_offset);
/*
* IRQS.
*/
-struct radeon_flip_work {
- struct work_struct flip_work;
- struct work_struct unpin_work;
- struct radeon_device *rdev;
- int crtc_id;
- uint64_t base;
+struct radeon_unpin_work {
+ struct task task;
+ struct radeon_device *rdev;
+ int crtc_id;
+ struct radeon_fence *fence;
struct drm_pending_vblank_event *event;
- struct radeon_bo *old_rbo;
- struct fence *fence;
+ struct radeon_bo *old_rbo;
+ u64 new_crtc_base;
};
struct r500_irq_stat_regs {
@@ -783,29 +585,16 @@ struct evergreen_irq_stat_regs {
u32 afmt_status6;
};
-struct cik_irq_stat_regs {
- u32 disp_int;
- u32 disp_int_cont;
- u32 disp_int_cont2;
- u32 disp_int_cont3;
- u32 disp_int_cont4;
- u32 disp_int_cont5;
- u32 disp_int_cont6;
- u32 d1grph_int;
- u32 d2grph_int;
- u32 d3grph_int;
- u32 d4grph_int;
- u32 d5grph_int;
- u32 d6grph_int;
-};
-
union radeon_irq_stat_regs {
struct r500_irq_stat_regs r500;
struct r600_irq_stat_regs r600;
struct evergreen_irq_stat_regs evergreen;
- struct cik_irq_stat_regs cik;
};
+#define RADEON_MAX_HPD_PINS 6
+#define RADEON_MAX_CRTCS 6
+#define RADEON_MAX_AFMT_BLOCKS 6
+
struct radeon_irq {
bool installed;
spinlock_t lock;
@@ -816,13 +605,11 @@ struct radeon_irq {
bool hpd[RADEON_MAX_HPD_PINS];
bool afmt[RADEON_MAX_AFMT_BLOCKS];
union radeon_irq_stat_regs stat_regs;
- bool dpm_thermal;
};
int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
-bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring);
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
@@ -830,6 +617,7 @@ void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block);
void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block);
void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
+bool radeon_msi_ok(struct radeon_device *rdev);
/*
* CP & rings.
@@ -844,46 +632,37 @@ struct radeon_ib {
struct radeon_fence *fence;
struct radeon_vm *vm;
bool is_const_ib;
- struct radeon_sync sync;
+ struct radeon_fence *sync_to[RADEON_NUM_RINGS];
+ struct radeon_semaphore *semaphore;
};
struct radeon_ring {
struct radeon_bo *ring_obj;
volatile uint32_t *ring;
+ unsigned rptr;
unsigned rptr_offs;
+ unsigned rptr_reg;
unsigned rptr_save_reg;
u64 next_rptr_gpu_addr;
volatile u32 *next_rptr_cpu_addr;
unsigned wptr;
unsigned wptr_old;
+ unsigned wptr_reg;
unsigned ring_size;
unsigned ring_free_dw;
int count_dw;
- atomic_t last_rptr;
- atomic64_t last_activity;
+ unsigned long last_activity;
+ unsigned last_rptr;
uint64_t gpu_addr;
uint32_t align_mask;
uint32_t ptr_mask;
bool ready;
+ u32 ptr_reg_shift;
+ u32 ptr_reg_mask;
u32 nop;
u32 idx;
u64 last_semaphore_signal_addr;
u64 last_semaphore_wait_addr;
- /* for CIK queues */
- u32 me;
- u32 pipe;
- u32 queue;
- struct radeon_bo *mqd_obj;
- u32 doorbell_index;
- unsigned wptr_offs;
-};
-
-struct radeon_mec {
- struct radeon_bo *hpd_eop_obj;
- u64 hpd_eop_gpu_addr;
- u32 num_pipe;
- u32 num_mec;
- u32 num_queue;
};
/*
@@ -893,75 +672,38 @@ struct radeon_mec {
/* maximum number of VMIDs */
#define RADEON_NUM_VM 16
-/* number of entries in page table */
-#define RADEON_VM_PTE_COUNT (1 << radeon_vm_block_size)
-
-/* PTBs (Page Table Blocks) need to be aligned to 32K */
-#define RADEON_VM_PTB_ALIGN_SIZE 32768
-#define RADEON_VM_PTB_ALIGN_MASK (RADEON_VM_PTB_ALIGN_SIZE - 1)
-#define RADEON_VM_PTB_ALIGN(a) (((a) + RADEON_VM_PTB_ALIGN_MASK) & ~RADEON_VM_PTB_ALIGN_MASK)
-
-#define R600_PTE_VALID (1 << 0)
-#define R600_PTE_SYSTEM (1 << 1)
-#define R600_PTE_SNOOPED (1 << 2)
-#define R600_PTE_READABLE (1 << 5)
-#define R600_PTE_WRITEABLE (1 << 6)
-
-/* PTE (Page Table Entry) fragment field for different page sizes */
-#define R600_PTE_FRAG_4KB (0 << 7)
-#define R600_PTE_FRAG_64KB (4 << 7)
-#define R600_PTE_FRAG_256KB (6 << 7)
+/* Defines the number of bits in the page table versus the page directory.
+ * A page is 4KB, so we have a 12-bit offset, 9 bits in the page table, and
+ * the remaining 19 bits in the page directory (see the sketch below). */
+#define RADEON_VM_BLOCK_SIZE 9
-/* flags needed to be set so we can copy directly from the GART table */
-#define R600_PTE_GART_MASK ( R600_PTE_READABLE | R600_PTE_WRITEABLE | \
- R600_PTE_SYSTEM | R600_PTE_VALID )
-
-struct radeon_vm_pt {
- struct radeon_bo *bo;
- uint64_t addr;
-};
-
-struct radeon_vm_id {
- unsigned id;
- uint64_t pd_gpu_addr;
- /* last flushed PD/PT update */
- struct radeon_fence *flushed_updates;
- /* last use of vmid */
- struct radeon_fence *last_id_use;
-};
+/* number of entries in page table */
+#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE)
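/* A minimal sketch of the 19/9/12 address split described above; the helper
 * names are invented for illustration, the arithmetic follows
 * RADEON_VM_BLOCK_SIZE:
 */
static inline unsigned vm_pde_sketch(u64 va)  /* page directory entry index */
{
	return va >> (12 + RADEON_VM_BLOCK_SIZE);
}
static inline unsigned vm_pte_sketch(u64 va)  /* entry index within that PT */
{
	return (va >> 12) & (RADEON_VM_PTE_COUNT - 1);
}
/* e.g. va 0x12345678: pde 0x91, pte 0x145, page offset 0x678 */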
struct radeon_vm {
- struct rwlock mutex;
-
- struct rb_root va;
-
- /* protecting invalidated and freed */
- spinlock_t status_lock;
-
- /* BOs moved, but not yet updated in the PT */
- struct list_head invalidated;
-
- /* BOs freed, but not yet updated in the PT */
- struct list_head freed;
-
- /* BOs cleared in the PT */
- struct list_head cleared;
+ struct list_head list;
+ struct list_head va;
+ unsigned id;
/* contains the page directory */
- struct radeon_bo *page_directory;
- unsigned max_pde_used;
+ struct radeon_sa_bo *page_directory;
+ uint64_t pd_gpu_addr;
/* array of page tables, one for each page directory entry */
- struct radeon_vm_pt *page_tables;
+ struct radeon_sa_bo **page_tables;
- struct radeon_bo_va *ib_bo_va;
-
- /* for id and flush management per ring */
- struct radeon_vm_id ids[RADEON_NUM_RINGS];
+ struct rwlock mutex;
+ /* last fence for cs using this vm */
+ struct radeon_fence *fence;
+ /* last flush or NULL if we still need to flush */
+ struct radeon_fence *last_flush;
};
struct radeon_vm_manager {
+ struct rwlock lock;
+ struct list_head lru_vm;
struct radeon_fence *active[RADEON_NUM_VM];
+ struct radeon_sa_manager sa_manager;
uint32_t max_pfn;
/* number of VMIDs */
unsigned nvm;
@@ -969,8 +711,6 @@ struct radeon_vm_manager {
u64 vram_base_offset;
/* is vm enabled? */
bool enabled;
- /* for hw to save the PD addr on suspend/resume */
- uint32_t saved_table_addr[RADEON_NUM_VM];
};
/*
@@ -994,29 +734,45 @@ struct r600_ih {
bool enabled;
};
+struct r600_blit_cp_primitives {
+ void (*set_render_target)(struct radeon_device *rdev, int format,
+ int w, int h, u64 gpu_addr);
+ void (*cp_set_surface_sync)(struct radeon_device *rdev,
+ u32 sync_type, u32 size,
+ u64 mc_addr);
+ void (*set_shaders)(struct radeon_device *rdev);
+ void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
+ void (*set_tex_resource)(struct radeon_device *rdev,
+ int format, int w, int h, int pitch,
+ u64 gpu_addr, u32 size);
+ void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
+ int x2, int y2);
+ void (*draw_auto)(struct radeon_device *rdev);
+ void (*set_default_state)(struct radeon_device *rdev);
+};
+
+struct r600_blit {
+ struct radeon_bo *shader_obj;
+ struct r600_blit_cp_primitives primitives;
+ int max_dim;
+ int ring_size_common;
+ int ring_size_per_loop;
+ u64 shader_gpu_addr;
+ u32 vs_offset, ps_offset;
+ u32 state_offset;
+ u32 state_len;
+};
+
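/* A minimal sketch of how the blit vtable above is driven for one copy; the
 * call order is inferred from the struct, intermediate surface syncs are
 * omitted, and the format, sizes and GPU addresses are placeholders. Assumes
 * the device embeds the struct as rdev->r600_blit:
 */
static inline void blit_copy_sketch(struct radeon_device *rdev, int fmt,
				    int w, int h, int pitch, u32 size,
				    u64 src, u64 dst, u64 vb)
{
	struct r600_blit_cp_primitives *p = &rdev->r600_blit.primitives;
	p->set_default_state(rdev);             /* base GPU state for blits */
	p->set_shaders(rdev);                   /* blit vertex/pixel shaders */
	p->set_tex_resource(rdev, fmt, w, h, pitch, src, size);
	p->set_render_target(rdev, fmt, w, h, dst);
	p->set_scissors(rdev, 0, 0, w, h);
	p->set_vtx_resource(rdev, vb);          /* quad covering the target */
	p->draw_auto(rdev);                     /* kick the draw */
}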
/*
- * RLC stuff
+ * SI RLC stuff
*/
-#include "clearstate_defs.h"
-
-struct radeon_rlc {
+struct si_rlc {
/* for power gating */
struct radeon_bo *save_restore_obj;
uint64_t save_restore_gpu_addr;
- volatile uint32_t *sr_ptr;
- const u32 *reg_list;
- u32 reg_list_size;
/* for clear state */
struct radeon_bo *clear_state_obj;
uint64_t clear_state_gpu_addr;
- volatile uint32_t *cs_ptr;
- const struct cs_section_def *cs_data;
- u32 clear_state_size;
- /* for cp tables */
- struct radeon_bo *cp_table_obj;
- uint64_t cp_table_gpu_addr;
- volatile uint32_t *cp_table_ptr;
- u32 cp_table_size;
};
int radeon_ib_get(struct radeon_device *rdev, int ring,
@@ -1024,7 +780,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
- struct radeon_ib *const_ib, bool hdp_flush);
+ struct radeon_ib *const_ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_ring_tests(struct radeon_device *rdev);
@@ -1034,22 +790,21 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
-void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp,
- bool hdp_flush);
-void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp,
- bool hdp_flush);
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_undo(struct radeon_ring *ring);
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
-void radeon_ring_lockup_update(struct radeon_device *rdev,
- struct radeon_ring *ring);
+void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
+void radeon_ring_lockup_update(struct radeon_ring *ring);
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
uint32_t **data);
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned size, uint32_t *data);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
- unsigned rptr_offs, u32 nop);
+ unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
+ u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
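/* A minimal sketch of the ring API flow above, mirroring what a ring test
 * does: reserve dwords under the lock, write a register write packet, then
 * commit. radeon_ring_write, rdev->ring[] and the per-family PACKET0 macro
 * are assumed from elsewhere; scratch is a register from radeon_scratch_get:
 */
static inline int ring_write_sketch(struct radeon_device *rdev, uint32_t scratch)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r = radeon_ring_lock(rdev, ring, 2);      /* reserve 2 dwords */
	if (r)
		return r;
	radeon_ring_write(ring, PACKET0(scratch, 0)); /* one register write */
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring);        /* bump wptr and unlock */
	return 0;
}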
@@ -1065,10 +820,23 @@ void cayman_dma_fini(struct radeon_device *rdev);
/*
* CS.
*/
+struct radeon_cs_reloc {
+ struct drm_gem_object *gobj;
+ struct radeon_bo *robj;
+ struct radeon_bo_list lobj;
+ uint32_t handle;
+ uint32_t flags;
+};
+
struct radeon_cs_chunk {
+ uint32_t chunk_id;
uint32_t length_dw;
+ int kpage_idx[2];
+ uint32_t *kpage[2];
uint32_t *kdata;
void __user *user_ptr;
+ int last_copied_page;
+ int last_page_index;
};
struct radeon_cs_parser {
@@ -1083,15 +851,15 @@ struct radeon_cs_parser {
unsigned idx;
/* relocations */
unsigned nrelocs;
- struct radeon_bo_list *relocs;
- struct radeon_bo_list *vm_bos;
+ struct radeon_cs_reloc *relocs;
+ struct radeon_cs_reloc **relocs_ptr;
struct list_head validated;
unsigned dma_reloc_idx;
/* indices of various chunks */
- struct radeon_cs_chunk *chunk_ib;
- struct radeon_cs_chunk *chunk_relocs;
- struct radeon_cs_chunk *chunk_flags;
- struct radeon_cs_chunk *chunk_const_ib;
+ int chunk_ib_idx;
+ int chunk_relocs_idx;
+ int chunk_flags_idx;
+ int chunk_const_ib_idx;
struct radeon_ib ib;
struct radeon_ib const_ib;
void *track;
@@ -1100,18 +868,10 @@ struct radeon_cs_parser {
u32 cs_flags;
u32 ring;
s32 priority;
- struct ww_acquire_ctx ticket;
};
-static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
-{
- struct radeon_cs_chunk *ibc = p->chunk_ib;
-
- if (ibc->kdata)
- return ibc->kdata[idx];
- return p->ib.ptr[idx];
-}
-
+extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
+extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
struct radeon_cs_packet {
unsigned idx;
@@ -1158,10 +918,6 @@ struct radeon_wb {
#define R600_WB_IH_WPTR_OFFSET 2048
#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304
#define R600_WB_EVENT_OFFSET 3072
-#define CIK_WB_CP1_WPTR_OFFSET 3328
-#define CIK_WB_CP2_WPTR_OFFSET 3584
-#define R600_WB_DMA_RING_TEST_OFFSET 3588
-#define CAYMAN_WB_DMA1_RING_TEST_OFFSET 3592
/**
* struct radeon_pm - power management data
@@ -1186,7 +942,6 @@ struct radeon_wb {
enum radeon_pm_method {
PM_METHOD_PROFILE,
PM_METHOD_DYNPM,
- PM_METHOD_DPM,
};
enum radeon_dynpm_state {
@@ -1212,24 +967,11 @@ enum radeon_voltage_type {
};
enum radeon_pm_state_type {
- /* not used for dpm */
POWER_STATE_TYPE_DEFAULT,
POWER_STATE_TYPE_POWERSAVE,
- /* user selectable states */
POWER_STATE_TYPE_BATTERY,
POWER_STATE_TYPE_BALANCED,
POWER_STATE_TYPE_PERFORMANCE,
- /* internal states */
- POWER_STATE_TYPE_INTERNAL_UVD,
- POWER_STATE_TYPE_INTERNAL_UVD_SD,
- POWER_STATE_TYPE_INTERNAL_UVD_HD,
- POWER_STATE_TYPE_INTERNAL_UVD_HD2,
- POWER_STATE_TYPE_INTERNAL_UVD_MVC,
- POWER_STATE_TYPE_INTERNAL_BOOT,
- POWER_STATE_TYPE_INTERNAL_THERMAL,
- POWER_STATE_TYPE_INTERNAL_ACPI,
- POWER_STATE_TYPE_INTERNAL_ULV,
- POWER_STATE_TYPE_INTERNAL_3DPERF,
};
enum radeon_pm_profile_type {
@@ -1258,18 +1000,12 @@ struct radeon_pm_profile {
enum radeon_int_thermal_type {
THERMAL_TYPE_NONE,
- THERMAL_TYPE_EXTERNAL,
- THERMAL_TYPE_EXTERNAL_GPIO,
THERMAL_TYPE_RV6XX,
THERMAL_TYPE_RV770,
- THERMAL_TYPE_ADT7473_WITH_INTERNAL,
THERMAL_TYPE_EVERGREEN,
THERMAL_TYPE_SUMO,
THERMAL_TYPE_NI,
THERMAL_TYPE_SI,
- THERMAL_TYPE_EMC2103_WITH_INTERNAL,
- THERMAL_TYPE_CI,
- THERMAL_TYPE_KV,
};
struct radeon_voltage {
@@ -1323,289 +1059,10 @@ struct radeon_power_state {
*/
#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */
-enum radeon_dpm_auto_throttle_src {
- RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL,
- RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL
-};
-
-enum radeon_dpm_event_src {
- RADEON_DPM_EVENT_SRC_ANALOG = 0,
- RADEON_DPM_EVENT_SRC_EXTERNAL = 1,
- RADEON_DPM_EVENT_SRC_DIGITAL = 2,
- RADEON_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
- RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
-};
-
-#define RADEON_MAX_VCE_LEVELS 6
-
-enum radeon_vce_level {
- RADEON_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */
- RADEON_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */
- RADEON_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */
- RADEON_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
- RADEON_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */
- RADEON_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
-};
-
-struct radeon_ps {
- u32 caps; /* vbios flags */
- u32 class; /* vbios flags */
- u32 class2; /* vbios flags */
- /* UVD clocks */
- u32 vclk;
- u32 dclk;
- /* VCE clocks */
- u32 evclk;
- u32 ecclk;
- bool vce_active;
- enum radeon_vce_level vce_level;
- /* asic priv */
- void *ps_priv;
-};
-
-struct radeon_dpm_thermal {
- /* thermal interrupt work */
- struct work_struct work;
- /* low temperature threshold */
- int min_temp;
- /* high temperature threshold */
- int max_temp;
- /* was interrupt low to high or high to low */
- bool high_to_low;
-};
-
-enum radeon_clk_action
-{
- RADEON_SCLK_UP = 1,
- RADEON_SCLK_DOWN
-};
-
-struct radeon_blacklist_clocks
-{
- u32 sclk;
- u32 mclk;
- enum radeon_clk_action action;
-};
-
-struct radeon_clock_and_voltage_limits {
- u32 sclk;
- u32 mclk;
- u16 vddc;
- u16 vddci;
-};
-
-struct radeon_clock_array {
- u32 count;
- u32 *values;
-};
-
-struct radeon_clock_voltage_dependency_entry {
- u32 clk;
- u16 v;
-};
-
-struct radeon_clock_voltage_dependency_table {
- u32 count;
- struct radeon_clock_voltage_dependency_entry *entries;
-};
-
-union radeon_cac_leakage_entry {
- struct {
- u16 vddc;
- u32 leakage;
- };
- struct {
- u16 vddc1;
- u16 vddc2;
- u16 vddc3;
- };
-};
-
-struct radeon_cac_leakage_table {
- u32 count;
- union radeon_cac_leakage_entry *entries;
-};
-
-struct radeon_phase_shedding_limits_entry {
- u16 voltage;
- u32 sclk;
- u32 mclk;
-};
-
-struct radeon_phase_shedding_limits_table {
- u32 count;
- struct radeon_phase_shedding_limits_entry *entries;
-};
-
-struct radeon_uvd_clock_voltage_dependency_entry {
- u32 vclk;
- u32 dclk;
- u16 v;
-};
-
-struct radeon_uvd_clock_voltage_dependency_table {
- u8 count;
- struct radeon_uvd_clock_voltage_dependency_entry *entries;
-};
-
-struct radeon_vce_clock_voltage_dependency_entry {
- u32 ecclk;
- u32 evclk;
- u16 v;
-};
-
-struct radeon_vce_clock_voltage_dependency_table {
- u8 count;
- struct radeon_vce_clock_voltage_dependency_entry *entries;
-};
-
-struct radeon_ppm_table {
- u8 ppm_design;
- u16 cpu_core_number;
- u32 platform_tdp;
- u32 small_ac_platform_tdp;
- u32 platform_tdc;
- u32 small_ac_platform_tdc;
- u32 apu_tdp;
- u32 dgpu_tdp;
- u32 dgpu_ulv_power;
- u32 tj_max;
-};
-
-struct radeon_cac_tdp_table {
- u16 tdp;
- u16 configurable_tdp;
- u16 tdc;
- u16 battery_power_limit;
- u16 small_power_limit;
- u16 low_cac_leakage;
- u16 high_cac_leakage;
- u16 maximum_power_delivery_limit;
-};
-
-struct radeon_dpm_dynamic_state {
- struct radeon_clock_voltage_dependency_table vddc_dependency_on_sclk;
- struct radeon_clock_voltage_dependency_table vddci_dependency_on_mclk;
- struct radeon_clock_voltage_dependency_table vddc_dependency_on_mclk;
- struct radeon_clock_voltage_dependency_table mvdd_dependency_on_mclk;
- struct radeon_clock_voltage_dependency_table vddc_dependency_on_dispclk;
- struct radeon_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
- struct radeon_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
- struct radeon_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
- struct radeon_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
- struct radeon_clock_array valid_sclk_values;
- struct radeon_clock_array valid_mclk_values;
- struct radeon_clock_and_voltage_limits max_clock_voltage_on_dc;
- struct radeon_clock_and_voltage_limits max_clock_voltage_on_ac;
- u32 mclk_sclk_ratio;
- u32 sclk_mclk_delta;
- u16 vddc_vddci_delta;
- u16 min_vddc_for_pcie_gen2;
- struct radeon_cac_leakage_table cac_leakage_table;
- struct radeon_phase_shedding_limits_table phase_shedding_limits_table;
- struct radeon_ppm_table *ppm_table;
- struct radeon_cac_tdp_table *cac_tdp_table;
-};
-
-struct radeon_dpm_fan {
- u16 t_min;
- u16 t_med;
- u16 t_high;
- u16 pwm_min;
- u16 pwm_med;
- u16 pwm_high;
- u8 t_hyst;
- u32 cycle_delay;
- u16 t_max;
- u8 control_mode;
- u16 default_max_fan_pwm;
- u16 default_fan_output_sensitivity;
- u16 fan_output_sensitivity;
- bool ucode_fan_control;
-};
-
-enum radeon_pcie_gen {
- RADEON_PCIE_GEN1 = 0,
- RADEON_PCIE_GEN2 = 1,
- RADEON_PCIE_GEN3 = 2,
- RADEON_PCIE_GEN_INVALID = 0xffff
-};
-
-enum radeon_dpm_forced_level {
- RADEON_DPM_FORCED_LEVEL_AUTO = 0,
- RADEON_DPM_FORCED_LEVEL_LOW = 1,
- RADEON_DPM_FORCED_LEVEL_HIGH = 2,
-};
-
-struct radeon_vce_state {
- /* vce clocks */
- u32 evclk;
- u32 ecclk;
- /* gpu clocks */
- u32 sclk;
- u32 mclk;
- u8 clk_idx;
- u8 pstate;
-};
-
-struct radeon_dpm {
- struct radeon_ps *ps;
- /* number of valid power states */
- int num_ps;
- /* current power state that is active */
- struct radeon_ps *current_ps;
- /* requested power state */
- struct radeon_ps *requested_ps;
- /* boot up power state */
- struct radeon_ps *boot_ps;
- /* default uvd power state */
- struct radeon_ps *uvd_ps;
- /* vce requirements */
- struct radeon_vce_state vce_states[RADEON_MAX_VCE_LEVELS];
- enum radeon_vce_level vce_level;
- enum radeon_pm_state_type state;
- enum radeon_pm_state_type user_state;
- u32 platform_caps;
- u32 voltage_response_time;
- u32 backbias_response_time;
- void *priv;
- u32 new_active_crtcs;
- int new_active_crtc_count;
- u32 current_active_crtcs;
- int current_active_crtc_count;
- bool single_display;
- struct radeon_dpm_dynamic_state dyn_state;
- struct radeon_dpm_fan fan;
- u32 tdp_limit;
- u32 near_tdp_limit;
- u32 near_tdp_limit_adjusted;
- u32 sq_ramping_threshold;
- u32 cac_leakage;
- u16 tdp_od_limit;
- u32 tdp_adjustment;
- u16 load_line_slope;
- bool power_control;
- bool ac_power;
- /* special states active */
- bool thermal_active;
- bool uvd_active;
- bool vce_active;
- /* thermal handling */
- struct radeon_dpm_thermal thermal;
- /* forced levels */
- enum radeon_dpm_forced_level forced_level;
- /* track UVD streams */
- unsigned sd;
- unsigned hd;
-};
-
-void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable);
-void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable);
-
struct radeon_pm {
- struct rwlock mutex;
+ struct rwlock mutex;
/* write locked while reprogramming mclk */
- struct rwlock mclk_lock;
+ struct rwlock mclk_lock;
u32 active_crtcs;
int active_crtc_count;
int req_vblank;
@@ -1655,120 +1112,18 @@ struct radeon_pm {
/* internal thermal controller on rv6xx+ */
enum radeon_int_thermal_type int_thermal_type;
struct device *int_hwmon_dev;
- /* fan control parameters */
- bool no_fan;
- u8 fan_pulses_per_revolution;
- u8 fan_min_rpm;
- u8 fan_max_rpm;
- /* dpm */
- bool dpm_enabled;
- bool sysfs_initialized;
- struct radeon_dpm dpm;
};
int radeon_pm_get_type_index(struct radeon_device *rdev,
enum radeon_pm_state_type ps_type,
int instance);
-/*
- * UVD
- */
-#define RADEON_MAX_UVD_HANDLES 10
-#define RADEON_UVD_STACK_SIZE (1024*1024)
-#define RADEON_UVD_HEAP_SIZE (1024*1024)
-
-struct radeon_uvd {
- struct radeon_bo *vcpu_bo;
- void *cpu_addr;
- uint64_t gpu_addr;
- atomic_t handles[RADEON_MAX_UVD_HANDLES];
- struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
- unsigned img_size[RADEON_MAX_UVD_HANDLES];
- struct delayed_work idle_work;
-};
-
-int radeon_uvd_init(struct radeon_device *rdev);
-void radeon_uvd_fini(struct radeon_device *rdev);
-int radeon_uvd_suspend(struct radeon_device *rdev);
-int radeon_uvd_resume(struct radeon_device *rdev);
-int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
- uint32_t handle, struct radeon_fence **fence);
-int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
- uint32_t handle, struct radeon_fence **fence);
-void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
- uint32_t allowed_domains);
-void radeon_uvd_free_handles(struct radeon_device *rdev,
- struct drm_file *filp);
-int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
-void radeon_uvd_note_usage(struct radeon_device *rdev);
-int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
- unsigned vclk, unsigned dclk,
- unsigned vco_min, unsigned vco_max,
- unsigned fb_factor, unsigned fb_mask,
- unsigned pd_min, unsigned pd_max,
- unsigned pd_even,
- unsigned *optimal_fb_div,
- unsigned *optimal_vclk_div,
- unsigned *optimal_dclk_div);
-int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
- unsigned cg_upll_func_cntl);
-
-/*
- * VCE
- */
-#define RADEON_MAX_VCE_HANDLES 16
-
-struct radeon_vce {
- struct radeon_bo *vcpu_bo;
- uint64_t gpu_addr;
- unsigned fw_version;
- unsigned fb_version;
- atomic_t handles[RADEON_MAX_VCE_HANDLES];
- struct drm_file *filp[RADEON_MAX_VCE_HANDLES];
- unsigned img_size[RADEON_MAX_VCE_HANDLES];
- struct delayed_work idle_work;
- uint32_t keyselect;
-};
-
-int radeon_vce_init(struct radeon_device *rdev);
-void radeon_vce_fini(struct radeon_device *rdev);
-int radeon_vce_suspend(struct radeon_device *rdev);
-int radeon_vce_resume(struct radeon_device *rdev);
-int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
- uint32_t handle, struct radeon_fence **fence);
-int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
- uint32_t handle, struct radeon_fence **fence);
-void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp);
-void radeon_vce_note_usage(struct radeon_device *rdev);
-int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size);
-int radeon_vce_cs_parse(struct radeon_cs_parser *p);
-bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait);
-void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-void radeon_vce_fence_emit(struct radeon_device *rdev,
- struct radeon_fence *fence);
-int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
-int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
-struct r600_audio_pin {
+struct r600_audio {
int channels;
int rate;
int bits_per_sample;
u8 status_bits;
u8 category_code;
- u32 offset;
- bool connected;
- u32 id;
-};
-
-struct r600_audio {
- bool enabled;
- struct r600_audio_pin pin[RADEON_MAX_AFMT_BLOCKS];
- int num_pins;
- struct radeon_audio_funcs *hdmi_funcs;
- struct radeon_audio_funcs *dp_funcs;
- struct radeon_audio_basic_funcs *funcs;
};
/*
@@ -1786,19 +1141,6 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
struct radeon_ring *cpB);
void radeon_test_syncing(struct radeon_device *rdev);
-/*
- * MMU Notifier
- */
-#if defined(CONFIG_MMU_NOTIFIER)
-int radeon_mn_register(struct radeon_bo *bo, unsigned long addr);
-void radeon_mn_unregister(struct radeon_bo *bo);
-#else
-static inline int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
-{
- return -ENODEV;
-}
-static inline void radeon_mn_unregister(struct radeon_bo *bo) {}
-#endif
/*
* Debugfs
@@ -1813,36 +1155,6 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
unsigned nfiles);
int radeon_debugfs_fence_init(struct radeon_device *rdev);
-/*
- * ASIC ring specific functions.
- */
-struct radeon_asic_ring {
- /* ring read/write ptr handling */
- u32 (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring);
- u32 (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
- void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
-
- /* validating and patching of IBs */
- int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
- int (*cs_parse)(struct radeon_cs_parser *p);
-
- /* command emit functions */
- void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
- void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
- void (*hdp_flush)(struct radeon_device *rdev, struct radeon_ring *ring);
- bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
- struct radeon_semaphore *semaphore, bool emit_wait);
- void (*vm_flush)(struct radeon_device *rdev, struct radeon_ring *ring,
- unsigned vm_id, uint64_t pd_addr);
-
- /* testing functions */
- int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
- int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
- bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
-
- /* deprecated */
- void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
-};
/*
* ASIC specific functions.
@@ -1854,46 +1166,45 @@ struct radeon_asic {
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
int (*asic_reset)(struct radeon_device *rdev);
- /* Flush the HDP cache via MMIO */
- void (*mmio_hdp_flush)(struct radeon_device *rdev);
+ /* ioctl hw specific callback. Some hw might want to perform a special
+ * operation on a specific ioctl. For instance on wait idle some hw
+ * might want to perform an HDP flush through MMIO as it seems that
+ * some R6XX/R7XX hw doesn't take the HDP flush into account if it is
+ * programmed through the ring.
+ */
+ void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
/* check if 3D engine is idle */
bool (*gui_idle)(struct radeon_device *rdev);
/* wait for mc_idle */
int (*mc_wait_for_idle)(struct radeon_device *rdev);
- /* get the reference clock */
- u32 (*get_xclk)(struct radeon_device *rdev);
- /* get the gpu clock counter */
- uint64_t (*get_gpu_clock_counter)(struct radeon_device *rdev);
- /* get register for info ioctl */
- int (*get_allowed_info_register)(struct radeon_device *rdev, u32 reg, u32 *val);
/* gart */
struct {
void (*tlb_flush)(struct radeon_device *rdev);
- uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
- void (*set_page)(struct radeon_device *rdev, unsigned i,
- uint64_t entry);
+ int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
} gart;
struct {
int (*init)(struct radeon_device *rdev);
void (*fini)(struct radeon_device *rdev);
- void (*copy_pages)(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe, uint64_t src,
- unsigned count);
- void (*write_pages)(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
- void (*set_pages)(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
- void (*pad_ib)(struct radeon_ib *ib);
+
+ u32 pt_ring_index;
+ void (*set_page)(struct radeon_device *rdev, uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
} vm;
/* ring specific callbacks */
- struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
+ struct {
+ void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+ int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
+ void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
+ void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
+ struct radeon_semaphore *semaphore, bool emit_wait);
+ int (*cs_parse)(struct radeon_cs_parser *p);
+ void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
+ int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+ int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+ bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
+ void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+ } ring[RADEON_NUM_RINGS];
/* irqs */
struct {
int (*set)(struct radeon_device *rdev);
@@ -1911,30 +1222,27 @@ struct radeon_asic {
void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
/* get backlight level */
u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
- /* audio callbacks */
- void (*hdmi_enable)(struct drm_encoder *encoder, bool enable);
- void (*hdmi_setmode)(struct drm_encoder *encoder, struct drm_display_mode *mode);
} display;
/* copy functions for bo handling */
struct {
- struct radeon_fence *(*blit)(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct reservation_object *resv);
+ int (*blit)(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
u32 blit_ring_index;
- struct radeon_fence *(*dma)(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct reservation_object *resv);
+ int (*dma)(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
u32 dma_ring_index;
/* method used for bo copy */
- struct radeon_fence *(*copy)(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct reservation_object *resv);
+ int (*copy)(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
/* ring used for bo copies */
u32 copy_ring_index;
} copy;
@@ -1952,7 +1260,7 @@ struct radeon_asic {
bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
} hpd;
- /* static power management */
+ /* power management */
struct {
void (*misc)(struct radeon_device *rdev);
void (*prepare)(struct radeon_device *rdev);
@@ -1966,41 +1274,12 @@ struct radeon_asic {
int (*get_pcie_lanes)(struct radeon_device *rdev);
void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
void (*set_clock_gating)(struct radeon_device *rdev, int enable);
- int (*set_uvd_clocks)(struct radeon_device *rdev, u32 vclk, u32 dclk);
- int (*set_vce_clocks)(struct radeon_device *rdev, u32 evclk, u32 ecclk);
- int (*get_temperature)(struct radeon_device *rdev);
} pm;
- /* dynamic power management */
- struct {
- int (*init)(struct radeon_device *rdev);
- void (*setup_asic)(struct radeon_device *rdev);
- int (*enable)(struct radeon_device *rdev);
- int (*late_enable)(struct radeon_device *rdev);
- void (*disable)(struct radeon_device *rdev);
- int (*pre_set_power_state)(struct radeon_device *rdev);
- int (*set_power_state)(struct radeon_device *rdev);
- void (*post_set_power_state)(struct radeon_device *rdev);
- void (*display_configuration_changed)(struct radeon_device *rdev);
- void (*fini)(struct radeon_device *rdev);
- u32 (*get_sclk)(struct radeon_device *rdev, bool low);
- u32 (*get_mclk)(struct radeon_device *rdev, bool low);
- void (*print_power_state)(struct radeon_device *rdev, struct radeon_ps *ps);
- void (*debugfs_print_current_performance_level)(struct radeon_device *rdev, struct seq_file *m);
- int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
- bool (*vblank_too_short)(struct radeon_device *rdev);
- void (*powergate_uvd)(struct radeon_device *rdev, bool gate);
- void (*enable_bapm)(struct radeon_device *rdev, bool enable);
- void (*fan_ctrl_set_mode)(struct radeon_device *rdev, u32 mode);
- u32 (*fan_ctrl_get_mode)(struct radeon_device *rdev);
- int (*set_fan_speed_percent)(struct radeon_device *rdev, u32 speed);
- int (*get_fan_speed_percent)(struct radeon_device *rdev, u32 *speed);
- u32 (*get_current_sclk)(struct radeon_device *rdev);
- u32 (*get_current_mclk)(struct radeon_device *rdev);
- } dpm;
/* pageflipping */
struct {
- void (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
- bool (*page_flip_pending)(struct radeon_device *rdev, int crtc);
+ void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
+ u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
+ void (*post_page_flip)(struct radeon_device *rdev, int crtc);
} pflip;
};
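
For illustration, a minimal sketch of the ioctl_wait_idle hook that the
comment in struct radeon_asic above describes: on R6XX-class parts the
flush has to go through MMIO rather than the ring. The register offset
and the macro and function names below are assumptions made for the
sketch, not part of this patch; it leans on the WREG32()/RREG32()
helpers defined later in this header.

	/* sketch: force an HDP flush via MMIO on wait-idle */
	#define HDP_MEM_COHERENCY_FLUSH_CNTL_SKETCH	0x5480	/* assumed offset */

	static void
	r600_ioctl_wait_idle_sketch(struct radeon_device *rdev, struct radeon_bo *bo)
	{
		/* writing 1 kicks the flush; the readback orders it */
		WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL_SKETCH, 0x1);
		(void)RREG32(HDP_MEM_COHERENCY_FLUSH_CNTL_SKETCH);
	}
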
@@ -2039,7 +1318,6 @@ struct r600_asic {
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
- unsigned active_simds;
};
struct rv770_asic {
@@ -2065,7 +1343,6 @@ struct rv770_asic {
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
- unsigned active_simds;
};
struct evergreen_asic {
@@ -2092,7 +1369,6 @@ struct evergreen_asic {
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
- unsigned active_simds;
};
struct cayman_asic {
@@ -2131,7 +1407,6 @@ struct cayman_asic {
unsigned multi_gpu_tile_size;
unsigned tile_config;
- unsigned active_simds;
};
struct si_asic {
@@ -2161,40 +1436,6 @@ struct si_asic {
unsigned multi_gpu_tile_size;
unsigned tile_config;
- uint32_t tile_mode_array[32];
- uint32_t active_cus;
-};
-
-struct cik_asic {
- unsigned max_shader_engines;
- unsigned max_tile_pipes;
- unsigned max_cu_per_sh;
- unsigned max_sh_per_se;
- unsigned max_backends_per_se;
- unsigned max_texture_channel_caches;
- unsigned max_gprs;
- unsigned max_gs_threads;
- unsigned max_hw_contexts;
- unsigned sc_prim_fifo_size_frontend;
- unsigned sc_prim_fifo_size_backend;
- unsigned sc_hiz_tile_fifo_size;
- unsigned sc_earlyz_tile_fifo_size;
-
- unsigned num_tile_pipes;
- unsigned backend_enable_mask;
- unsigned backend_disable_mask_per_asic;
- unsigned backend_map;
- unsigned num_texture_channel_caches;
- unsigned mem_max_burst_length_bytes;
- unsigned mem_row_size_in_kb;
- unsigned shader_engine_tile_size;
- unsigned num_gpus;
- unsigned multi_gpu_tile_size;
-
- unsigned tile_config;
- uint32_t tile_mode_array[32];
- uint32_t macrotile_mode_array[16];
- uint32_t active_cus;
};
union radeon_asic_config {
@@ -2205,7 +1446,6 @@ union radeon_asic_config {
struct evergreen_asic evergreen;
struct cayman_asic cayman;
struct si_asic si;
- struct cik_asic cik;
};
/*
@@ -2222,8 +1462,6 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
-int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp);
int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
@@ -2242,8 +1480,6 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
-int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp);
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
@@ -2315,11 +1551,9 @@ typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);
struct radeon_device {
- struct device self;
- struct device *dev;
+ struct device dev;
struct drm_device *ddev;
struct pci_dev *pdev;
- struct rwlock exclusive_lock;
pci_chipset_tag_t pc;
pcitag_t pa_tag;
@@ -2345,6 +1579,8 @@ struct radeon_device {
bus_space_handle_t memh;
#endif
+ struct rwlock exclusive_lock;
+
unsigned long fb_aper_offset;
unsigned long fb_aper_size;
@@ -2363,33 +1599,11 @@ struct radeon_device {
uint16_t bios_header_start;
struct radeon_bo *stollen_vga_memory;
/* Register mmio */
- resource_size_t rmmio_base;
- resource_size_t rmmio_size;
+ bus_addr_t rmmio_base;
+ bus_size_t rmmio_size;
/* protects concurrent MM_INDEX/DATA based register access */
spinlock_t mmio_idx_lock;
- /* protects concurrent SMC based register access */
- spinlock_t smc_idx_lock;
- /* protects concurrent PLL register access */
- spinlock_t pll_idx_lock;
- /* protects concurrent MC register access */
- spinlock_t mc_idx_lock;
- /* protects concurrent PCIE register access */
- spinlock_t pcie_idx_lock;
- /* protects concurrent PCIE_PORT register access */
- spinlock_t pciep_idx_lock;
- /* protects concurrent PIF register access */
- spinlock_t pif_idx_lock;
- /* protects concurrent CG register access */
- spinlock_t cg_idx_lock;
- /* protects concurrent UVD register access */
- spinlock_t uvd_idx_lock;
- /* protects concurrent RCU register access */
- spinlock_t rcu_idx_lock;
- /* protects concurrent DIDT register access */
- spinlock_t didt_idx_lock;
- /* protects concurrent ENDPOINT (audio) register access */
- spinlock_t end_idx_lock;
- bus_space_handle_t rmmio_bsh;
+ bus_space_handle_t rmmio;
radeon_rreg_t mc_rreg;
radeon_wreg_t mc_wreg;
radeon_rreg_t pll_rreg;
@@ -2399,18 +1613,16 @@ struct radeon_device {
radeon_wreg_t pciep_wreg;
/* io port */
bus_space_handle_t rio_mem;
- resource_size_t rio_mem_size;
+ bus_size_t rio_mem_size;
struct radeon_clock clock;
struct radeon_mc mc;
struct radeon_gart gart;
struct radeon_mode_info mode_info;
struct radeon_scratch scratch;
- struct radeon_doorbell doorbell;
struct radeon_mman mman;
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
wait_queue_head_t fence_queue;
- unsigned fence_context;
- struct rwlock ring_lock;
+ struct rwlock ring_lock;
struct radeon_ring ring[RADEON_NUM_RINGS];
bool ib_pool_ready;
struct radeon_sa_manager ring_tmp_bo;
@@ -2418,8 +1630,6 @@ struct radeon_device {
struct radeon_asic *asic;
struct radeon_gem gem;
struct radeon_pm pm;
- struct radeon_uvd uvd;
- struct radeon_vce vce;
uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
struct radeon_wb wb;
struct radeon_dummy_page dummy_page;
@@ -2427,34 +1637,31 @@ struct radeon_device {
bool suspend;
bool need_dma32;
bool accel_working;
- bool fastfb_working; /* IGP feature */
- bool needs_reset, in_reset;
struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
- const struct firmware *me_fw; /* all family ME firmware */
- const struct firmware *pfp_fw; /* r6/700 PFP firmware */
- const struct firmware *rlc_fw; /* r6/700 RLC firmware */
- const struct firmware *mc_fw; /* NI MC firmware */
- const struct firmware *ce_fw; /* SI CE firmware */
- const struct firmware *mec_fw; /* CIK MEC firmware */
- const struct firmware *mec2_fw; /* KV MEC2 firmware */
- const struct firmware *sdma_fw; /* CIK SDMA firmware */
- const struct firmware *smc_fw; /* SMC firmware */
- const struct firmware *uvd_fw; /* UVD firmware */
- const struct firmware *vce_fw; /* VCE firmware */
- bool new_fw;
+ u_char *me_fw; /* all family ME firmware */
+ size_t me_fw_size;
+ u_char *pfp_fw; /* r6/700 PFP firmware */
+ size_t pfp_fw_size;
+ u_char *rlc_fw; /* r6/700 RLC firmware */
+ size_t rlc_fw_size;
+ u_char *mc_fw; /* NI MC firmware */
+ size_t mc_fw_size;
+ u_char *ce_fw; /* SI CE firmware */
+ size_t ce_fw_size;
+ struct r600_blit r600_blit;
struct r600_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
struct r600_ih ih; /* r6/700 interrupt ring */
- struct radeon_rlc rlc;
- struct radeon_mec mec;
- struct delayed_work hotplug_work;
- struct work_struct dp_work;
- struct work_struct audio_work;
+ struct si_rlc rlc;
+ struct task hotplug_task;
+ struct task audio_task;
int num_crtc; /* number of crtcs */
struct rwlock dc_hw_i2c_mutex; /* display controller hw i2c mutex */
- bool has_uvd;
- struct r600_audio audio; /* audio stuff */
+ bool audio_enabled;
+ struct r600_audio audio_status; /* audio stuff */
+#ifdef notyet
struct notifier_block acpi_nb;
+#endif
/* only one userspace can use Hyperz features or CMASK at a time */
struct drm_file *hyperz_filp;
struct drm_file *cmask_filp;
@@ -2465,102 +1672,41 @@ struct radeon_device {
unsigned debugfs_count;
/* virtual memory */
struct radeon_vm_manager vm_manager;
- struct rwlock gpu_clock_mutex;
- /* memory stats */
- atomic64_t vram_usage;
- atomic64_t gtt_usage;
- atomic64_t num_bytes_moved;
- atomic_t gpu_reset_counter;
+ struct rwlock gpu_clock_mutex;
/* ACPI interface */
struct radeon_atif atif;
struct radeon_atcs atcs;
- /* srbm instance registers */
- struct rwlock srbm_mutex;
- /* GRBM index mutex. Protects concurrents access to GRBM index */
- struct rwlock grbm_idx_mutex;
- /* clock, powergating flags */
- u32 cg_flags;
- u32 pg_flags;
-
-#ifdef __linux__
- struct dev_pm_domain vga_pm_domain;
-#endif
- bool have_disp_power_ref;
- u32 px_quirk_flags;
-
- /* tracking pinned memory */
- u64 vram_pin_size;
- u64 gart_pin_size;
-
- /* amdkfd interface */
- struct kfd_dev *kfd;
-
- struct rwlock mn_lock;
- DECLARE_HASHTABLE(mn_hash, 7);
};
-bool radeon_is_px(struct drm_device *dev);
int radeon_device_init(struct radeon_device *rdev,
- struct drm_device *ddev,
- struct pci_dev *pdev,
- uint32_t flags);
+ struct drm_device *ddev);
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
-#define RADEON_MIN_MMIO_SIZE 0x10000
-
-uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg);
-void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v);
-static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
- bool always_indirect)
-{
- /* The mmio size is 64kb at minimum. Allows the if to be optimized out. */
- if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
- return bus_space_read_4(rdev->memt, rdev->rmmio_bsh, reg);
- else
- return r100_mm_rreg_slow(rdev, reg);
-}
-static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
- bool always_indirect)
-{
- if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
- bus_space_write_4(rdev->memt, rdev->rmmio_bsh, reg, v);
- else
- r100_mm_wreg_slow(rdev, reg, v);
-}
-
+uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+ bool always_indirect);
+void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+ bool always_indirect);
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
-u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index);
-void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
-
/*
* Cast helper
*/
-extern const struct fence_ops radeon_fence_ops;
-
-static inline struct radeon_fence *to_radeon_fence(struct fence *f)
-{
- struct radeon_fence *__f = container_of(f, struct radeon_fence, base);
-
- if (__f->base.ops == &radeon_fence_ops)
- return __f;
-
- return NULL;
-}
+#define to_radeon_fence(p) ((struct radeon_fence *)(p))
/*
* Registers read & write functions.
*/
#define RREG8(reg) \
- bus_space_read_1(rdev->memt, rdev->rmmio_bsh, (reg))
+ bus_space_read_1(rdev->memt, rdev->rmmio, (reg))
#define WREG8(reg, v) \
- bus_space_write_1(rdev->memt, rdev->rmmio_bsh, (reg), (v))
+ bus_space_write_1(rdev->memt, rdev->rmmio, (reg), (v))
#define RREG16(reg) \
- bus_space_read_2(rdev->memt, rdev->rmmio_bsh, (reg))
+ bus_space_read_2(rdev->memt, rdev->rmmio, (reg))
#define WREG16(reg, v) \
- bus_space_write_2(rdev->memt, rdev->rmmio_bsh, (reg), (v))
+ bus_space_write_2(rdev->memt, rdev->rmmio, (reg), (v))
+
#define RREG32(reg) r100_mm_rreg(rdev, (reg), false)
#define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg), false))
@@ -2574,22 +1720,8 @@ static inline struct radeon_fence *to_radeon_fence(struct fence *f)
#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
-#define RREG32_PCIE_PORT(reg) rdev->pciep_rreg(rdev, (reg))
-#define WREG32_PCIE_PORT(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
-#define RREG32_SMC(reg) tn_smc_rreg(rdev, (reg))
-#define WREG32_SMC(reg, v) tn_smc_wreg(rdev, (reg), (v))
-#define RREG32_RCU(reg) r600_rcu_rreg(rdev, (reg))
-#define WREG32_RCU(reg, v) r600_rcu_wreg(rdev, (reg), (v))
-#define RREG32_CG(reg) eg_cg_rreg(rdev, (reg))
-#define WREG32_CG(reg, v) eg_cg_wreg(rdev, (reg), (v))
-#define RREG32_PIF_PHY0(reg) eg_pif_phy0_rreg(rdev, (reg))
-#define WREG32_PIF_PHY0(reg, v) eg_pif_phy0_wreg(rdev, (reg), (v))
-#define RREG32_PIF_PHY1(reg) eg_pif_phy1_rreg(rdev, (reg))
-#define WREG32_PIF_PHY1(reg, v) eg_pif_phy1_wreg(rdev, (reg), (v))
-#define RREG32_UVD_CTX(reg) r600_uvd_ctx_rreg(rdev, (reg))
-#define WREG32_UVD_CTX(reg, v) r600_uvd_ctx_wreg(rdev, (reg), (v))
-#define RREG32_DIDT(reg) cik_didt_rreg(rdev, (reg))
-#define WREG32_DIDT(reg, v) cik_didt_wreg(rdev, (reg), (v))
+#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg))
+#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
#define WREG32_P(reg, val, mask) \
do { \
uint32_t tmp_ = RREG32(reg); \
@@ -2597,8 +1729,6 @@ static inline struct radeon_fence *to_radeon_fence(struct fence *f)
tmp_ |= ((val) & ~(mask)); \
WREG32(reg, tmp_); \
} while (0)
-#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
-#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
#define WREG32_PLL_P(reg, val, mask) \
do { \
uint32_t tmp_ = RREG32_PLL(reg); \
@@ -2606,44 +1736,27 @@ static inline struct radeon_fence *to_radeon_fence(struct fence *f)
tmp_ |= ((val) & ~(mask)); \
WREG32_PLL(reg, tmp_); \
} while (0)
-#define WREG32_SMC_P(reg, val, mask) \
- do { \
- uint32_t tmp_ = RREG32_SMC(reg); \
- tmp_ &= (mask); \
- tmp_ |= ((val) & ~(mask)); \
- WREG32_SMC(reg, tmp_); \
- } while (0)
#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false))
#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
-#define RDOORBELL32(index) cik_mm_rdoorbell(rdev, (index))
-#define WDOORBELL32(index, v) cik_mm_wdoorbell(rdev, (index), (v))
-
/*
- * Indirect registers accessors.
- * They used to be inlined, but this increases code size by ~65 kbytes.
- * Since each performs a pair of MMIO ops
- * within a spin_lock_irqsave/spin_unlock_irqrestore region,
- * the cost of call+ret is almost negligible. MMIO and locking
- * cost several dozen cycles each at best; call+ret is ~5 cycles.
+ * Indirect register accessors
*/
-uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
-void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
-u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg);
-void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
-u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg);
-void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v);
-u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg);
-void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v);
-u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg);
-void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v);
-u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg);
-void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v);
-u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg);
-void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v);
-u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg);
-void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ uint32_t r;
+
+ WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
+ r = RREG32(RADEON_PCIE_DATA);
+ return r;
+}
+
+static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
+ WREG32(RADEON_PCIE_DATA, (v));
+}
void r100_pll_errata_after_index(struct radeon_device *rdev);
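
Both inline accessors above follow the usual INDEX/DATA idiom: write the
target offset to an index port, then move the value through the paired
data port. The two MMIO accesses are not atomic, which is why the Linux
code removed above wrapped each such pair in its own spinlock. A generic
sketch of the idiom, with illustrative port parameters and assuming the
driver's WREG32()/RREG32() helpers:

	static inline uint32_t
	indirect_rreg_sketch(struct radeon_device *rdev,
	    uint32_t index_port, uint32_t data_port, uint32_t reg)
	{
		WREG32(index_port, reg);	/* select the indirect register */
		return RREG32(data_port);	/* read it through the data port */
	}

	static inline void
	indirect_wreg_sketch(struct radeon_device *rdev,
	    uint32_t index_port, uint32_t data_port, uint32_t reg, uint32_t v)
	{
		WREG32(index_port, reg);	/* select the indirect register */
		WREG32(data_port, v);		/* write it through the data port */
	}
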
@@ -2691,22 +1804,6 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
(rdev->flags & RADEON_IS_IGP))
-#define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
-#define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN))
-#define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE))
-#define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI))
-#define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE))
-#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \
- (rdev->family == CHIP_MULLINS))
-
-#define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \
- (rdev->ddev->pdev->device == 0x6850) || \
- (rdev->ddev->pdev->device == 0x6858) || \
- (rdev->ddev->pdev->device == 0x6859) || \
- (rdev->ddev->pdev->device == 0x6840) || \
- (rdev->ddev->pdev->device == 0x6841) || \
- (rdev->ddev->pdev->device == 0x6842) || \
- (rdev->ddev->pdev->device == 0x6843))
/*
* BIOS helpers.
@@ -2724,25 +1821,18 @@ void radeon_atombios_fini(struct radeon_device *rdev);
/*
* RING helpers.
*/
-
-/**
- * radeon_ring_write - write a value to the ring
- *
- * @ring: radeon_ring structure holding ring information
- * @v: dword (dw) value to write
- *
- * Write a value to the requested ring buffer (all asics).
- */
+#if !defined(DRM_DEBUG_CODE) || DRM_DEBUG_CODE == 0
static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
- if (ring->count_dw <= 0)
- DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
-
ring->ring[ring->wptr++] = v;
ring->wptr &= ring->ptr_mask;
ring->count_dw--;
ring->ring_free_dw--;
}
+#else
+/* With debugging this is just too big to inline */
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
+#endif
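
The fast path of radeon_ring_write() above depends on the ring size being
a power of two, so "wptr &= ptr_mask" wraps the write pointer without a
branch. A standalone sketch of that wrap, with the ring size assumed for
the example:

	#include <stdint.h>
	#include <stdio.h>

	#define RING_DW		16		/* assumed ring size in dwords */
	#define PTR_MASK	(RING_DW - 1)	/* valid because RING_DW is 2^n */

	int
	main(void)
	{
		uint32_t ring[RING_DW], wptr = 14;
		int i;

		for (i = 0; i < 4; i++) {
			ring[wptr++] = 0xdeadbeef + i;	/* emit one dword */
			wptr &= PTR_MASK;	/* 15, 0, 1, 2: wraps past the end */
		}
		printf("wptr ended at %u\n", wptr);	/* prints 2 */
		return 0;
	}
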
/*
* ASICs macro.
@@ -2751,40 +1841,31 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
-#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)]->cs_parse((p))
+#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
-#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
-#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
+#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
-#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
-#define radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
-#define radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
-#define radeon_asic_vm_pad_ib(rdev, ib) ((rdev)->asic->vm.pad_ib((ib)))
-#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
-#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
-#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
-#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_execute((rdev), (ib))
-#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_parse((rdev), (ib))
-#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)]->is_lockup((rdev), (cp))
-#define radeon_ring_vm_flush(rdev, r, vm_id, pd_addr) (rdev)->asic->ring[(r)->idx]->vm_flush((rdev), (r), (vm_id), (pd_addr))
-#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r))
-#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r))
-#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r))
+#define radeon_asic_vm_set_page(rdev, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (pe), (addr), (count), (incr), (flags)))
+#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
+#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
+#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
+#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
+#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
+#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
+#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm))
#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
-#define radeon_hdmi_enable(rdev, e, b) (rdev)->asic->display.hdmi_enable((e), (b))
-#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
-#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence))
-#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
-#define radeon_copy_blit(rdev, s, d, np, resv) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (resv))
-#define radeon_copy_dma(rdev, s, d, np, resv) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (resv))
-#define radeon_copy(rdev, s, d, np, resv) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (resv))
+#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
+#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
+#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
+#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
+#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
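
The macro rewrites above track a layout change in struct radeon_asic: the
reverted code kept an array of pointers to shared struct radeon_asic_ring
tables ("->ring[r]->op"), while the restored code embeds one ops struct
per ring ("->ring[r].op"). A toy model of the two layouts, all names
illustrative:

	struct ring_ops_sketch {
		int (*ring_test)(int ring);
	};

	/* reverted layout: pointers to ops tables shared across asics */
	struct asic_shared_sketch {
		struct ring_ops_sketch *ring[2];
	};

	/* restored layout: one ops table embedded per ring */
	struct asic_embedded_sketch {
		struct ring_ops_sketch ring[2];
	};

	/* dispatch differs by a single dereference:
	 *	asic->ring[r]->ring_test(r)	versus	asic->ring[r].ring_test(r)
	 */

Sharing the tables let several asics reuse one initializer (as in the
r100_gfx_ring removal further down), at the cost of an extra pointer
chase per dispatch.
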
@@ -2795,9 +1876,6 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
-#define radeon_set_uvd_clocks(rdev, v, d) (rdev)->asic->pm.set_uvd_clocks((rdev), (v), (d))
-#define radeon_set_vce_clocks(rdev, ev, ec) (rdev)->asic->pm.set_vce_clocks((rdev), (ev), (ec))
-#define radeon_get_temperature(rdev) (rdev)->asic->pm.get_temperature((rdev))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
@@ -2811,39 +1889,15 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
+#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc))
#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base))
-#define radeon_page_flip_pending(rdev, crtc) (rdev)->asic->pflip.page_flip_pending((rdev), (crtc))
+#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
-#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
-#define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev))
-#define radeon_get_allowed_info_register(rdev, r, v) (rdev)->asic->get_allowed_info_register((rdev), (r), (v))
-#define radeon_dpm_init(rdev) rdev->asic->dpm.init((rdev))
-#define radeon_dpm_setup_asic(rdev) rdev->asic->dpm.setup_asic((rdev))
-#define radeon_dpm_enable(rdev) rdev->asic->dpm.enable((rdev))
-#define radeon_dpm_late_enable(rdev) rdev->asic->dpm.late_enable((rdev))
-#define radeon_dpm_disable(rdev) rdev->asic->dpm.disable((rdev))
-#define radeon_dpm_pre_set_power_state(rdev) rdev->asic->dpm.pre_set_power_state((rdev))
-#define radeon_dpm_set_power_state(rdev) rdev->asic->dpm.set_power_state((rdev))
-#define radeon_dpm_post_set_power_state(rdev) rdev->asic->dpm.post_set_power_state((rdev))
-#define radeon_dpm_display_configuration_changed(rdev) rdev->asic->dpm.display_configuration_changed((rdev))
-#define radeon_dpm_fini(rdev) rdev->asic->dpm.fini((rdev))
-#define radeon_dpm_get_sclk(rdev, l) rdev->asic->dpm.get_sclk((rdev), (l))
-#define radeon_dpm_get_mclk(rdev, l) rdev->asic->dpm.get_mclk((rdev), (l))
-#define radeon_dpm_print_power_state(rdev, ps) rdev->asic->dpm.print_power_state((rdev), (ps))
-#define radeon_dpm_debugfs_print_current_performance_level(rdev, m) rdev->asic->dpm.debugfs_print_current_performance_level((rdev), (m))
-#define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l))
-#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
-#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g))
-#define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e))
-#define radeon_dpm_get_current_sclk(rdev) rdev->asic->dpm.get_current_sclk((rdev))
-#define radeon_dpm_get_current_mclk(rdev) rdev->asic->dpm.get_current_mclk((rdev))
/* Common functions */
/* AGP */
extern int radeon_gpu_reset(struct radeon_device *rdev);
-extern void radeon_pci_config_reset(struct radeon_device *rdev);
-extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung);
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
@@ -2861,47 +1915,32 @@ extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enabl
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
-extern int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
- uint32_t flags);
-extern bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm);
-extern bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm);
extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
-extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
-extern int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
+extern int radeon_resume_kms(struct drm_device *dev);
+extern int radeon_suspend_kms(struct drm_device *dev);
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
-extern void radeon_program_register_sequence(struct radeon_device *rdev,
- const u32 *registers,
- const u32 array_size);
+extern struct uvm_object *radeon_mmap(struct drm_device *, voff_t, vsize_t);
/*
* vm
*/
int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev);
-int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
-struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct list_head *head);
+int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
struct radeon_vm *vm, int ring);
-void radeon_vm_flush(struct radeon_device *rdev,
- struct radeon_vm *vm,
- int ring, struct radeon_fence *fence);
void radeon_vm_fence(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_fence *fence);
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
-int radeon_vm_update_page_directory(struct radeon_device *rdev,
- struct radeon_vm *vm);
-int radeon_vm_clear_freed(struct radeon_device *rdev,
- struct radeon_vm *vm);
-int radeon_vm_clear_invalids(struct radeon_device *rdev,
- struct radeon_vm *vm);
-int radeon_vm_bo_update(struct radeon_device *rdev,
- struct radeon_bo_va *bo_va,
- struct ttm_mem_reg *mem);
+int radeon_vm_bo_update_pte(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo,
+ struct ttm_mem_reg *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo *bo);
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
@@ -2913,19 +1952,11 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
struct radeon_bo_va *bo_va,
uint64_t offset,
uint32_t flags);
-void radeon_vm_bo_rmv(struct radeon_device *rdev,
- struct radeon_bo_va *bo_va);
+int radeon_vm_bo_rmv(struct radeon_device *rdev,
+ struct radeon_bo_va *bo_va);
/* audio */
-void r600_audio_update_hdmi(struct work_struct *work);
-struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
-struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
-void r600_audio_enable(struct radeon_device *rdev,
- struct r600_audio_pin *pin,
- u8 enable_mask);
-void dce6_audio_enable(struct radeon_device *rdev,
- struct r600_audio_pin *pin,
- u8 enable_mask);
+void r600_audio_update_hdmi(void *arg1);
/*
* R600 vram scratch functions
@@ -2962,6 +1993,9 @@ struct radeon_hdmi_acr {
extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
+extern void r600_hdmi_enable(struct drm_encoder *encoder);
+extern void r600_hdmi_disable(struct drm_encoder *encoder);
+extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 tiling_pipe_num,
u32 max_rb_num,
@@ -2972,6 +2006,8 @@ extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
* evergreen functions used by radeon_encoder.c
*/
+extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
+
extern int ni_init_microcode(struct radeon_device *rdev);
extern int ni_mc_load_microcode(struct radeon_device *rdev);
@@ -2979,28 +2015,11 @@ extern int ni_mc_load_microcode(struct radeon_device *rdev);
#if defined(CONFIG_ACPI)
extern int radeon_acpi_init(struct radeon_device *rdev);
extern void radeon_acpi_fini(struct radeon_device *rdev);
-extern bool radeon_acpi_is_pcie_performance_request_supported(struct radeon_device *rdev);
-extern int radeon_acpi_pcie_performance_request(struct radeon_device *rdev,
- u8 perf_req, bool advertise);
-extern int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev);
#else
static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
#endif
-int radeon_cs_packet_parse(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx);
-bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
-void radeon_cs_dump_packet(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt);
-int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_bo_list **cs_reloc,
- int nomm);
-int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
- uint32_t *vline_start_end,
- uint32_t *vline_status);
-
#include "radeon_object.h"
#endif
diff --git a/sys/dev/pci/drm/radeon/radeon_acpi.c b/sys/dev/pci/drm/radeon/radeon_acpi.c
index 4b0858c56f1..260b5650976 100644
--- a/sys/dev/pci/drm/radeon/radeon_acpi.c
+++ b/sys/dev/pci/drm/radeon/radeon_acpi.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_acpi.c,v 1.4 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2012 Advanced Micro Devices, Inc.
*
@@ -21,7 +22,6 @@
*
*/
-#include <acpi/video.h>
#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/drm_crtc_helper.h>
#include "radeon.h"
@@ -69,22 +69,6 @@ struct atcs_verify_interface {
u32 function_bits; /* supported functions bit vector */
} __packed;
-#define ATCS_VALID_FLAGS_MASK 0x3
-
-struct atcs_pref_req_input {
- u16 size; /* structure size in bytes (includes size field) */
- u16 client_id; /* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
- u16 valid_flags_mask; /* valid flags mask */
- u16 flags; /* flags */
- u8 req_type; /* request type */
- u8 perf_req; /* performance request */
-} __packed;
-
-struct atcs_pref_req_output {
- u16 size; /* structure size in bytes (includes size field) */
- u8 ret_val; /* return value */
-} __packed;
-
/* Call the ATIF method
*/
/**
@@ -360,7 +344,7 @@ int radeon_atif_handler(struct radeon_device *rdev,
return NOTIFY_DONE;
/* Check pending SBIOS requests */
- handle = ACPI_HANDLE(&rdev->pdev->dev);
+ handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
count = radeon_atif_get_sbios_requests(handle, &req);
if (count <= 0)
@@ -513,135 +497,6 @@ out:
}
/**
- * radeon_acpi_is_pcie_performance_request_supported
- *
- * @rdev: radeon_device pointer
- *
- * Check if the ATCS pcie_perf_req and pcie_dev_rdy methods
- * are supported (all asics).
- * returns true if supported, false if not.
- */
-bool radeon_acpi_is_pcie_performance_request_supported(struct radeon_device *rdev)
-{
- struct radeon_atcs *atcs = &rdev->atcs;
-
- if (atcs->functions.pcie_perf_req && atcs->functions.pcie_dev_rdy)
- return true;
-
- return false;
-}
-
-/**
- * radeon_acpi_pcie_notify_device_ready
- *
- * @rdev: radeon_device pointer
- *
- * Executes the PCIE_DEVICE_READY_NOTIFICATION method
- * (all asics).
- * returns 0 on success, error on failure.
- */
-int radeon_acpi_pcie_notify_device_ready(struct radeon_device *rdev)
-{
- acpi_handle handle;
- union acpi_object *info;
- struct radeon_atcs *atcs = &rdev->atcs;
-
- /* Get the device handle */
- handle = ACPI_HANDLE(&rdev->pdev->dev);
- if (!handle)
- return -EINVAL;
-
- if (!atcs->functions.pcie_dev_rdy)
- return -EINVAL;
-
- info = radeon_atcs_call(handle, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, NULL);
- if (!info)
- return -EIO;
-
- kfree(info);
-
- return 0;
-}
-
-/**
- * radeon_acpi_pcie_performance_request
- *
- * @rdev: radeon_device pointer
- * @perf_req: requested perf level (pcie gen speed)
- * @advertise: set advertise caps flag if set
- *
- * Executes the PCIE_PERFORMANCE_REQUEST method to
- * change the pcie gen speed (all asics).
- * returns 0 on success, error on failure.
- */
-int radeon_acpi_pcie_performance_request(struct radeon_device *rdev,
- u8 perf_req, bool advertise)
-{
- acpi_handle handle;
- union acpi_object *info;
- struct radeon_atcs *atcs = &rdev->atcs;
- struct atcs_pref_req_input atcs_input;
- struct atcs_pref_req_output atcs_output;
- struct acpi_buffer params;
- size_t size;
- u32 retry = 3;
-
- /* Get the device handle */
- handle = ACPI_HANDLE(&rdev->pdev->dev);
- if (!handle)
- return -EINVAL;
-
- if (!atcs->functions.pcie_perf_req)
- return -EINVAL;
-
- atcs_input.size = sizeof(struct atcs_pref_req_input);
- /* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
- atcs_input.client_id = rdev->pdev->devfn | (rdev->pdev->bus->number << 8);
- atcs_input.valid_flags_mask = ATCS_VALID_FLAGS_MASK;
- atcs_input.flags = ATCS_WAIT_FOR_COMPLETION;
- if (advertise)
- atcs_input.flags |= ATCS_ADVERTISE_CAPS;
- atcs_input.req_type = ATCS_PCIE_LINK_SPEED;
- atcs_input.perf_req = perf_req;
-
- params.length = sizeof(struct atcs_pref_req_input);
- params.pointer = &atcs_input;
-
- while (retry--) {
- info = radeon_atcs_call(handle, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, &params);
- if (!info)
- return -EIO;
-
- memset(&atcs_output, 0, sizeof(atcs_output));
-
- size = *(u16 *) info->buffer.pointer;
- if (size < 3) {
- DRM_INFO("ATCS buffer is too small: %zu\n", size);
- kfree(info);
- return -EINVAL;
- }
- size = min(sizeof(atcs_output), size);
-
- memcpy(&atcs_output, info->buffer.pointer, size);
-
- kfree(info);
-
- switch (atcs_output.ret_val) {
- case ATCS_REQUEST_REFUSED:
- default:
- return -EINVAL;
- case ATCS_REQUEST_COMPLETE:
- return 0;
- case ATCS_REQUEST_IN_PROGRESS:
- udelay(10);
- break;
- }
- }
-
- return 0;
-}
-
-/**
* radeon_acpi_event - handle notify events
*
* @nb: notifier block
@@ -690,7 +545,7 @@ int radeon_acpi_init(struct radeon_device *rdev)
int ret;
/* Get the device handle */
- handle = ACPI_HANDLE(&rdev->pdev->dev);
+ handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
/* No need to proceed if we're sure that ATIF is not supported */
if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
diff --git a/sys/dev/pci/drm/radeon/radeon_acpi.h b/sys/dev/pci/drm/radeon/radeon_acpi.h
index be4af76f213..2487f07b52c 100644
--- a/sys/dev/pci/drm/radeon/radeon_acpi.h
+++ b/sys/dev/pci/drm/radeon/radeon_acpi.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_acpi.h,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2012 Advanced Micro Devices, Inc.
*
diff --git a/sys/dev/pci/drm/radeon/radeon_agp.c b/sys/dev/pci/drm/radeon/radeon_agp.c
index 71192113c6f..a0be2dd4932 100644
--- a/sys/dev/pci/drm/radeon/radeon_agp.c
+++ b/sys/dev/pci/drm/radeon/radeon_agp.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_agp.c,v 1.7 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Jerome Glisse.
@@ -54,9 +55,6 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
/* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */
{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50,
PCI_VENDOR_ID_IBM, 0x0550, 1},
- /* Intel 82855PM host bridge / RV250/M9 GL [Mobility FireGL 9000/Radeon 9000] needs AGPMode 1 (Thinkpad T40p) */
- { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66,
- PCI_VENDOR_ID_IBM, 0x054d, 1},
/* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */
{ PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57,
PCI_VENDOR_ID_IBM, 0x0530, 1},
@@ -120,6 +118,9 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
/* ATI Host Bridge / RV280 [M9+] Needs AGPMode 1 (phoronix forum) */
{ PCI_VENDOR_ID_ATI, 0xcbb2, PCI_VENDOR_ID_ATI, 0x5c61,
PCI_VENDOR_ID_SONY, 0x8175, 1},
+ /* HP Host Bridge / R300 [FireGL X1] Needs AGPMode 2 (fdo #7770) */
+ { PCI_VENDOR_ID_HP, 0x122e, PCI_VENDOR_ID_ATI, 0x4e47,
+ PCI_VENDOR_ID_ATI, 0x0152, 2},
{ 0, 0, 0, 0, 0, 0, 0 },
};
#endif
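
The quirk entries in the hunk above are positional, so the added HP /
FireGL X1 line is easiest to read against the field order of struct
radeon_agpmode_quirk. The field names below are inferred from how the
list is matched and are not copied from this patch:

	struct radeon_agpmode_quirk_sketch {
		uint32_t hostbridge_vendor;	/* PCI_VENDOR_ID_HP */
		uint32_t hostbridge_device;	/* 0x122e */
		uint32_t chip_vendor;		/* PCI_VENDOR_ID_ATI */
		uint32_t chip_device;		/* 0x4e47, R300 "FireGL X1" */
		uint32_t subsys_vendor;		/* PCI_VENDOR_ID_ATI */
		uint32_t subsys_device;		/* 0x0152 */
		uint32_t default_mode;		/* clamp to AGPMode 2 */
	};
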
diff --git a/sys/dev/pci/drm/radeon/radeon_asic.c b/sys/dev/pci/drm/radeon/radeon_asic.c
index d2d6a347846..b6b24a37dc1 100644
--- a/sys/dev/pci/drm/radeon/radeon_asic.c
+++ b/sys/dev/pci/drm/radeon/radeon_asic.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_asic.c,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -119,25 +120,12 @@ static void radeon_register_accessor_init(struct radeon_device *rdev)
rdev->mc_rreg = &rs600_mc_rreg;
rdev->mc_wreg = &rs600_mc_wreg;
}
- if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
- rdev->mc_rreg = &rs780_mc_rreg;
- rdev->mc_wreg = &rs780_mc_wreg;
- }
-
- if (rdev->family >= CHIP_BONAIRE) {
- rdev->pciep_rreg = &cik_pciep_rreg;
- rdev->pciep_wreg = &cik_pciep_wreg;
- } else if (rdev->family >= CHIP_R600) {
+ if (rdev->family >= CHIP_R600) {
rdev->pciep_rreg = &r600_pciep_rreg;
rdev->pciep_wreg = &r600_pciep_wreg;
}
}
-static int radeon_invalid_get_allowed_info_register(struct radeon_device *rdev,
- u32 reg, u32 *val)
-{
- return -EINVAL;
-}
/* helper to disable agp */
/**
@@ -161,13 +149,11 @@ void radeon_agp_disable(struct radeon_device *rdev)
DRM_INFO("Forcing AGP to PCIE mode\n");
rdev->flags |= RADEON_IS_PCIE;
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
- rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
} else {
DRM_INFO("Forcing AGP to PCI mode\n");
rdev->flags |= RADEON_IS_PCI;
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
- rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
rdev->asic->gart.set_page = &r100_pci_gart_set_page;
}
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
@@ -176,21 +162,6 @@ void radeon_agp_disable(struct radeon_device *rdev)
/*
* ASIC
*/
-
-static struct radeon_asic_ring r100_gfx_ring = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r100_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r100_cs_parse,
- .ring_start = &r100_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &r100_gfx_get_rptr,
- .get_wptr = &r100_gfx_get_wptr,
- .set_wptr = &r100_gfx_set_wptr,
-};
-
static struct radeon_asic r100_asic = {
.init = &r100_init,
.fini = &r100_fini,
@@ -198,17 +169,24 @@ static struct radeon_asic r100_asic = {
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r100_asic_reset,
- .mmio_hdp_flush = NULL,
+ .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r100_mc_wait_for_idle,
- .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
- .get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &r100_gfx_ring
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r100_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r100_cs_parse,
+ .ring_start = &r100_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ }
},
.irq = {
.set = &r100_irq_set,
@@ -254,8 +232,9 @@ static struct radeon_asic r100_asic = {
.set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
+ .pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
- .page_flip_pending = &r100_page_flip_pending,
+ .post_page_flip = &r100_post_page_flip,
},
};
@@ -266,17 +245,24 @@ static struct radeon_asic r200_asic = {
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r100_asic_reset,
- .mmio_hdp_flush = NULL,
+ .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r100_mc_wait_for_idle,
- .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
- .get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &r100_gfx_ring
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r100_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r100_cs_parse,
+ .ring_start = &r100_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ }
},
.irq = {
.set = &r100_irq_set,
@@ -322,39 +308,12 @@ static struct radeon_asic r200_asic = {
.set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
+ .pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
- .page_flip_pending = &r100_page_flip_pending,
+ .post_page_flip = &r100_post_page_flip,
},
};
-static struct radeon_asic_ring r300_gfx_ring = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &r100_gfx_get_rptr,
- .get_wptr = &r100_gfx_get_wptr,
- .set_wptr = &r100_gfx_set_wptr,
-};
-
-static struct radeon_asic_ring rv515_gfx_ring = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &rv515_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &r100_gfx_get_rptr,
- .get_wptr = &r100_gfx_get_wptr,
- .set_wptr = &r100_gfx_set_wptr,
-};
-
static struct radeon_asic r300_asic = {
.init = &r300_init,
.fini = &r300_fini,
@@ -362,17 +321,24 @@ static struct radeon_asic r300_asic = {
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
- .mmio_hdp_flush = NULL,
+ .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
- .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
- .get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ }
},
.irq = {
.set = &r100_irq_set,
@@ -418,8 +384,9 @@ static struct radeon_asic r300_asic = {
.set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
+ .pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
- .page_flip_pending = &r100_page_flip_pending,
+ .post_page_flip = &r100_post_page_flip,
},
};

@@ -430,17 +397,24 @@ static struct radeon_asic r300_asic_pcie = {
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
- .mmio_hdp_flush = NULL,
+ .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
- .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
- .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ }
},
.irq = {
.set = &r100_irq_set,
@@ -486,8 +460,9 @@ static struct radeon_asic r300_asic_pcie = {
.set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
+ .pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
- .page_flip_pending = &r100_page_flip_pending,
+ .post_page_flip = &r100_post_page_flip,
},
};

@@ -498,17 +473,24 @@ static struct radeon_asic r420_asic = {
.resume = &r420_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
- .mmio_hdp_flush = NULL,
+ .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
- .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
- .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ }
},
.irq = {
.set = &r100_irq_set,
@@ -554,8 +536,9 @@ static struct radeon_asic r420_asic = {
.set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
+ .pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
- .page_flip_pending = &r100_page_flip_pending,
+ .post_page_flip = &r100_post_page_flip,
},
};

@@ -566,17 +549,24 @@ static struct radeon_asic rs400_asic = {
.resume = &rs400_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
- .mmio_hdp_flush = NULL,
+ .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs400_mc_wait_for_idle,
- .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
- .get_page_entry = &rs400_gart_get_page_entry,
.set_page = &rs400_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ }
},
.irq = {
.set = &r100_irq_set,
@@ -622,8 +612,9 @@ static struct radeon_asic rs400_asic = {
.set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
+ .pre_page_flip = &r100_pre_page_flip,
.page_flip = &r100_page_flip,
- .page_flip_pending = &r100_page_flip_pending,
+ .post_page_flip = &r100_post_page_flip,
},
};

@@ -634,17 +625,24 @@ static struct radeon_asic rs600_asic = {
.resume = &rs600_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
- .mmio_hdp_flush = NULL,
+ .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs600_mc_wait_for_idle,
- .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &rs600_gart_tlb_flush,
- .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ }
},
.irq = {
.set = &rs600_irq_set,
@@ -690,8 +688,9 @@ static struct radeon_asic rs600_asic = {
.set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
+ .pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
- .page_flip_pending = &rs600_page_flip_pending,
+ .post_page_flip = &rs600_post_page_flip,
},
};
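
Every .pflip hunk in this file swaps page_flip/page_flip_pending back to the older three-hook protocol. A rough sketch of how a caller drives the restored hooks (hypothetical reduced signatures; the real hooks take the radeon device and CRTC state):

    struct pflip_funcs {
            void (*pre_page_flip)(int crtc);
            void (*page_flip)(int crtc, unsigned long crtc_base);
            void (*post_page_flip)(int crtc);
    };

    static void do_page_flip(const struct pflip_funcs *p, int crtc,
        unsigned long base)
    {
            p->pre_page_flip(crtc);   /* arm, e.g. enable the vblank irq */
            p->page_flip(crtc, base); /* latch the new scanout address */
            p->post_page_flip(crtc);  /* disarm once the flip is done */
    }

In the removed scheme the middle step was instead followed by polling page_flip_pending until the hardware latched the new base.
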
@@ -702,17 +701,24 @@ static struct radeon_asic rs690_asic = {
.resume = &rs690_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
- .mmio_hdp_flush = NULL,
+ .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs690_mc_wait_for_idle,
- .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
- .get_page_entry = &rs400_gart_get_page_entry,
.set_page = &rs400_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ }
},
.irq = {
.set = &rs600_irq_set,
@@ -758,8 +764,9 @@ static struct radeon_asic rs690_asic = {
.set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
+ .pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
- .page_flip_pending = &rs600_page_flip_pending,
+ .post_page_flip = &rs600_post_page_flip,
},
};

@@ -770,17 +777,24 @@ static struct radeon_asic rv515_asic = {
.resume = &rv515_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
- .mmio_hdp_flush = NULL,
+ .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rv515_mc_wait_for_idle,
- .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
- .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &rv515_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ }
},
.irq = {
.set = &rs600_irq_set,
@@ -826,8 +840,9 @@ static struct radeon_asic rv515_asic = {
.set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
+ .pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
- .page_flip_pending = &rs600_page_flip_pending,
+ .post_page_flip = &rs600_post_page_flip,
},
};

@@ -838,17 +853,24 @@ static struct radeon_asic r520_asic = {
.resume = &r520_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
- .mmio_hdp_flush = NULL,
+ .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r520_mc_wait_for_idle,
- .get_allowed_info_register = radeon_invalid_get_allowed_info_register,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
- .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &rv515_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ }
},
.irq = {
.set = &rs600_irq_set,
@@ -894,37 +916,12 @@ static struct radeon_asic r520_asic = {
.set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
+ .pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
- .page_flip_pending = &rs600_page_flip_pending,
+ .post_page_flip = &rs600_post_page_flip,
},
};

-static struct radeon_asic_ring r600_gfx_ring = {
- .ib_execute = &r600_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &r600_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &r600_gfx_is_lockup,
- .get_rptr = &r600_gfx_get_rptr,
- .get_wptr = &r600_gfx_get_wptr,
- .set_wptr = &r600_gfx_set_wptr,
-};
-
-static struct radeon_asic_ring r600_dma_ring = {
- .ib_execute = &r600_dma_ring_ib_execute,
- .emit_fence = &r600_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &r600_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
- .get_rptr = &r600_dma_get_rptr,
- .get_wptr = &r600_dma_get_wptr,
- .set_wptr = &r600_dma_set_wptr,
-};
-
static struct radeon_asic r600_asic = {
.init = &r600_init,
.fini = &r600_fini,
@@ -932,106 +929,32 @@ static struct radeon_asic r600_asic = {
.resume = &r600_resume,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
- .mmio_hdp_flush = r600_mmio_hdp_flush,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
- .get_xclk = &r600_get_xclk,
- .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
- .get_allowed_info_register = r600_get_allowed_info_register,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
- .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
- [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
- },
- .irq = {
- .set = &r600_irq_set,
- .process = &r600_irq_process,
- },
- .display = {
- .bandwidth_update = &rv515_bandwidth_update,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .wait_for_vblank = &avivo_wait_for_vblank,
- .set_backlight_level = &atombios_set_backlight_level,
- .get_backlight_level = &atombios_get_backlight_level,
- },
- .copy = {
- .blit = &r600_copy_cpdma,
- .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = &r600_copy_dma,
- .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
- .copy = &r600_copy_cpdma,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- },
- .surface = {
- .set_reg = r600_set_surface_reg,
- .clear_reg = r600_clear_surface_reg,
- },
- .hpd = {
- .init = &r600_hpd_init,
- .fini = &r600_hpd_fini,
- .sense = &r600_hpd_sense,
- .set_polarity = &r600_hpd_set_polarity,
- },
- .pm = {
- .misc = &r600_pm_misc,
- .prepare = &rs600_pm_prepare,
- .finish = &rs600_pm_finish,
- .init_profile = &r600_pm_init_profile,
- .get_dynpm_state = &r600_pm_get_dynpm_state,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &r600_get_pcie_lanes,
- .set_pcie_lanes = &r600_set_pcie_lanes,
- .set_clock_gating = NULL,
- .get_temperature = &rv6xx_get_temp,
- },
- .pflip = {
- .page_flip = &rs600_page_flip,
- .page_flip_pending = &rs600_page_flip_pending,
- },
-};
-
-static struct radeon_asic_ring rv6xx_uvd_ring = {
- .ib_execute = &uvd_v1_0_ib_execute,
- .emit_fence = &uvd_v1_0_fence_emit,
- .emit_semaphore = &uvd_v1_0_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &uvd_v1_0_ring_test,
- .ib_test = &uvd_v1_0_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &uvd_v1_0_get_rptr,
- .get_wptr = &uvd_v1_0_get_wptr,
- .set_wptr = &uvd_v1_0_set_wptr,
-};
-
-static struct radeon_asic rv6xx_asic = {
- .init = &r600_init,
- .fini = &r600_fini,
- .suspend = &r600_suspend,
- .resume = &r600_resume,
- .vga_set_state = &r600_vga_set_state,
- .asic_reset = &r600_asic_reset,
- .mmio_hdp_flush = r600_mmio_hdp_flush,
- .gui_idle = &r600_gui_idle,
- .mc_wait_for_idle = &r600_mc_wait_for_idle,
- .get_xclk = &r600_get_xclk,
- .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
- .get_allowed_info_register = r600_get_allowed_info_register,
- .gart = {
- .tlb_flush = &r600_pcie_gart_tlb_flush,
- .get_page_entry = &rs600_gart_get_page_entry,
- .set_page = &rs600_gart_set_page,
- },
- .ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
- [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
- [R600_RING_TYPE_UVD_INDEX] = &rv6xx_uvd_ring,
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r600_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &r600_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &r600_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &r600_dma_ring_ib_execute,
+ .emit_fence = &r600_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &r600_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
+ }
},
.irq = {
.set = &r600_irq_set,
@@ -1045,12 +968,12 @@ static struct radeon_asic rv6xx_asic = {
.get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
- .blit = &r600_copy_cpdma,
+ .blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r600_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
- .copy = &r600_copy_cpdma,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r600_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1075,31 +998,11 @@ static struct radeon_asic rv6xx_asic = {
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
- .get_temperature = &rv6xx_get_temp,
- .set_uvd_clocks = &r600_set_uvd_clocks,
- },
- .dpm = {
- .init = &rv6xx_dpm_init,
- .setup_asic = &rv6xx_setup_asic,
- .enable = &rv6xx_dpm_enable,
- .late_enable = &r600_dpm_late_enable,
- .disable = &rv6xx_dpm_disable,
- .pre_set_power_state = &r600_dpm_pre_set_power_state,
- .set_power_state = &rv6xx_dpm_set_power_state,
- .post_set_power_state = &r600_dpm_post_set_power_state,
- .display_configuration_changed = &rv6xx_dpm_display_configuration_changed,
- .fini = &rv6xx_dpm_fini,
- .get_sclk = &rv6xx_dpm_get_sclk,
- .get_mclk = &rv6xx_dpm_get_mclk,
- .print_power_state = &rv6xx_dpm_print_power_state,
- .debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level,
- .force_performance_level = &rv6xx_dpm_force_performance_level,
- .get_current_sclk = &rv6xx_dpm_get_current_sclk,
- .get_current_mclk = &rv6xx_dpm_get_current_mclk,
},
.pflip = {
+ .pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
- .page_flip_pending = &rs600_page_flip_pending,
+ .post_page_flip = &rs600_post_page_flip,
},
};
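
From r600 onward the .copy table is reverted too: .blit goes back from the CP-DMA packet path (r600_copy_cpdma) to r600_copy_blit, and the preferred .copy method moves from the GFX ring to the async DMA ring. A sketch of how the method/ring-index pairs are meant to be consumed (hypothetical caller with a reduced signature, not driver code):

    #include <stdint.h>

    typedef int (*copy_fn)(uint64_t src, uint64_t dst, unsigned num_pages);

    struct copy_funcs {
            copy_fn blit; int blit_ring_index; /* GFX-ring blit path */
            copy_fn dma;  int dma_ring_index;  /* DMA-engine path */
            copy_fn copy; int copy_ring_index; /* preferred default */
    };

    static int buffer_copy(const struct copy_funcs *c,
        uint64_t src, uint64_t dst, unsigned num_pages)
    {
            if (c->copy)    /* default method; fence lives on copy_ring_index */
                    return c->copy(src, dst, num_pages);
            if (c->dma)     /* otherwise fall back to whatever engine exists */
                    return c->dma(src, dst, num_pages);
            return c->blit ? c->blit(src, dst, num_pages) : -1;
    }
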
@@ -1110,21 +1013,32 @@ static struct radeon_asic rs780_asic = {
.resume = &r600_resume,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
- .mmio_hdp_flush = r600_mmio_hdp_flush,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
- .get_xclk = &r600_get_xclk,
- .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
- .get_allowed_info_register = r600_get_allowed_info_register,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
- .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
- [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
- [R600_RING_TYPE_UVD_INDEX] = &rv6xx_uvd_ring,
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r600_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &r600_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &r600_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &r600_dma_ring_ib_execute,
+ .emit_fence = &r600_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &r600_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
+ }
},
.irq = {
.set = &r600_irq_set,
@@ -1138,12 +1052,12 @@ static struct radeon_asic rs780_asic = {
.get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
- .blit = &r600_copy_cpdma,
+ .blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r600_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
- .copy = &r600_copy_cpdma,
- .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .copy = &r600_copy_dma,
+ .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
@@ -1168,47 +1082,14 @@ static struct radeon_asic rs780_asic = {
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
- .get_temperature = &rv6xx_get_temp,
- .set_uvd_clocks = &r600_set_uvd_clocks,
- },
- .dpm = {
- .init = &rs780_dpm_init,
- .setup_asic = &rs780_dpm_setup_asic,
- .enable = &rs780_dpm_enable,
- .late_enable = &r600_dpm_late_enable,
- .disable = &rs780_dpm_disable,
- .pre_set_power_state = &r600_dpm_pre_set_power_state,
- .set_power_state = &rs780_dpm_set_power_state,
- .post_set_power_state = &r600_dpm_post_set_power_state,
- .display_configuration_changed = &rs780_dpm_display_configuration_changed,
- .fini = &rs780_dpm_fini,
- .get_sclk = &rs780_dpm_get_sclk,
- .get_mclk = &rs780_dpm_get_mclk,
- .print_power_state = &rs780_dpm_print_power_state,
- .debugfs_print_current_performance_level = &rs780_dpm_debugfs_print_current_performance_level,
- .force_performance_level = &rs780_dpm_force_performance_level,
- .get_current_sclk = &rs780_dpm_get_current_sclk,
- .get_current_mclk = &rs780_dpm_get_current_mclk,
},
.pflip = {
+ .pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rs600_page_flip,
- .page_flip_pending = &rs600_page_flip_pending,
+ .post_page_flip = &rs600_post_page_flip,
},
};

-static struct radeon_asic_ring rv770_uvd_ring = {
- .ib_execute = &uvd_v1_0_ib_execute,
- .emit_fence = &uvd_v2_2_fence_emit,
- .emit_semaphore = &uvd_v2_2_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &uvd_v1_0_ring_test,
- .ib_test = &uvd_v1_0_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &uvd_v1_0_get_rptr,
- .get_wptr = &uvd_v1_0_get_wptr,
- .set_wptr = &uvd_v1_0_set_wptr,
-};
-
static struct radeon_asic rv770_asic = {
.init = &rv770_init,
.fini = &rv770_fini,
@@ -1216,21 +1097,32 @@ static struct radeon_asic rv770_asic = {
.resume = &rv770_resume,
.asic_reset = &r600_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .mmio_hdp_flush = r600_mmio_hdp_flush,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
- .get_xclk = &rv770_get_xclk,
- .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
- .get_allowed_info_register = r600_get_allowed_info_register,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
- .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
- [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
- [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r600_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &r600_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &r600_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &r600_dma_ring_ib_execute,
+ .emit_fence = &r600_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &r600_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
+ }
},
.irq = {
.set = &r600_irq_set,
@@ -1244,7 +1136,7 @@ static struct radeon_asic rv770_asic = {
.get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
- .blit = &r600_copy_cpdma,
+ .blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &rv770_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1274,61 +1166,14 @@ static struct radeon_asic rv770_asic = {
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
- .set_uvd_clocks = &rv770_set_uvd_clocks,
- .get_temperature = &rv770_get_temp,
- },
- .dpm = {
- .init = &rv770_dpm_init,
- .setup_asic = &rv770_dpm_setup_asic,
- .enable = &rv770_dpm_enable,
- .late_enable = &rv770_dpm_late_enable,
- .disable = &rv770_dpm_disable,
- .pre_set_power_state = &r600_dpm_pre_set_power_state,
- .set_power_state = &rv770_dpm_set_power_state,
- .post_set_power_state = &r600_dpm_post_set_power_state,
- .display_configuration_changed = &rv770_dpm_display_configuration_changed,
- .fini = &rv770_dpm_fini,
- .get_sclk = &rv770_dpm_get_sclk,
- .get_mclk = &rv770_dpm_get_mclk,
- .print_power_state = &rv770_dpm_print_power_state,
- .debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
- .force_performance_level = &rv770_dpm_force_performance_level,
- .vblank_too_short = &rv770_dpm_vblank_too_short,
- .get_current_sclk = &rv770_dpm_get_current_sclk,
- .get_current_mclk = &rv770_dpm_get_current_mclk,
},
.pflip = {
+ .pre_page_flip = &rs600_pre_page_flip,
.page_flip = &rv770_page_flip,
- .page_flip_pending = &rv770_page_flip_pending,
+ .post_page_flip = &rs600_post_page_flip,
},
};

-static struct radeon_asic_ring evergreen_gfx_ring = {
- .ib_execute = &evergreen_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gfx_is_lockup,
- .get_rptr = &r600_gfx_get_rptr,
- .get_wptr = &r600_gfx_get_wptr,
- .set_wptr = &r600_gfx_set_wptr,
-};
-
-static struct radeon_asic_ring evergreen_dma_ring = {
- .ib_execute = &evergreen_dma_ring_ib_execute,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &evergreen_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &evergreen_dma_is_lockup,
- .get_rptr = &r600_dma_get_rptr,
- .get_wptr = &r600_dma_get_wptr,
- .set_wptr = &r600_dma_set_wptr,
-};
-
static struct radeon_asic evergreen_asic = {
.init = &evergreen_init,
.fini = &evergreen_fini,
@@ -1336,21 +1181,32 @@ static struct radeon_asic evergreen_asic = {
.resume = &evergreen_resume,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .mmio_hdp_flush = r600_mmio_hdp_flush,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
- .get_xclk = &rv770_get_xclk,
- .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
- .get_allowed_info_register = evergreen_get_allowed_info_register,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
- .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
- [R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
- [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &evergreen_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &evergreen_dma_ring_ib_execute,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
+ }
},
.irq = {
.set = &evergreen_irq_set,
@@ -1364,7 +1220,7 @@ static struct radeon_asic evergreen_asic = {
.get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
- .blit = &r600_copy_cpdma,
+ .blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1394,32 +1250,11 @@ static struct radeon_asic evergreen_asic = {
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
- .set_uvd_clocks = &evergreen_set_uvd_clocks,
- .get_temperature = &evergreen_get_temp,
- },
- .dpm = {
- .init = &cypress_dpm_init,
- .setup_asic = &cypress_dpm_setup_asic,
- .enable = &cypress_dpm_enable,
- .late_enable = &rv770_dpm_late_enable,
- .disable = &cypress_dpm_disable,
- .pre_set_power_state = &r600_dpm_pre_set_power_state,
- .set_power_state = &cypress_dpm_set_power_state,
- .post_set_power_state = &r600_dpm_post_set_power_state,
- .display_configuration_changed = &cypress_dpm_display_configuration_changed,
- .fini = &cypress_dpm_fini,
- .get_sclk = &rv770_dpm_get_sclk,
- .get_mclk = &rv770_dpm_get_mclk,
- .print_power_state = &rv770_dpm_print_power_state,
- .debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
- .force_performance_level = &rv770_dpm_force_performance_level,
- .vblank_too_short = &cypress_dpm_vblank_too_short,
- .get_current_sclk = &rv770_dpm_get_current_sclk,
- .get_current_mclk = &rv770_dpm_get_current_mclk,
},
.pflip = {
+ .pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
- .page_flip_pending = &evergreen_page_flip_pending,
+ .post_page_flip = &evergreen_post_page_flip,
},
};

@@ -1430,21 +1265,32 @@ static struct radeon_asic sumo_asic = {
.resume = &evergreen_resume,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .mmio_hdp_flush = r600_mmio_hdp_flush,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
- .get_xclk = &r600_get_xclk,
- .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
- .get_allowed_info_register = evergreen_get_allowed_info_register,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
- .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
- [R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
- [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &evergreen_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &evergreen_dma_ring_ib_execute,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
+ }
},
.irq = {
.set = &evergreen_irq_set,
@@ -1458,7 +1304,7 @@ static struct radeon_asic sumo_asic = {
.get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
- .blit = &r600_copy_cpdma,
+ .blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1488,31 +1334,11 @@ static struct radeon_asic sumo_asic = {
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
- .set_uvd_clocks = &sumo_set_uvd_clocks,
- .get_temperature = &sumo_get_temp,
- },
- .dpm = {
- .init = &sumo_dpm_init,
- .setup_asic = &sumo_dpm_setup_asic,
- .enable = &sumo_dpm_enable,
- .late_enable = &sumo_dpm_late_enable,
- .disable = &sumo_dpm_disable,
- .pre_set_power_state = &sumo_dpm_pre_set_power_state,
- .set_power_state = &sumo_dpm_set_power_state,
- .post_set_power_state = &sumo_dpm_post_set_power_state,
- .display_configuration_changed = &sumo_dpm_display_configuration_changed,
- .fini = &sumo_dpm_fini,
- .get_sclk = &sumo_dpm_get_sclk,
- .get_mclk = &sumo_dpm_get_mclk,
- .print_power_state = &sumo_dpm_print_power_state,
- .debugfs_print_current_performance_level = &sumo_dpm_debugfs_print_current_performance_level,
- .force_performance_level = &sumo_dpm_force_performance_level,
- .get_current_sclk = &sumo_dpm_get_current_sclk,
- .get_current_mclk = &sumo_dpm_get_current_mclk,
},
.pflip = {
+ .pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
- .page_flip_pending = &evergreen_page_flip_pending,
+ .post_page_flip = &evergreen_post_page_flip,
},
};

@@ -1523,21 +1349,32 @@ static struct radeon_asic btc_asic = {
.resume = &evergreen_resume,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .mmio_hdp_flush = r600_mmio_hdp_flush,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
- .get_xclk = &rv770_get_xclk,
- .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
- .get_allowed_info_register = evergreen_get_allowed_info_register,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
- .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
- [R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
- [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &evergreen_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &evergreen_dma_ring_ib_execute,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
+ }
},
.irq = {
.set = &evergreen_irq_set,
@@ -1551,7 +1388,7 @@ static struct radeon_asic btc_asic = {
.get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
- .blit = &r600_copy_cpdma,
+ .blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1578,81 +1415,17 @@ static struct radeon_asic btc_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &r600_get_pcie_lanes,
- .set_pcie_lanes = &r600_set_pcie_lanes,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
.set_clock_gating = NULL,
- .set_uvd_clocks = &evergreen_set_uvd_clocks,
- .get_temperature = &evergreen_get_temp,
- },
- .dpm = {
- .init = &btc_dpm_init,
- .setup_asic = &btc_dpm_setup_asic,
- .enable = &btc_dpm_enable,
- .late_enable = &rv770_dpm_late_enable,
- .disable = &btc_dpm_disable,
- .pre_set_power_state = &btc_dpm_pre_set_power_state,
- .set_power_state = &btc_dpm_set_power_state,
- .post_set_power_state = &btc_dpm_post_set_power_state,
- .display_configuration_changed = &cypress_dpm_display_configuration_changed,
- .fini = &btc_dpm_fini,
- .get_sclk = &btc_dpm_get_sclk,
- .get_mclk = &btc_dpm_get_mclk,
- .print_power_state = &rv770_dpm_print_power_state,
- .debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level,
- .force_performance_level = &rv770_dpm_force_performance_level,
- .vblank_too_short = &btc_dpm_vblank_too_short,
- .get_current_sclk = &btc_dpm_get_current_sclk,
- .get_current_mclk = &btc_dpm_get_current_mclk,
},
.pflip = {
+ .pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
- .page_flip_pending = &evergreen_page_flip_pending,
+ .post_page_flip = &evergreen_post_page_flip,
},
};
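
Note the deliberate NULLs the revert leaves behind: .get_pcie_lanes/.set_pcie_lanes on btc (and cayman below), plus .blit and the GFX .cs_parse on si. A NULL slot means the operation is unsupported on that ASIC, so dispatch sites must check before calling; a minimal guard sketch (hypothetical helper):

    /* Returns -1 when the ASIC does not implement the hook at all. */
    static int get_pcie_lanes_checked(int (*get_pcie_lanes)(void))
    {
            return get_pcie_lanes ? get_pcie_lanes() : -1;
    }
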
-static struct radeon_asic_ring cayman_gfx_ring = {
- .ib_execute = &cayman_ring_ib_execute,
- .ib_parse = &evergreen_ib_parse,
- .emit_fence = &cayman_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &cayman_gfx_is_lockup,
- .vm_flush = &cayman_vm_flush,
- .get_rptr = &cayman_gfx_get_rptr,
- .get_wptr = &cayman_gfx_get_wptr,
- .set_wptr = &cayman_gfx_set_wptr,
-};
-
-static struct radeon_asic_ring cayman_dma_ring = {
- .ib_execute = &cayman_dma_ring_ib_execute,
- .ib_parse = &evergreen_dma_ib_parse,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &evergreen_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &cayman_dma_is_lockup,
- .vm_flush = &cayman_dma_vm_flush,
- .get_rptr = &cayman_dma_get_rptr,
- .get_wptr = &cayman_dma_get_wptr,
- .set_wptr = &cayman_dma_set_wptr
-};
-
-static struct radeon_asic_ring cayman_uvd_ring = {
- .ib_execute = &uvd_v1_0_ib_execute,
- .emit_fence = &uvd_v2_2_fence_emit,
- .emit_semaphore = &uvd_v3_1_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &uvd_v1_0_ring_test,
- .ib_test = &uvd_v1_0_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &uvd_v1_0_get_rptr,
- .get_wptr = &uvd_v1_0_get_wptr,
- .set_wptr = &uvd_v1_0_set_wptr,
-};
-
static struct radeon_asic cayman_asic = {
.init = &cayman_init,
.fini = &cayman_fini,
@@ -1660,32 +1433,75 @@ static struct radeon_asic cayman_asic = {
.resume = &cayman_resume,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .mmio_hdp_flush = r600_mmio_hdp_flush,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
- .get_xclk = &rv770_get_xclk,
- .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
- .get_allowed_info_register = cayman_get_allowed_info_register,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
- .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .copy_pages = &cayman_dma_vm_copy_pages,
- .write_pages = &cayman_dma_vm_write_pages,
- .set_pages = &cayman_dma_vm_set_pages,
- .pad_ib = &cayman_dma_vm_pad_ib,
+ .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .set_page = &cayman_vm_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
- [CAYMAN_RING_TYPE_CP1_INDEX] = &cayman_gfx_ring,
- [CAYMAN_RING_TYPE_CP2_INDEX] = &cayman_gfx_ring,
- [R600_RING_TYPE_DMA_INDEX] = &cayman_dma_ring,
- [CAYMAN_RING_TYPE_DMA1_INDEX] = &cayman_dma_ring,
- [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
+ .vm_flush = &cayman_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_CP1_INDEX] = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
+ .vm_flush = &cayman_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_CP2_INDEX] = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
+ .vm_flush = &cayman_vm_flush,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
+ }
},
.irq = {
.set = &evergreen_irq_set,
@@ -1699,7 +1515,7 @@ static struct radeon_asic cayman_asic = {
.get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
- .blit = &r600_copy_cpdma,
+ .blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1726,51 +1542,17 @@ static struct radeon_asic cayman_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &r600_get_pcie_lanes,
- .set_pcie_lanes = &r600_set_pcie_lanes,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
.set_clock_gating = NULL,
- .set_uvd_clocks = &evergreen_set_uvd_clocks,
- .get_temperature = &evergreen_get_temp,
- },
- .dpm = {
- .init = &ni_dpm_init,
- .setup_asic = &ni_dpm_setup_asic,
- .enable = &ni_dpm_enable,
- .late_enable = &rv770_dpm_late_enable,
- .disable = &ni_dpm_disable,
- .pre_set_power_state = &ni_dpm_pre_set_power_state,
- .set_power_state = &ni_dpm_set_power_state,
- .post_set_power_state = &ni_dpm_post_set_power_state,
- .display_configuration_changed = &cypress_dpm_display_configuration_changed,
- .fini = &ni_dpm_fini,
- .get_sclk = &ni_dpm_get_sclk,
- .get_mclk = &ni_dpm_get_mclk,
- .print_power_state = &ni_dpm_print_power_state,
- .debugfs_print_current_performance_level = &ni_dpm_debugfs_print_current_performance_level,
- .force_performance_level = &ni_dpm_force_performance_level,
- .vblank_too_short = &ni_dpm_vblank_too_short,
- .get_current_sclk = &ni_dpm_get_current_sclk,
- .get_current_mclk = &ni_dpm_get_current_mclk,
},
.pflip = {
+ .pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
- .page_flip_pending = &evergreen_page_flip_pending,
+ .post_page_flip = &evergreen_post_page_flip,
},
};
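
cayman (and trinity/si below) also revert the .vm interface: the specialised copy_pages/write_pages/set_pages/pad_ib hooks collapse back into a single set_page callback plus a pt_ring_index naming the ring that carries page-table updates. Reduced sketch of the restored shape (hypothetical field types; the real arguments are GPU addresses and PTE flags):

    #include <stdint.h>

    struct vm_funcs {
            int pt_ring_index; /* ring that carries PT writes (GFX here) */
            void (*set_page)(uint64_t pe, uint64_t addr, unsigned count,
                uint32_t incr, uint32_t flags);
    };

Every mapping update, whatever its size or source, funnels through that one hook on that one ring.
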
-static struct radeon_asic_ring trinity_vce_ring = {
- .ib_execute = &radeon_vce_ib_execute,
- .emit_fence = &radeon_vce_fence_emit,
- .emit_semaphore = &radeon_vce_semaphore_emit,
- .cs_parse = &radeon_vce_cs_parse,
- .ring_test = &radeon_vce_ring_test,
- .ib_test = &radeon_vce_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &vce_v1_0_get_rptr,
- .get_wptr = &vce_v1_0_get_wptr,
- .set_wptr = &vce_v1_0_set_wptr,
-};
-
static struct radeon_asic trinity_asic = {
.init = &cayman_init,
.fini = &cayman_fini,
@@ -1778,34 +1560,75 @@ static struct radeon_asic trinity_asic = {
.resume = &cayman_resume,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .mmio_hdp_flush = r600_mmio_hdp_flush,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
- .get_xclk = &r600_get_xclk,
- .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
- .get_allowed_info_register = cayman_get_allowed_info_register,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
- .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .copy_pages = &cayman_dma_vm_copy_pages,
- .write_pages = &cayman_dma_vm_write_pages,
- .set_pages = &cayman_dma_vm_set_pages,
- .pad_ib = &cayman_dma_vm_pad_ib,
+ .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .set_page = &cayman_vm_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
- [CAYMAN_RING_TYPE_CP1_INDEX] = &cayman_gfx_ring,
- [CAYMAN_RING_TYPE_CP2_INDEX] = &cayman_gfx_ring,
- [R600_RING_TYPE_DMA_INDEX] = &cayman_dma_ring,
- [CAYMAN_RING_TYPE_DMA1_INDEX] = &cayman_dma_ring,
- [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
- [TN_RING_TYPE_VCE1_INDEX] = &trinity_vce_ring,
- [TN_RING_TYPE_VCE2_INDEX] = &trinity_vce_ring,
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
+ .vm_flush = &cayman_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_CP1_INDEX] = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
+ .vm_flush = &cayman_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_CP2_INDEX] = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gpu_is_lockup,
+ .vm_flush = &cayman_vm_flush,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
+ }
},
.irq = {
.set = &evergreen_irq_set,
@@ -1819,7 +1642,7 @@ static struct radeon_asic trinity_asic = {
.get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
- .blit = &r600_copy_cpdma,
+ .blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1849,66 +1672,14 @@ static struct radeon_asic trinity_asic = {
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
- .set_uvd_clocks = &sumo_set_uvd_clocks,
- .set_vce_clocks = &tn_set_vce_clocks,
- .get_temperature = &tn_get_temp,
- },
- .dpm = {
- .init = &trinity_dpm_init,
- .setup_asic = &trinity_dpm_setup_asic,
- .enable = &trinity_dpm_enable,
- .late_enable = &trinity_dpm_late_enable,
- .disable = &trinity_dpm_disable,
- .pre_set_power_state = &trinity_dpm_pre_set_power_state,
- .set_power_state = &trinity_dpm_set_power_state,
- .post_set_power_state = &trinity_dpm_post_set_power_state,
- .display_configuration_changed = &trinity_dpm_display_configuration_changed,
- .fini = &trinity_dpm_fini,
- .get_sclk = &trinity_dpm_get_sclk,
- .get_mclk = &trinity_dpm_get_mclk,
- .print_power_state = &trinity_dpm_print_power_state,
- .debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level,
- .force_performance_level = &trinity_dpm_force_performance_level,
- .enable_bapm = &trinity_dpm_enable_bapm,
- .get_current_sclk = &trinity_dpm_get_current_sclk,
- .get_current_mclk = &trinity_dpm_get_current_mclk,
},
.pflip = {
+ .pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
- .page_flip_pending = &evergreen_page_flip_pending,
+ .post_page_flip = &evergreen_post_page_flip,
},
};
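
The revert also drops the UVD and VCE entries (R600_RING_TYPE_UVD_INDEX, TN_RING_TYPE_VCE1/2_INDEX) from the .ring[] initializers. Since these are C99 designated initializers, omitting an index does not shrink the array: the slot is simply zero-filled, i.e. an all-NULL function table, which the NULL-check convention noted above then treats as "no such ring". A tiny demonstration (hypothetical reduced type):

    struct slot { int (*ring_test)(void); };

    static int noop_test(void) { return 0; }

    static struct slot rings[4] = {
            [0] = { .ring_test = &noop_test },
            /* [1]..[3] omitted: zero-filled, so ring_test == NULL */
    };
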
-static struct radeon_asic_ring si_gfx_ring = {
- .ib_execute = &si_ring_ib_execute,
- .ib_parse = &si_ib_parse,
- .emit_fence = &si_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &si_gfx_is_lockup,
- .vm_flush = &si_vm_flush,
- .get_rptr = &cayman_gfx_get_rptr,
- .get_wptr = &cayman_gfx_get_wptr,
- .set_wptr = &cayman_gfx_set_wptr,
-};
-
-static struct radeon_asic_ring si_dma_ring = {
- .ib_execute = &cayman_dma_ring_ib_execute,
- .ib_parse = &evergreen_dma_ib_parse,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &si_dma_is_lockup,
- .vm_flush = &si_dma_vm_flush,
- .get_rptr = &cayman_dma_get_rptr,
- .get_wptr = &cayman_dma_get_wptr,
- .set_wptr = &cayman_dma_set_wptr,
-};
-
static struct radeon_asic si_asic = {
.init = &si_init,
.fini = &si_fini,
@@ -1916,34 +1687,75 @@ static struct radeon_asic si_asic = {
.resume = &si_resume,
.asic_reset = &si_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .mmio_hdp_flush = r600_mmio_hdp_flush,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
- .get_xclk = &si_get_xclk,
- .get_gpu_clock_counter = &si_get_gpu_clock_counter,
- .get_allowed_info_register = si_get_allowed_info_register,
.gart = {
.tlb_flush = &si_pcie_gart_tlb_flush,
- .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
.init = &si_vm_init,
.fini = &si_vm_fini,
- .copy_pages = &si_dma_vm_copy_pages,
- .write_pages = &si_dma_vm_write_pages,
- .set_pages = &si_dma_vm_set_pages,
- .pad_ib = &cayman_dma_vm_pad_ib,
+ .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+ .set_page = &si_vm_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
- [CAYMAN_RING_TYPE_CP1_INDEX] = &si_gfx_ring,
- [CAYMAN_RING_TYPE_CP2_INDEX] = &si_gfx_ring,
- [R600_RING_TYPE_DMA_INDEX] = &si_dma_ring,
- [CAYMAN_RING_TYPE_DMA1_INDEX] = &si_dma_ring,
- [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
- [TN_RING_TYPE_VCE1_INDEX] = &trinity_vce_ring,
- [TN_RING_TYPE_VCE2_INDEX] = &trinity_vce_ring,
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &si_ring_ib_execute,
+ .ib_parse = &si_ib_parse,
+ .emit_fence = &si_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &si_gpu_is_lockup,
+ .vm_flush = &si_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_CP1_INDEX] = {
+ .ib_execute = &si_ring_ib_execute,
+ .ib_parse = &si_ib_parse,
+ .emit_fence = &si_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &si_gpu_is_lockup,
+ .vm_flush = &si_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_CP2_INDEX] = {
+ .ib_execute = &si_ring_ib_execute,
+ .ib_parse = &si_ib_parse,
+ .emit_fence = &si_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &si_gpu_is_lockup,
+ .vm_flush = &si_vm_flush,
+ },
+ [R600_RING_TYPE_DMA_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &si_dma_vm_flush,
+ },
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &si_dma_vm_flush,
+ }
},
.irq = {
.set = &si_irq_set,
@@ -1957,7 +1769,7 @@ static struct radeon_asic si_asic = {
.get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
- .blit = &r600_copy_cpdma,
+ .blit = NULL,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &si_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1984,320 +1796,14 @@ static struct radeon_asic si_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &r600_get_pcie_lanes,
- .set_pcie_lanes = &r600_set_pcie_lanes,
- .set_clock_gating = NULL,
- .set_uvd_clocks = &si_set_uvd_clocks,
- .set_vce_clocks = &si_set_vce_clocks,
- .get_temperature = &si_get_temp,
- },
- .dpm = {
- .init = &si_dpm_init,
- .setup_asic = &si_dpm_setup_asic,
- .enable = &si_dpm_enable,
- .late_enable = &si_dpm_late_enable,
- .disable = &si_dpm_disable,
- .pre_set_power_state = &si_dpm_pre_set_power_state,
- .set_power_state = &si_dpm_set_power_state,
- .post_set_power_state = &si_dpm_post_set_power_state,
- .display_configuration_changed = &si_dpm_display_configuration_changed,
- .fini = &si_dpm_fini,
- .get_sclk = &ni_dpm_get_sclk,
- .get_mclk = &ni_dpm_get_mclk,
- .print_power_state = &ni_dpm_print_power_state,
- .debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
- .force_performance_level = &si_dpm_force_performance_level,
- .vblank_too_short = &ni_dpm_vblank_too_short,
- .fan_ctrl_set_mode = &si_fan_ctrl_set_mode,
- .fan_ctrl_get_mode = &si_fan_ctrl_get_mode,
- .get_fan_speed_percent = &si_fan_ctrl_get_fan_speed_percent,
- .set_fan_speed_percent = &si_fan_ctrl_set_fan_speed_percent,
- .get_current_sclk = &si_dpm_get_current_sclk,
- .get_current_mclk = &si_dpm_get_current_mclk,
- },
- .pflip = {
- .page_flip = &evergreen_page_flip,
- .page_flip_pending = &evergreen_page_flip_pending,
- },
-};
-
-static struct radeon_asic_ring ci_gfx_ring = {
- .ib_execute = &cik_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_fence_gfx_ring_emit,
- .emit_semaphore = &cik_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_ring_test,
- .ib_test = &cik_ib_test,
- .is_lockup = &cik_gfx_is_lockup,
- .vm_flush = &cik_vm_flush,
- .get_rptr = &cik_gfx_get_rptr,
- .get_wptr = &cik_gfx_get_wptr,
- .set_wptr = &cik_gfx_set_wptr,
-};
-
-static struct radeon_asic_ring ci_cp_ring = {
- .ib_execute = &cik_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_fence_compute_ring_emit,
- .emit_semaphore = &cik_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_ring_test,
- .ib_test = &cik_ib_test,
- .is_lockup = &cik_gfx_is_lockup,
- .vm_flush = &cik_vm_flush,
- .get_rptr = &cik_compute_get_rptr,
- .get_wptr = &cik_compute_get_wptr,
- .set_wptr = &cik_compute_set_wptr,
-};
-
-static struct radeon_asic_ring ci_dma_ring = {
- .ib_execute = &cik_sdma_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_sdma_fence_ring_emit,
- .emit_semaphore = &cik_sdma_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_sdma_ring_test,
- .ib_test = &cik_sdma_ib_test,
- .is_lockup = &cik_sdma_is_lockup,
- .vm_flush = &cik_dma_vm_flush,
- .get_rptr = &cik_sdma_get_rptr,
- .get_wptr = &cik_sdma_get_wptr,
- .set_wptr = &cik_sdma_set_wptr,
-};
-
-static struct radeon_asic_ring ci_vce_ring = {
- .ib_execute = &radeon_vce_ib_execute,
- .emit_fence = &radeon_vce_fence_emit,
- .emit_semaphore = &radeon_vce_semaphore_emit,
- .cs_parse = &radeon_vce_cs_parse,
- .ring_test = &radeon_vce_ring_test,
- .ib_test = &radeon_vce_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &vce_v1_0_get_rptr,
- .get_wptr = &vce_v1_0_get_wptr,
- .set_wptr = &vce_v1_0_set_wptr,
-};
-
-static struct radeon_asic ci_asic = {
- .init = &cik_init,
- .fini = &cik_fini,
- .suspend = &cik_suspend,
- .resume = &cik_resume,
- .asic_reset = &cik_asic_reset,
- .vga_set_state = &r600_vga_set_state,
- .mmio_hdp_flush = &r600_mmio_hdp_flush,
- .gui_idle = &r600_gui_idle,
- .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
- .get_xclk = &cik_get_xclk,
- .get_gpu_clock_counter = &cik_get_gpu_clock_counter,
- .get_allowed_info_register = cik_get_allowed_info_register,
- .gart = {
- .tlb_flush = &cik_pcie_gart_tlb_flush,
- .get_page_entry = &rs600_gart_get_page_entry,
- .set_page = &rs600_gart_set_page,
- },
- .vm = {
- .init = &cik_vm_init,
- .fini = &cik_vm_fini,
- .copy_pages = &cik_sdma_vm_copy_pages,
- .write_pages = &cik_sdma_vm_write_pages,
- .set_pages = &cik_sdma_vm_set_pages,
- .pad_ib = &cik_sdma_vm_pad_ib,
- },
- .ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
- [CAYMAN_RING_TYPE_CP1_INDEX] = &ci_cp_ring,
- [CAYMAN_RING_TYPE_CP2_INDEX] = &ci_cp_ring,
- [R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring,
- [CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring,
- [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
- [TN_RING_TYPE_VCE1_INDEX] = &ci_vce_ring,
- [TN_RING_TYPE_VCE2_INDEX] = &ci_vce_ring,
- },
- .irq = {
- .set = &cik_irq_set,
- .process = &cik_irq_process,
- },
- .display = {
- .bandwidth_update = &dce8_bandwidth_update,
- .get_vblank_counter = &evergreen_get_vblank_counter,
- .wait_for_vblank = &dce4_wait_for_vblank,
- .set_backlight_level = &atombios_set_backlight_level,
- .get_backlight_level = &atombios_get_backlight_level,
- },
- .copy = {
- .blit = &cik_copy_cpdma,
- .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = &cik_copy_dma,
- .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
- .copy = &cik_copy_dma,
- .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
- },
- .surface = {
- .set_reg = r600_set_surface_reg,
- .clear_reg = r600_clear_surface_reg,
- },
- .hpd = {
- .init = &evergreen_hpd_init,
- .fini = &evergreen_hpd_fini,
- .sense = &evergreen_hpd_sense,
- .set_polarity = &evergreen_hpd_set_polarity,
- },
- .pm = {
- .misc = &evergreen_pm_misc,
- .prepare = &evergreen_pm_prepare,
- .finish = &evergreen_pm_finish,
- .init_profile = &sumo_pm_init_profile,
- .get_dynpm_state = &r600_pm_get_dynpm_state,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
- .set_uvd_clocks = &cik_set_uvd_clocks,
- .set_vce_clocks = &cik_set_vce_clocks,
- .get_temperature = &ci_get_temp,
- },
- .dpm = {
- .init = &ci_dpm_init,
- .setup_asic = &ci_dpm_setup_asic,
- .enable = &ci_dpm_enable,
- .late_enable = &ci_dpm_late_enable,
- .disable = &ci_dpm_disable,
- .pre_set_power_state = &ci_dpm_pre_set_power_state,
- .set_power_state = &ci_dpm_set_power_state,
- .post_set_power_state = &ci_dpm_post_set_power_state,
- .display_configuration_changed = &ci_dpm_display_configuration_changed,
- .fini = &ci_dpm_fini,
- .get_sclk = &ci_dpm_get_sclk,
- .get_mclk = &ci_dpm_get_mclk,
- .print_power_state = &ci_dpm_print_power_state,
- .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
- .force_performance_level = &ci_dpm_force_performance_level,
- .vblank_too_short = &ci_dpm_vblank_too_short,
- .powergate_uvd = &ci_dpm_powergate_uvd,
- .fan_ctrl_set_mode = &ci_fan_ctrl_set_mode,
- .fan_ctrl_get_mode = &ci_fan_ctrl_get_mode,
- .get_fan_speed_percent = &ci_fan_ctrl_get_fan_speed_percent,
- .set_fan_speed_percent = &ci_fan_ctrl_set_fan_speed_percent,
- .get_current_sclk = &ci_dpm_get_current_sclk,
- .get_current_mclk = &ci_dpm_get_current_mclk,
},
.pflip = {
+ .pre_page_flip = &evergreen_pre_page_flip,
.page_flip = &evergreen_page_flip,
- .page_flip_pending = &evergreen_page_flip_pending,
- },
-};
-
-static struct radeon_asic kv_asic = {
- .init = &cik_init,
- .fini = &cik_fini,
- .suspend = &cik_suspend,
- .resume = &cik_resume,
- .asic_reset = &cik_asic_reset,
- .vga_set_state = &r600_vga_set_state,
- .mmio_hdp_flush = &r600_mmio_hdp_flush,
- .gui_idle = &r600_gui_idle,
- .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
- .get_xclk = &cik_get_xclk,
- .get_gpu_clock_counter = &cik_get_gpu_clock_counter,
- .get_allowed_info_register = cik_get_allowed_info_register,
- .gart = {
- .tlb_flush = &cik_pcie_gart_tlb_flush,
- .get_page_entry = &rs600_gart_get_page_entry,
- .set_page = &rs600_gart_set_page,
- },
- .vm = {
- .init = &cik_vm_init,
- .fini = &cik_vm_fini,
- .copy_pages = &cik_sdma_vm_copy_pages,
- .write_pages = &cik_sdma_vm_write_pages,
- .set_pages = &cik_sdma_vm_set_pages,
- .pad_ib = &cik_sdma_vm_pad_ib,
- },
- .ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
- [CAYMAN_RING_TYPE_CP1_INDEX] = &ci_cp_ring,
- [CAYMAN_RING_TYPE_CP2_INDEX] = &ci_cp_ring,
- [R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring,
- [CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring,
- [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
- [TN_RING_TYPE_VCE1_INDEX] = &ci_vce_ring,
- [TN_RING_TYPE_VCE2_INDEX] = &ci_vce_ring,
- },
- .irq = {
- .set = &cik_irq_set,
- .process = &cik_irq_process,
- },
- .display = {
- .bandwidth_update = &dce8_bandwidth_update,
- .get_vblank_counter = &evergreen_get_vblank_counter,
- .wait_for_vblank = &dce4_wait_for_vblank,
- .set_backlight_level = &atombios_set_backlight_level,
- .get_backlight_level = &atombios_get_backlight_level,
- },
- .copy = {
- .blit = &cik_copy_cpdma,
- .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
- .dma = &cik_copy_dma,
- .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
- .copy = &cik_copy_dma,
- .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
- },
- .surface = {
- .set_reg = r600_set_surface_reg,
- .clear_reg = r600_clear_surface_reg,
- },
- .hpd = {
- .init = &evergreen_hpd_init,
- .fini = &evergreen_hpd_fini,
- .sense = &evergreen_hpd_sense,
- .set_polarity = &evergreen_hpd_set_polarity,
- },
- .pm = {
- .misc = &evergreen_pm_misc,
- .prepare = &evergreen_pm_prepare,
- .finish = &evergreen_pm_finish,
- .init_profile = &sumo_pm_init_profile,
- .get_dynpm_state = &r600_pm_get_dynpm_state,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = NULL,
- .set_uvd_clocks = &cik_set_uvd_clocks,
- .set_vce_clocks = &cik_set_vce_clocks,
- .get_temperature = &kv_get_temp,
- },
- .dpm = {
- .init = &kv_dpm_init,
- .setup_asic = &kv_dpm_setup_asic,
- .enable = &kv_dpm_enable,
- .late_enable = &kv_dpm_late_enable,
- .disable = &kv_dpm_disable,
- .pre_set_power_state = &kv_dpm_pre_set_power_state,
- .set_power_state = &kv_dpm_set_power_state,
- .post_set_power_state = &kv_dpm_post_set_power_state,
- .display_configuration_changed = &kv_dpm_display_configuration_changed,
- .fini = &kv_dpm_fini,
- .get_sclk = &kv_dpm_get_sclk,
- .get_mclk = &kv_dpm_get_mclk,
- .print_power_state = &kv_dpm_print_power_state,
- .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
- .force_performance_level = &kv_dpm_force_performance_level,
- .powergate_uvd = &kv_dpm_powergate_uvd,
- .enable_bapm = &kv_dpm_enable_bapm,
- .get_current_sclk = &kv_dpm_get_current_sclk,
- .get_current_mclk = &kv_dpm_get_current_mclk,
- },
- .pflip = {
- .page_flip = &evergreen_page_flip,
- .page_flip_pending = &evergreen_page_flip_pending,
+ .post_page_flip = &evergreen_post_page_flip,
},
};
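/*
 * Aside: the ci_asic and kv_asic tables deleted above follow the
 * driver-wide pattern, one struct radeon_asic of function pointers
 * per GPU family, selected once in radeon_asic_init() and called
 * indirectly ever after.  A minimal sketch of that dispatch style;
 * the my_* and cik_like_* names are hypothetical, not from this tree.
 */
#include <stdio.h>

struct my_device;

struct my_asic {                        /* per-family callback table */
	int	(*init)(struct my_device *);
	void	(*tlb_flush)(struct my_device *);
};

struct my_device {
	const struct my_asic *asic;     /* set once, like rdev->asic */
};

static int
cik_like_init(struct my_device *d)
{
	(void)d;
	return 0;
}

static void
cik_like_tlb_flush(struct my_device *d)
{
	(void)d;
}

static const struct my_asic cik_like_asic = {
	.init		= cik_like_init,
	.tlb_flush	= cik_like_tlb_flush,
};

int
main(void)
{
	struct my_device dev = { .asic = &cik_like_asic };

	/* callers never re-test the family; they go through the table */
	if (dev.asic->init(&dev) == 0)
		dev.asic->tlb_flush(&dev);
	printf("dispatched through the asic table\n");
	return 0;
}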
@@ -2321,8 +1827,6 @@ int radeon_asic_init(struct radeon_device *rdev)
else
rdev->num_crtc = 2;
- rdev->has_uvd = false;
-
switch (rdev->family) {
case CHIP_R100:
case CHIP_RV100:
@@ -2381,35 +1885,22 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->asic = &r520_asic;
break;
case CHIP_R600:
- rdev->asic = &r600_asic;
- break;
case CHIP_RV610:
case CHIP_RV630:
case CHIP_RV620:
case CHIP_RV635:
case CHIP_RV670:
- rdev->asic = &rv6xx_asic;
- rdev->has_uvd = true;
+ rdev->asic = &r600_asic;
break;
case CHIP_RS780:
case CHIP_RS880:
rdev->asic = &rs780_asic;
- /* 760G/780V/880V don't have UVD */
- if ((rdev->pdev->device == 0x9616)||
- (rdev->pdev->device == 0x9611)||
- (rdev->pdev->device == 0x9613)||
- (rdev->pdev->device == 0x9711)||
- (rdev->pdev->device == 0x9713))
- rdev->has_uvd = false;
- else
- rdev->has_uvd = true;
break;
case CHIP_RV770:
case CHIP_RV730:
case CHIP_RV710:
case CHIP_RV740:
rdev->asic = &rv770_asic;
- rdev->has_uvd = true;
break;
case CHIP_CEDAR:
case CHIP_REDWOOD:
@@ -2422,13 +1913,11 @@ int radeon_asic_init(struct radeon_device *rdev)
else
rdev->num_crtc = 6;
rdev->asic = &evergreen_asic;
- rdev->has_uvd = true;
break;
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
rdev->asic = &sumo_asic;
- rdev->has_uvd = true;
break;
case CHIP_BARTS:
case CHIP_TURKS:
@@ -2439,243 +1928,23 @@ int radeon_asic_init(struct radeon_device *rdev)
else
rdev->num_crtc = 6;
rdev->asic = &btc_asic;
- rdev->has_uvd = true;
break;
case CHIP_CAYMAN:
rdev->asic = &cayman_asic;
/* set num crtcs */
rdev->num_crtc = 6;
- rdev->has_uvd = true;
break;
case CHIP_ARUBA:
rdev->asic = &trinity_asic;
/* set num crtcs */
rdev->num_crtc = 4;
- rdev->has_uvd = true;
- rdev->cg_flags =
- RADEON_CG_SUPPORT_VCE_MGCG;
break;
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
- case CHIP_OLAND:
- case CHIP_HAINAN:
rdev->asic = &si_asic;
/* set num crtcs */
- if (rdev->family == CHIP_HAINAN)
- rdev->num_crtc = 0;
- else if (rdev->family == CHIP_OLAND)
- rdev->num_crtc = 2;
- else
- rdev->num_crtc = 6;
- if (rdev->family == CHIP_HAINAN)
- rdev->has_uvd = false;
- else
- rdev->has_uvd = true;
- switch (rdev->family) {
- case CHIP_TAHITI:
- rdev->cg_flags =
- RADEON_CG_SUPPORT_GFX_MGCG |
- RADEON_CG_SUPPORT_GFX_MGLS |
- /*RADEON_CG_SUPPORT_GFX_CGCG |*/
- RADEON_CG_SUPPORT_GFX_CGLS |
- RADEON_CG_SUPPORT_GFX_CGTS |
- RADEON_CG_SUPPORT_GFX_CP_LS |
- RADEON_CG_SUPPORT_MC_MGCG |
- RADEON_CG_SUPPORT_SDMA_MGCG |
- RADEON_CG_SUPPORT_BIF_LS |
- RADEON_CG_SUPPORT_VCE_MGCG |
- RADEON_CG_SUPPORT_UVD_MGCG |
- RADEON_CG_SUPPORT_HDP_LS |
- RADEON_CG_SUPPORT_HDP_MGCG;
- rdev->pg_flags = 0;
- break;
- case CHIP_PITCAIRN:
- rdev->cg_flags =
- RADEON_CG_SUPPORT_GFX_MGCG |
- RADEON_CG_SUPPORT_GFX_MGLS |
- /*RADEON_CG_SUPPORT_GFX_CGCG |*/
- RADEON_CG_SUPPORT_GFX_CGLS |
- RADEON_CG_SUPPORT_GFX_CGTS |
- RADEON_CG_SUPPORT_GFX_CP_LS |
- RADEON_CG_SUPPORT_GFX_RLC_LS |
- RADEON_CG_SUPPORT_MC_LS |
- RADEON_CG_SUPPORT_MC_MGCG |
- RADEON_CG_SUPPORT_SDMA_MGCG |
- RADEON_CG_SUPPORT_BIF_LS |
- RADEON_CG_SUPPORT_VCE_MGCG |
- RADEON_CG_SUPPORT_UVD_MGCG |
- RADEON_CG_SUPPORT_HDP_LS |
- RADEON_CG_SUPPORT_HDP_MGCG;
- rdev->pg_flags = 0;
- break;
- case CHIP_VERDE:
- rdev->cg_flags =
- RADEON_CG_SUPPORT_GFX_MGCG |
- RADEON_CG_SUPPORT_GFX_MGLS |
- /*RADEON_CG_SUPPORT_GFX_CGCG |*/
- RADEON_CG_SUPPORT_GFX_CGLS |
- RADEON_CG_SUPPORT_GFX_CGTS |
- RADEON_CG_SUPPORT_GFX_CP_LS |
- RADEON_CG_SUPPORT_GFX_RLC_LS |
- RADEON_CG_SUPPORT_MC_LS |
- RADEON_CG_SUPPORT_MC_MGCG |
- RADEON_CG_SUPPORT_SDMA_MGCG |
- RADEON_CG_SUPPORT_BIF_LS |
- RADEON_CG_SUPPORT_VCE_MGCG |
- RADEON_CG_SUPPORT_UVD_MGCG |
- RADEON_CG_SUPPORT_HDP_LS |
- RADEON_CG_SUPPORT_HDP_MGCG;
- rdev->pg_flags = 0 |
- /*RADEON_PG_SUPPORT_GFX_PG | */
- RADEON_PG_SUPPORT_SDMA;
- break;
- case CHIP_OLAND:
- rdev->cg_flags =
- RADEON_CG_SUPPORT_GFX_MGCG |
- RADEON_CG_SUPPORT_GFX_MGLS |
- /*RADEON_CG_SUPPORT_GFX_CGCG |*/
- RADEON_CG_SUPPORT_GFX_CGLS |
- RADEON_CG_SUPPORT_GFX_CGTS |
- RADEON_CG_SUPPORT_GFX_CP_LS |
- RADEON_CG_SUPPORT_GFX_RLC_LS |
- RADEON_CG_SUPPORT_MC_LS |
- RADEON_CG_SUPPORT_MC_MGCG |
- RADEON_CG_SUPPORT_SDMA_MGCG |
- RADEON_CG_SUPPORT_BIF_LS |
- RADEON_CG_SUPPORT_UVD_MGCG |
- RADEON_CG_SUPPORT_HDP_LS |
- RADEON_CG_SUPPORT_HDP_MGCG;
- rdev->pg_flags = 0;
- break;
- case CHIP_HAINAN:
- rdev->cg_flags =
- RADEON_CG_SUPPORT_GFX_MGCG |
- RADEON_CG_SUPPORT_GFX_MGLS |
- /*RADEON_CG_SUPPORT_GFX_CGCG |*/
- RADEON_CG_SUPPORT_GFX_CGLS |
- RADEON_CG_SUPPORT_GFX_CGTS |
- RADEON_CG_SUPPORT_GFX_CP_LS |
- RADEON_CG_SUPPORT_GFX_RLC_LS |
- RADEON_CG_SUPPORT_MC_LS |
- RADEON_CG_SUPPORT_MC_MGCG |
- RADEON_CG_SUPPORT_SDMA_MGCG |
- RADEON_CG_SUPPORT_BIF_LS |
- RADEON_CG_SUPPORT_HDP_LS |
- RADEON_CG_SUPPORT_HDP_MGCG;
- rdev->pg_flags = 0;
- break;
- default:
- rdev->cg_flags = 0;
- rdev->pg_flags = 0;
- break;
- }
- break;
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- rdev->asic = &ci_asic;
rdev->num_crtc = 6;
- rdev->has_uvd = true;
- if (rdev->family == CHIP_BONAIRE) {
- rdev->cg_flags =
- RADEON_CG_SUPPORT_GFX_MGCG |
- RADEON_CG_SUPPORT_GFX_MGLS |
- /*RADEON_CG_SUPPORT_GFX_CGCG |*/
- RADEON_CG_SUPPORT_GFX_CGLS |
- RADEON_CG_SUPPORT_GFX_CGTS |
- RADEON_CG_SUPPORT_GFX_CGTS_LS |
- RADEON_CG_SUPPORT_GFX_CP_LS |
- RADEON_CG_SUPPORT_MC_LS |
- RADEON_CG_SUPPORT_MC_MGCG |
- RADEON_CG_SUPPORT_SDMA_MGCG |
- RADEON_CG_SUPPORT_SDMA_LS |
- RADEON_CG_SUPPORT_BIF_LS |
- RADEON_CG_SUPPORT_VCE_MGCG |
- RADEON_CG_SUPPORT_UVD_MGCG |
- RADEON_CG_SUPPORT_HDP_LS |
- RADEON_CG_SUPPORT_HDP_MGCG;
- rdev->pg_flags = 0;
- } else {
- rdev->cg_flags =
- RADEON_CG_SUPPORT_GFX_MGCG |
- RADEON_CG_SUPPORT_GFX_MGLS |
- /*RADEON_CG_SUPPORT_GFX_CGCG |*/
- RADEON_CG_SUPPORT_GFX_CGLS |
- RADEON_CG_SUPPORT_GFX_CGTS |
- RADEON_CG_SUPPORT_GFX_CP_LS |
- RADEON_CG_SUPPORT_MC_LS |
- RADEON_CG_SUPPORT_MC_MGCG |
- RADEON_CG_SUPPORT_SDMA_MGCG |
- RADEON_CG_SUPPORT_SDMA_LS |
- RADEON_CG_SUPPORT_BIF_LS |
- RADEON_CG_SUPPORT_VCE_MGCG |
- RADEON_CG_SUPPORT_UVD_MGCG |
- RADEON_CG_SUPPORT_HDP_LS |
- RADEON_CG_SUPPORT_HDP_MGCG;
- rdev->pg_flags = 0;
- }
- break;
- case CHIP_KAVERI:
- case CHIP_KABINI:
- case CHIP_MULLINS:
- rdev->asic = &kv_asic;
- /* set num crtcs */
- if (rdev->family == CHIP_KAVERI) {
- rdev->num_crtc = 4;
- rdev->cg_flags =
- RADEON_CG_SUPPORT_GFX_MGCG |
- RADEON_CG_SUPPORT_GFX_MGLS |
- /*RADEON_CG_SUPPORT_GFX_CGCG |*/
- RADEON_CG_SUPPORT_GFX_CGLS |
- RADEON_CG_SUPPORT_GFX_CGTS |
- RADEON_CG_SUPPORT_GFX_CGTS_LS |
- RADEON_CG_SUPPORT_GFX_CP_LS |
- RADEON_CG_SUPPORT_SDMA_MGCG |
- RADEON_CG_SUPPORT_SDMA_LS |
- RADEON_CG_SUPPORT_BIF_LS |
- RADEON_CG_SUPPORT_VCE_MGCG |
- RADEON_CG_SUPPORT_UVD_MGCG |
- RADEON_CG_SUPPORT_HDP_LS |
- RADEON_CG_SUPPORT_HDP_MGCG;
- rdev->pg_flags = 0;
- /*RADEON_PG_SUPPORT_GFX_PG |
- RADEON_PG_SUPPORT_GFX_SMG |
- RADEON_PG_SUPPORT_GFX_DMG |
- RADEON_PG_SUPPORT_UVD |
- RADEON_PG_SUPPORT_VCE |
- RADEON_PG_SUPPORT_CP |
- RADEON_PG_SUPPORT_GDS |
- RADEON_PG_SUPPORT_RLC_SMU_HS |
- RADEON_PG_SUPPORT_ACP |
- RADEON_PG_SUPPORT_SAMU;*/
- } else {
- rdev->num_crtc = 2;
- rdev->cg_flags =
- RADEON_CG_SUPPORT_GFX_MGCG |
- RADEON_CG_SUPPORT_GFX_MGLS |
- /*RADEON_CG_SUPPORT_GFX_CGCG |*/
- RADEON_CG_SUPPORT_GFX_CGLS |
- RADEON_CG_SUPPORT_GFX_CGTS |
- RADEON_CG_SUPPORT_GFX_CGTS_LS |
- RADEON_CG_SUPPORT_GFX_CP_LS |
- RADEON_CG_SUPPORT_SDMA_MGCG |
- RADEON_CG_SUPPORT_SDMA_LS |
- RADEON_CG_SUPPORT_BIF_LS |
- RADEON_CG_SUPPORT_VCE_MGCG |
- RADEON_CG_SUPPORT_UVD_MGCG |
- RADEON_CG_SUPPORT_HDP_LS |
- RADEON_CG_SUPPORT_HDP_MGCG;
- rdev->pg_flags = 0;
- /*RADEON_PG_SUPPORT_GFX_PG |
- RADEON_PG_SUPPORT_GFX_SMG |
- RADEON_PG_SUPPORT_UVD |
- RADEON_PG_SUPPORT_VCE |
- RADEON_PG_SUPPORT_CP |
- RADEON_PG_SUPPORT_GDS |
- RADEON_PG_SUPPORT_RLC_SMU_HS |
- RADEON_PG_SUPPORT_SAMU;*/
- }
- rdev->has_uvd = true;
break;
default:
/* FIXME: not supported yet */
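/*
 * Aside: the has_uvd assignments deleted from this function gated the
 * UVD decode block per family, with a small PCI device-ID blacklist
 * for the 760G/780V/880V derivatives of RS780/RS880.  The shape of
 * that check as a standalone helper; the ID list is copied from the
 * deleted lines, while the helper name and everything around it is
 * illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
rs780_family_has_uvd(uint16_t pci_device)
{
	switch (pci_device) {
	case 0x9616:
	case 0x9611:
	case 0x9613:
	case 0x9711:
	case 0x9713:
		/* 760G/780V/880V parts ship without the UVD block */
		return false;
	default:
		return true;
	}
}

int
main(void)
{
	return rs780_family_has_uvd(0x9616) ? 1 : 0;
}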
diff --git a/sys/dev/pci/drm/radeon/radeon_asic.h b/sys/dev/pci/drm/radeon/radeon_asic.h
index e0aa33262ea..f6483d9c574 100644
--- a/sys/dev/pci/drm/radeon/radeon_asic.h
+++ b/sys/dev/pci/drm/radeon/radeon_asic.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_asic.h,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -47,6 +48,7 @@ u8 atombios_get_backlight_level(struct radeon_encoder *radeon_encoder);
void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder);
+
/*
* r100,rv100,rs100,rv200,rs200
*/
@@ -67,26 +69,24 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
-uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags);
-void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t entry);
+int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-bool r100_semaphore_ring_emit(struct radeon_device *rdev,
+void r100_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
int r100_cs_parse(struct radeon_cs_parser *p);
void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
-struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct reservation_object *resv);
+int r100_copy_blit(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
int r100_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
@@ -137,27 +137,20 @@ extern void r100_pm_prepare(struct radeon_device *rdev);
extern void r100_pm_finish(struct radeon_device *rdev);
extern void r100_pm_init_profile(struct radeon_device *rdev);
extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
-extern void r100_page_flip(struct radeon_device *rdev, int crtc,
- u64 crtc_base);
-extern bool r100_page_flip_pending(struct radeon_device *rdev, int crtc);
+extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc);
extern int r100_mc_wait_for_idle(struct radeon_device *rdev);
-u32 r100_gfx_get_rptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-u32 r100_gfx_get_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-void r100_gfx_set_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-
/*
* r200,rv250,rs300,rv280
*/
-struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct reservation_object *resv);
+extern int r200_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
void r200_set_safe_registers(struct radeon_device *rdev);
/*
@@ -173,9 +166,7 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
extern int r300_cs_parse(struct radeon_cs_parser *p);
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
-extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags);
-extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t entry);
+extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -210,9 +201,7 @@ extern void rs400_fini(struct radeon_device *rdev);
extern int rs400_suspend(struct radeon_device *rdev);
extern int rs400_resume(struct radeon_device *rdev);
void rs400_gart_tlb_flush(struct radeon_device *rdev);
-uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags);
-void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t entry);
+int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int rs400_gart_init(struct radeon_device *rdev);
@@ -235,9 +224,7 @@ int rs600_irq_process(struct radeon_device *rdev);
void rs600_irq_disable(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
-uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags);
-void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t entry);
+int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);
@@ -249,9 +236,9 @@ void rs600_hpd_set_polarity(struct radeon_device *rdev,
extern void rs600_pm_misc(struct radeon_device *rdev);
extern void rs600_pm_prepare(struct radeon_device *rdev);
extern void rs600_pm_finish(struct radeon_device *rdev);
-extern void rs600_page_flip(struct radeon_device *rdev, int crtc,
- u64 crtc_base);
-extern bool rs600_page_flip_pending(struct radeon_device *rdev, int crtc);
+extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc);
void rs600_set_safe_registers(struct radeon_device *rdev);
extern void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc);
extern int rs600_mc_wait_for_idle(struct radeon_device *rdev);
@@ -321,19 +308,19 @@ int r600_cs_parse(struct radeon_cs_parser *p);
int r600_dma_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-bool r600_semaphore_ring_emit(struct radeon_device *rdev,
+void r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
void r600_dma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait);
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
-bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
@@ -344,26 +331,22 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
-struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct reservation_object *resv);
-struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct reservation_object *resv);
+int r600_copy_blit(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages, struct radeon_fence **fence);
+int r600_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages, struct radeon_fence **fence);
void r600_hpd_init(struct radeon_device *rdev);
void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void r600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
-extern void r600_mmio_hdp_flush(struct radeon_device *rdev);
+extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
extern bool r600_gui_idle(struct radeon_device *rdev);
extern void r600_pm_misc(struct radeon_device *rdev);
extern void r600_pm_init_profile(struct radeon_device *rdev);
extern void rs780_pm_init_profile(struct radeon_device *rdev);
-extern uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg);
-extern void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int r600_get_pcie_lanes(struct radeon_device *rdev);
@@ -377,15 +360,9 @@ int r600_count_pipe_bits(uint32_t val);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
int r600_pcie_gart_init(struct radeon_device *rdev);
void r600_scratch_init(struct radeon_device *rdev);
+int r600_blit_init(struct radeon_device *rdev);
+void r600_blit_fini(struct radeon_device *rdev);
int r600_init_microcode(struct radeon_device *rdev);
-u32 r600_gfx_get_rptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-u32 r600_gfx_get_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-void r600_gfx_set_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-int r600_get_allowed_info_register(struct radeon_device *rdev,
- u32 reg, u32 *val);
/* r600 irq */
int r600_irq_process(struct radeon_device *rdev);
int r600_irq_init(struct radeon_device *rdev);
@@ -396,65 +373,24 @@ void r600_irq_suspend(struct radeon_device *rdev);
void r600_disable_interrupts(struct radeon_device *rdev);
void r600_rlc_stop(struct radeon_device *rdev);
/* r600 audio */
+int r600_audio_init(struct radeon_device *rdev);
+void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
+struct r600_audio r600_audio_status(struct radeon_device *rdev);
void r600_audio_fini(struct radeon_device *rdev);
-void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock);
-void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder, void *buffer,
- size_t size);
-void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock);
-void r600_hdmi_audio_workaround(struct drm_encoder *encoder);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
+/* r600 blit */
+int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
+ struct radeon_fence **fence, struct radeon_sa_bo **vb,
+ struct radeon_semaphore **sem);
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
+ struct radeon_sa_bo *vb, struct radeon_semaphore *sem);
+void r600_kms_blit_copy(struct radeon_device *rdev,
+ u64 src_gpu_addr, u64 dst_gpu_addr,
+ unsigned num_gpu_pages,
+ struct radeon_sa_bo *vb);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
-u32 r600_get_xclk(struct radeon_device *rdev);
-uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
-int rv6xx_get_temp(struct radeon_device *rdev);
-int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
-int r600_dpm_pre_set_power_state(struct radeon_device *rdev);
-void r600_dpm_post_set_power_state(struct radeon_device *rdev);
-int r600_dpm_late_enable(struct radeon_device *rdev);
-/* r600 dma */
-uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-void r600_dma_set_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-/* rv6xx dpm */
-int rv6xx_dpm_init(struct radeon_device *rdev);
-int rv6xx_dpm_enable(struct radeon_device *rdev);
-void rv6xx_dpm_disable(struct radeon_device *rdev);
-int rv6xx_dpm_set_power_state(struct radeon_device *rdev);
-void rv6xx_setup_asic(struct radeon_device *rdev);
-void rv6xx_dpm_display_configuration_changed(struct radeon_device *rdev);
-void rv6xx_dpm_fini(struct radeon_device *rdev);
-u32 rv6xx_dpm_get_sclk(struct radeon_device *rdev, bool low);
-u32 rv6xx_dpm_get_mclk(struct radeon_device *rdev, bool low);
-void rv6xx_dpm_print_power_state(struct radeon_device *rdev,
- struct radeon_ps *ps);
-void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
- struct seq_file *m);
-int rv6xx_dpm_force_performance_level(struct radeon_device *rdev,
- enum radeon_dpm_forced_level level);
-u32 rv6xx_dpm_get_current_sclk(struct radeon_device *rdev);
-u32 rv6xx_dpm_get_current_mclk(struct radeon_device *rdev);
-/* rs780 dpm */
-int rs780_dpm_init(struct radeon_device *rdev);
-int rs780_dpm_enable(struct radeon_device *rdev);
-void rs780_dpm_disable(struct radeon_device *rdev);
-int rs780_dpm_set_power_state(struct radeon_device *rdev);
-void rs780_dpm_setup_asic(struct radeon_device *rdev);
-void rs780_dpm_display_configuration_changed(struct radeon_device *rdev);
-void rs780_dpm_fini(struct radeon_device *rdev);
-u32 rs780_dpm_get_sclk(struct radeon_device *rdev, bool low);
-u32 rs780_dpm_get_mclk(struct radeon_device *rdev, bool low);
-void rs780_dpm_print_power_state(struct radeon_device *rdev,
- struct radeon_ps *ps);
-void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
- struct seq_file *m);
-int rs780_dpm_force_performance_level(struct radeon_device *rdev,
- enum radeon_dpm_forced_level level);
-u32 rs780_dpm_get_current_sclk(struct radeon_device *rdev);
-u32 rs780_dpm_get_current_mclk(struct radeon_device *rdev);
+uint64_t r600_get_gpu_clock(struct radeon_device *rdev);
/*
* rv770,rv730,rv710,rv740
@@ -464,38 +400,14 @@ void rv770_fini(struct radeon_device *rdev);
int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev);
void rv770_pm_misc(struct radeon_device *rdev);
-void rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
-bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc);
+u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
void r700_cp_stop(struct radeon_device *rdev);
void r700_cp_fini(struct radeon_device *rdev);
-struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct reservation_object *resv);
-u32 rv770_get_xclk(struct radeon_device *rdev);
-int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
-int rv770_get_temp(struct radeon_device *rdev);
-/* rv7xx pm */
-int rv770_dpm_init(struct radeon_device *rdev);
-int rv770_dpm_enable(struct radeon_device *rdev);
-int rv770_dpm_late_enable(struct radeon_device *rdev);
-void rv770_dpm_disable(struct radeon_device *rdev);
-int rv770_dpm_set_power_state(struct radeon_device *rdev);
-void rv770_dpm_setup_asic(struct radeon_device *rdev);
-void rv770_dpm_display_configuration_changed(struct radeon_device *rdev);
-void rv770_dpm_fini(struct radeon_device *rdev);
-u32 rv770_dpm_get_sclk(struct radeon_device *rdev, bool low);
-u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low);
-void rv770_dpm_print_power_state(struct radeon_device *rdev,
- struct radeon_ps *ps);
-void rv770_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
- struct seq_file *m);
-int rv770_dpm_force_performance_level(struct radeon_device *rdev,
- enum radeon_dpm_forced_level level);
-bool rv770_dpm_vblank_too_short(struct radeon_device *rdev);
-u32 rv770_dpm_get_current_sclk(struct radeon_device *rdev);
-u32 rv770_dpm_get_current_mclk(struct radeon_device *rdev);
+int rv770_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
/*
* evergreen
@@ -511,8 +423,7 @@ int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
int evergreen_suspend(struct radeon_device *rdev);
int evergreen_resume(struct radeon_device *rdev);
-bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
-bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -531,70 +442,21 @@ extern void evergreen_pm_prepare(struct radeon_device *rdev);
extern void evergreen_pm_finish(struct radeon_device *rdev);
extern void sumo_pm_init_profile(struct radeon_device *rdev);
extern void btc_pm_init_profile(struct radeon_device *rdev);
-int sumo_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
-int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
-extern void evergreen_page_flip(struct radeon_device *rdev, int crtc,
- u64 crtc_base);
-extern bool evergreen_page_flip_pending(struct radeon_device *rdev, int crtc);
+extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
void evergreen_disable_interrupt_state(struct radeon_device *rdev);
+int evergreen_blit_init(struct radeon_device *rdev);
int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
struct radeon_ib *ib);
-struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct reservation_object *resv);
-int evergreen_get_temp(struct radeon_device *rdev);
-int evergreen_get_allowed_info_register(struct radeon_device *rdev,
- u32 reg, u32 *val);
-int sumo_get_temp(struct radeon_device *rdev);
-int tn_get_temp(struct radeon_device *rdev);
-int cypress_dpm_init(struct radeon_device *rdev);
-void cypress_dpm_setup_asic(struct radeon_device *rdev);
-int cypress_dpm_enable(struct radeon_device *rdev);
-void cypress_dpm_disable(struct radeon_device *rdev);
-int cypress_dpm_set_power_state(struct radeon_device *rdev);
-void cypress_dpm_display_configuration_changed(struct radeon_device *rdev);
-void cypress_dpm_fini(struct radeon_device *rdev);
-bool cypress_dpm_vblank_too_short(struct radeon_device *rdev);
-int btc_dpm_init(struct radeon_device *rdev);
-void btc_dpm_setup_asic(struct radeon_device *rdev);
-int btc_dpm_enable(struct radeon_device *rdev);
-void btc_dpm_disable(struct radeon_device *rdev);
-int btc_dpm_pre_set_power_state(struct radeon_device *rdev);
-int btc_dpm_set_power_state(struct radeon_device *rdev);
-void btc_dpm_post_set_power_state(struct radeon_device *rdev);
-void btc_dpm_fini(struct radeon_device *rdev);
-u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low);
-u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low);
-bool btc_dpm_vblank_too_short(struct radeon_device *rdev);
-void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
- struct seq_file *m);
-u32 btc_dpm_get_current_sclk(struct radeon_device *rdev);
-u32 btc_dpm_get_current_mclk(struct radeon_device *rdev);
-int sumo_dpm_init(struct radeon_device *rdev);
-int sumo_dpm_enable(struct radeon_device *rdev);
-int sumo_dpm_late_enable(struct radeon_device *rdev);
-void sumo_dpm_disable(struct radeon_device *rdev);
-int sumo_dpm_pre_set_power_state(struct radeon_device *rdev);
-int sumo_dpm_set_power_state(struct radeon_device *rdev);
-void sumo_dpm_post_set_power_state(struct radeon_device *rdev);
-void sumo_dpm_setup_asic(struct radeon_device *rdev);
-void sumo_dpm_display_configuration_changed(struct radeon_device *rdev);
-void sumo_dpm_fini(struct radeon_device *rdev);
-u32 sumo_dpm_get_sclk(struct radeon_device *rdev, bool low);
-u32 sumo_dpm_get_mclk(struct radeon_device *rdev, bool low);
-void sumo_dpm_print_power_state(struct radeon_device *rdev,
- struct radeon_ps *ps);
-void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
- struct seq_file *m);
-int sumo_dpm_force_performance_level(struct radeon_device *rdev,
- enum radeon_dpm_forced_level level);
-u32 sumo_dpm_get_current_sclk(struct radeon_device *rdev);
-u32 sumo_dpm_get_current_mclk(struct radeon_device *rdev);
+int evergreen_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
/*
* cayman
@@ -610,95 +472,20 @@ int cayman_asic_reset(struct radeon_device *rdev);
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int cayman_vm_init(struct radeon_device *rdev);
void cayman_vm_fini(struct radeon_device *rdev);
-void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
- unsigned vm_id, uint64_t pd_addr);
+void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
+void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
struct radeon_ib *ib);
-bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
-
-void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe, uint64_t src,
- unsigned count);
-void cayman_dma_vm_write_pages(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
-void cayman_dma_vm_set_pages(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
-void cayman_dma_vm_pad_ib(struct radeon_ib *ib);
-
-void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
- unsigned vm_id, uint64_t pd_addr);
-
-u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-u32 cayman_gfx_get_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-void cayman_gfx_set_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-uint32_t cayman_dma_get_rptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-uint32_t cayman_dma_get_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-void cayman_dma_set_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-int cayman_get_allowed_info_register(struct radeon_device *rdev,
- u32 reg, u32 *val);
-
-int ni_dpm_init(struct radeon_device *rdev);
-void ni_dpm_setup_asic(struct radeon_device *rdev);
-int ni_dpm_enable(struct radeon_device *rdev);
-void ni_dpm_disable(struct radeon_device *rdev);
-int ni_dpm_pre_set_power_state(struct radeon_device *rdev);
-int ni_dpm_set_power_state(struct radeon_device *rdev);
-void ni_dpm_post_set_power_state(struct radeon_device *rdev);
-void ni_dpm_fini(struct radeon_device *rdev);
-u32 ni_dpm_get_sclk(struct radeon_device *rdev, bool low);
-u32 ni_dpm_get_mclk(struct radeon_device *rdev, bool low);
-void ni_dpm_print_power_state(struct radeon_device *rdev,
- struct radeon_ps *ps);
-void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
- struct seq_file *m);
-int ni_dpm_force_performance_level(struct radeon_device *rdev,
- enum radeon_dpm_forced_level level);
-bool ni_dpm_vblank_too_short(struct radeon_device *rdev);
-u32 ni_dpm_get_current_sclk(struct radeon_device *rdev);
-u32 ni_dpm_get_current_mclk(struct radeon_device *rdev);
-int trinity_dpm_init(struct radeon_device *rdev);
-int trinity_dpm_enable(struct radeon_device *rdev);
-int trinity_dpm_late_enable(struct radeon_device *rdev);
-void trinity_dpm_disable(struct radeon_device *rdev);
-int trinity_dpm_pre_set_power_state(struct radeon_device *rdev);
-int trinity_dpm_set_power_state(struct radeon_device *rdev);
-void trinity_dpm_post_set_power_state(struct radeon_device *rdev);
-void trinity_dpm_setup_asic(struct radeon_device *rdev);
-void trinity_dpm_display_configuration_changed(struct radeon_device *rdev);
-void trinity_dpm_fini(struct radeon_device *rdev);
-u32 trinity_dpm_get_sclk(struct radeon_device *rdev, bool low);
-u32 trinity_dpm_get_mclk(struct radeon_device *rdev, bool low);
-void trinity_dpm_print_power_state(struct radeon_device *rdev,
- struct radeon_ps *ps);
-void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
- struct seq_file *m);
-int trinity_dpm_force_performance_level(struct radeon_device *rdev,
- enum radeon_dpm_forced_level level);
-void trinity_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
-u32 trinity_dpm_get_current_sclk(struct radeon_device *rdev);
-u32 trinity_dpm_get_current_mclk(struct radeon_device *rdev);
-int tn_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk);
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
/* DCE6 - SI */
void dce6_bandwidth_update(struct radeon_device *rdev);
-void dce6_audio_fini(struct radeon_device *rdev);
/*
* si
@@ -710,276 +497,23 @@ int si_init(struct radeon_device *rdev);
void si_fini(struct radeon_device *rdev);
int si_suspend(struct radeon_device *rdev);
int si_resume(struct radeon_device *rdev);
-bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
-bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int si_asic_reset(struct radeon_device *rdev);
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int si_irq_set(struct radeon_device *rdev);
int si_irq_process(struct radeon_device *rdev);
int si_vm_init(struct radeon_device *rdev);
void si_vm_fini(struct radeon_device *rdev);
-void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
- unsigned vm_id, uint64_t pd_addr);
+void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
-struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct reservation_object *resv);
-
-void si_dma_vm_copy_pages(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe, uint64_t src,
- unsigned count);
-void si_dma_vm_write_pages(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
-void si_dma_vm_set_pages(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
-
-void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
- unsigned vm_id, uint64_t pd_addr);
-u32 si_get_xclk(struct radeon_device *rdev);
-uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
-int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
-int si_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk);
-int si_get_temp(struct radeon_device *rdev);
-int si_get_allowed_info_register(struct radeon_device *rdev,
- u32 reg, u32 *val);
-int si_dpm_init(struct radeon_device *rdev);
-void si_dpm_setup_asic(struct radeon_device *rdev);
-int si_dpm_enable(struct radeon_device *rdev);
-int si_dpm_late_enable(struct radeon_device *rdev);
-void si_dpm_disable(struct radeon_device *rdev);
-int si_dpm_pre_set_power_state(struct radeon_device *rdev);
-int si_dpm_set_power_state(struct radeon_device *rdev);
-void si_dpm_post_set_power_state(struct radeon_device *rdev);
-void si_dpm_fini(struct radeon_device *rdev);
-void si_dpm_display_configuration_changed(struct radeon_device *rdev);
-void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
- struct seq_file *m);
-int si_dpm_force_performance_level(struct radeon_device *rdev,
- enum radeon_dpm_forced_level level);
-int si_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
- u32 *speed);
-int si_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
- u32 speed);
-u32 si_fan_ctrl_get_mode(struct radeon_device *rdev);
-void si_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode);
-u32 si_dpm_get_current_sclk(struct radeon_device *rdev);
-u32 si_dpm_get_current_mclk(struct radeon_device *rdev);
-
-/* DCE8 - CIK */
-void dce8_bandwidth_update(struct radeon_device *rdev);
-
-/*
- * cik
- */
-uint64_t cik_get_gpu_clock_counter(struct radeon_device *rdev);
-u32 cik_get_xclk(struct radeon_device *rdev);
-uint32_t cik_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
-void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
-int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
-int cik_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk);
-void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
- struct radeon_fence *fence);
-bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait);
-void cik_sdma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct reservation_object *resv);
-struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct reservation_object *resv);
-int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
-int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
-bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
-void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
- struct radeon_fence *fence);
-void cik_fence_compute_ring_emit(struct radeon_device *rdev,
- struct radeon_fence *fence);
-bool cik_semaphore_ring_emit(struct radeon_device *rdev,
- struct radeon_ring *cp,
- struct radeon_semaphore *semaphore,
- bool emit_wait);
-void cik_pcie_gart_tlb_flush(struct radeon_device *rdev);
-int cik_init(struct radeon_device *rdev);
-void cik_fini(struct radeon_device *rdev);
-int cik_suspend(struct radeon_device *rdev);
-int cik_resume(struct radeon_device *rdev);
-bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
-int cik_asic_reset(struct radeon_device *rdev);
-void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
-int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
-int cik_irq_set(struct radeon_device *rdev);
-int cik_irq_process(struct radeon_device *rdev);
-int cik_vm_init(struct radeon_device *rdev);
-void cik_vm_fini(struct radeon_device *rdev);
-void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
- unsigned vm_id, uint64_t pd_addr);
-
-void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe, uint64_t src,
- unsigned count);
-void cik_sdma_vm_write_pages(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
-void cik_sdma_vm_set_pages(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
-void cik_sdma_vm_pad_ib(struct radeon_ib *ib);
-
-void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
- unsigned vm_id, uint64_t pd_addr);
-int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
-u32 cik_gfx_get_rptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-u32 cik_gfx_get_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-void cik_gfx_set_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-u32 cik_compute_get_rptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-u32 cik_compute_get_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-void cik_compute_set_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-u32 cik_sdma_get_rptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-u32 cik_sdma_get_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-void cik_sdma_set_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-int ci_get_temp(struct radeon_device *rdev);
-int kv_get_temp(struct radeon_device *rdev);
-int cik_get_allowed_info_register(struct radeon_device *rdev,
- u32 reg, u32 *val);
-
-int ci_dpm_init(struct radeon_device *rdev);
-int ci_dpm_enable(struct radeon_device *rdev);
-int ci_dpm_late_enable(struct radeon_device *rdev);
-void ci_dpm_disable(struct radeon_device *rdev);
-int ci_dpm_pre_set_power_state(struct radeon_device *rdev);
-int ci_dpm_set_power_state(struct radeon_device *rdev);
-void ci_dpm_post_set_power_state(struct radeon_device *rdev);
-void ci_dpm_setup_asic(struct radeon_device *rdev);
-void ci_dpm_display_configuration_changed(struct radeon_device *rdev);
-void ci_dpm_fini(struct radeon_device *rdev);
-u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low);
-u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low);
-void ci_dpm_print_power_state(struct radeon_device *rdev,
- struct radeon_ps *ps);
-void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
- struct seq_file *m);
-int ci_dpm_force_performance_level(struct radeon_device *rdev,
- enum radeon_dpm_forced_level level);
-bool ci_dpm_vblank_too_short(struct radeon_device *rdev);
-void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
-u32 ci_dpm_get_current_sclk(struct radeon_device *rdev);
-u32 ci_dpm_get_current_mclk(struct radeon_device *rdev);
-
-int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
- u32 *speed);
-int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
- u32 speed);
-u32 ci_fan_ctrl_get_mode(struct radeon_device *rdev);
-void ci_fan_ctrl_set_mode(struct radeon_device *rdev, u32 mode);
-
-int kv_dpm_init(struct radeon_device *rdev);
-int kv_dpm_enable(struct radeon_device *rdev);
-int kv_dpm_late_enable(struct radeon_device *rdev);
-void kv_dpm_disable(struct radeon_device *rdev);
-int kv_dpm_pre_set_power_state(struct radeon_device *rdev);
-int kv_dpm_set_power_state(struct radeon_device *rdev);
-void kv_dpm_post_set_power_state(struct radeon_device *rdev);
-void kv_dpm_setup_asic(struct radeon_device *rdev);
-void kv_dpm_display_configuration_changed(struct radeon_device *rdev);
-void kv_dpm_fini(struct radeon_device *rdev);
-u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low);
-u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low);
-void kv_dpm_print_power_state(struct radeon_device *rdev,
- struct radeon_ps *ps);
-void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
- struct seq_file *m);
-int kv_dpm_force_performance_level(struct radeon_device *rdev,
- enum radeon_dpm_forced_level level);
-void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
-void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable);
-u32 kv_dpm_get_current_sclk(struct radeon_device *rdev);
-u32 kv_dpm_get_current_mclk(struct radeon_device *rdev);
-
-/* uvd v1.0 */
-uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-uint32_t uvd_v1_0_get_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-void uvd_v1_0_set_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-int uvd_v1_0_resume(struct radeon_device *rdev);
-
-int uvd_v1_0_init(struct radeon_device *rdev);
-void uvd_v1_0_fini(struct radeon_device *rdev);
-int uvd_v1_0_start(struct radeon_device *rdev);
-void uvd_v1_0_stop(struct radeon_device *rdev);
-
-int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
-void uvd_v1_0_fence_emit(struct radeon_device *rdev,
- struct radeon_fence *fence);
-int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
-bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait);
-void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-
-/* uvd v2.2 */
-int uvd_v2_2_resume(struct radeon_device *rdev);
-void uvd_v2_2_fence_emit(struct radeon_device *rdev,
- struct radeon_fence *fence);
-bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait);
-
-/* uvd v3.1 */
-bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait);
-
-/* uvd v4.2 */
-int uvd_v4_2_resume(struct radeon_device *rdev);
-
-/* vce v1.0 */
-uint32_t vce_v1_0_get_rptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-uint32_t vce_v1_0_get_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-void vce_v1_0_set_wptr(struct radeon_device *rdev,
- struct radeon_ring *ring);
-int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data);
-unsigned vce_v1_0_bo_size(struct radeon_device *rdev);
-int vce_v1_0_resume(struct radeon_device *rdev);
-int vce_v1_0_init(struct radeon_device *rdev);
-int vce_v1_0_start(struct radeon_device *rdev);
-
-/* vce v2.0 */
-unsigned vce_v2_0_bo_size(struct radeon_device *rdev);
-int vce_v2_0_resume(struct radeon_device *rdev);
+uint64_t si_get_gpu_clock(struct radeon_device *rdev);
+int si_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence);
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
#endif
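/*
 * Aside: most of the churn in this header is one calling-convention
 * change applied uniformly.  The newer prototypes being removed
 * return the fence and take a struct reservation_object, while the
 * prototypes restored here return an errno-style int and fill a
 * struct radeon_fence ** out-parameter.  A self-contained sketch of
 * the caller-side difference; the copy_*_style helpers and struct
 * fence below are stand-ins, not symbols from this tree.
 */
#include <stdio.h>

struct fence { int seq; };

/* newer convention: the fence (or NULL on error) is the return value */
static struct fence *
copy_new_style(struct fence *storage)
{
	storage->seq = 1;
	return storage;
}

/* convention restored here: int result, fence handed back by pointer */
static int
copy_old_style(struct fence *storage, struct fence **out)
{
	storage->seq = 2;
	*out = storage;
	return 0;
}

int
main(void)
{
	struct fence f;
	struct fence *fp;

	fp = copy_new_style(&f);
	if (fp == NULL)
		return 1;

	if (copy_old_style(&f, &fp) != 0)
		return 1;
	printf("fence seq %d via out-parameter\n", fp->seq);
	return 0;
}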
diff --git a/sys/dev/pci/drm/radeon/radeon_atombios.c b/sys/dev/pci/drm/radeon/radeon_atombios.c
index e817bcb2daa..683419cf064 100644
--- a/sys/dev/pci/drm/radeon/radeon_atombios.c
+++ b/sys/dev/pci/drm/radeon/radeon_atombios.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_atombios.c,v 1.9 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -30,15 +31,36 @@
#include "atom.h"
#include "atom-bits.h"
+/* from radeon_encoder.c */
+extern uint32_t
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+ uint8_t dac);
+extern void radeon_link_encoder_connector(struct drm_device *dev);
extern void
radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
uint32_t supported_device, u16 caps);
+/* from radeon_connector.c */
+extern void
+radeon_add_atom_connector(struct drm_device *dev,
+ uint32_t connector_id,
+ uint32_t supported_device,
+ int connector_type,
+ struct radeon_i2c_bus_rec *i2c_bus,
+ uint32_t igp_lane_info,
+ uint16_t connector_object_id,
+ struct radeon_hpd *hpd,
+ struct radeon_router *router);
+
/* from radeon_legacy_encoder.c */
extern void
radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
uint32_t supported_device);
+/* local */
+static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
+ u16 voltage_id, u16 *voltage);
+
union atom_supported_devices {
struct _ATOM_SUPPORTED_DEVICES_INFO info;
struct _ATOM_SUPPORTED_DEVICES_INFO_2 info_2;
@@ -146,8 +168,8 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_GPIO_I2C_ASSIGMENT);
- gpio = &i2c_info->asGPIO_Info[0];
for (i = 0; i < num_indices; i++) {
+ gpio = &i2c_info->asGPIO_Info[i];
radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
@@ -155,8 +177,6 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd
i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
break;
}
- gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
- ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
}
}
@@ -180,8 +200,9 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_GPIO_I2C_ASSIGMENT);
- gpio = &i2c_info->asGPIO_Info[0];
for (i = 0; i < num_indices; i++) {
+ gpio = &i2c_info->asGPIO_Info[i];
+
radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
@@ -190,14 +211,12 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
snprintf(stmp, sizeof(stmp), "0x%x", i2c.i2c_id);
rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
}
- gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
- ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
}
}
}
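/*
 * Aside: the two hunks above swap a bump-by-sizeof pointer walk over
 * the AtomBIOS GPIO_I2C table for per-iteration array indexing.  For
 * fixed-size records the two forms visit exactly the same bytes; a
 * standalone demonstration with a hypothetical record type (struct
 * rec is illustrative, not the real ATOM_GPIO_I2C_ASSIGMENT layout):
 */
#include <assert.h>

struct rec { unsigned char id; unsigned char pad[7]; };

int
main(void)
{
	struct rec table[4] = { {1}, {2}, {3}, {4} };
	const struct rec *p = &table[0];
	int i;

	for (i = 0; i < 4; i++) {
		/* indexing form restored by this change */
		assert(table[i].id == p->id);
		/* pointer-bump form being removed */
		p = (const struct rec *)((const unsigned char *)p +
		    sizeof(struct rec));
	}
	return 0;
}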
-struct radeon_gpio_rec radeon_atombios_lookup_gpio(struct radeon_device *rdev,
- u8 id)
+static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
+ u8 id)
{
struct atom_context *ctx = rdev->mode_info.atom_context;
struct radeon_gpio_rec gpio;
@@ -216,18 +235,15 @@ struct radeon_gpio_rec radeon_atombios_lookup_gpio(struct radeon_device *rdev,
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
- pin = gpio_info->asGPIO_Pin;
for (i = 0; i < num_indices; i++) {
+ pin = &gpio_info->asGPIO_Pin[i];
if (id == pin->ucGPIO_ID) {
gpio.id = pin->ucGPIO_ID;
gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
- gpio.shift = pin->ucGpioPinBitShift;
gpio.mask = (1 << pin->ucGpioPinBitShift);
gpio.valid = true;
break;
}
- pin = (ATOM_GPIO_PIN_ASSIGNMENT *)
- ((u8 *)pin + sizeof(ATOM_GPIO_PIN_ASSIGNMENT));
}
}
@@ -437,9 +453,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
- if (((dev->pdev->device == 0x9802) ||
- (dev->pdev->device == 0x9805) ||
- (dev->pdev->device == 0x9806)) &&
+ if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
(dev->pdev->subsystem_vendor == 0x1734) &&
(dev->pdev->subsystem_device == 0x11bd)) {
if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
@@ -450,10 +464,11 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
+
return true;
}
-static const int supported_devices_connector_convert[] = {
+const int supported_devices_connector_convert[] = {
DRM_MODE_CONNECTOR_Unknown,
DRM_MODE_CONNECTOR_VGA,
DRM_MODE_CONNECTOR_DVII,
@@ -472,7 +487,7 @@ static const int supported_devices_connector_convert[] = {
DRM_MODE_CONNECTOR_DisplayPort
};
-static const uint16_t supported_devices_connector_object_id_convert[] = {
+const uint16_t supported_devices_connector_object_id_convert[] = {
CONNECTOR_OBJECT_ID_NONE,
CONNECTOR_OBJECT_ID_VGA,
CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, /* not all boards support DL */
@@ -489,7 +504,7 @@ static const uint16_t supported_devices_connector_object_id_convert[] = {
CONNECTOR_OBJECT_ID_SVIDEO
};
-static const int object_connector_convert[] = {
+const int object_connector_convert[] = {
DRM_MODE_CONNECTOR_Unknown,
DRM_MODE_CONNECTOR_DVII,
DRM_MODE_CONNECTOR_DVII,
@@ -796,7 +811,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
hpd_record =
(ATOM_HPD_INT_RECORD *)
record;
- gpio = radeon_atombios_lookup_gpio(rdev,
+ gpio = radeon_lookup_gpio(rdev,
hpd_record->ucHPDIntGPIOID);
hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
hpd.plugged_state = hpd_record->ucPlugged_PinState;
@@ -839,7 +854,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
radeon_link_encoder_connector(dev);
- radeon_setup_mst_connector(dev);
return true;
}
@@ -1106,31 +1120,6 @@ union firmware_info {
ATOM_FIRMWARE_INFO_V2_2 info_22;
};
-union igp_info {
- struct _ATOM_INTEGRATED_SYSTEM_INFO info;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
-};
-
-static void radeon_atombios_get_dentist_vco_freq(struct radeon_device *rdev)
-{
- struct radeon_mode_info *mode_info = &rdev->mode_info;
- int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
- union igp_info *igp_info;
- u8 frev, crev;
- u16 data_offset;
-
- if (atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset)) {
- igp_info = (union igp_info *)(mode_info->atom_context->bios +
- data_offset);
- rdev->clock.vco_freq =
- le32_to_cpu(igp_info->info_6.ulDentistVCOFreq);
- }
-}
-
bool radeon_atom_get_clock_info(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
@@ -1155,7 +1144,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
le16_to_cpu(firmware_info->info.usReferenceClock);
p1pll->reference_div = 0;
- if ((frev < 2) && (crev < 2))
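+ /* content revs before 2 only provide the 16-bit usMinPixelClockPLL_Output field */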
+ if (crev < 2)
p1pll->pll_out_min =
le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
else
@@ -1164,7 +1153,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
p1pll->pll_out_max =
le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
- if (((frev < 2) && (crev >= 4)) || (frev >= 2)) {
+ if (crev >= 4) {
p1pll->lcd_pll_out_min =
le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
if (p1pll->lcd_pll_out_min == 0)
@@ -1255,22 +1244,13 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
rdev->clock.default_dispclk =
le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
if (rdev->clock.default_dispclk == 0) {
- if (ASIC_IS_DCE6(rdev))
- rdev->clock.default_dispclk = 60000; /* 600 Mhz */
- else if (ASIC_IS_DCE5(rdev))
+ if (ASIC_IS_DCE5(rdev))
rdev->clock.default_dispclk = 54000; /* 540 Mhz */
else
rdev->clock.default_dispclk = 60000; /* 600 Mhz */
}
- /* set a reasonable default for DP */
- if (ASIC_IS_DCE6(rdev) && (rdev->clock.default_dispclk < 53900)) {
- DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
- rdev->clock.default_dispclk / 100);
- rdev->clock.default_dispclk = 60000;
- }
rdev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
- rdev->clock.current_dispclk = rdev->clock.default_dispclk;
}
*dcpll = *p1pll;
@@ -1282,25 +1262,19 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
rdev->mode_info.firmware_flags =
le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
- if (ASIC_IS_DCE8(rdev))
- rdev->clock.vco_freq =
- le32_to_cpu(firmware_info->info_22.ulGPUPLL_OutputFreq);
- else if (ASIC_IS_DCE5(rdev))
- rdev->clock.vco_freq = rdev->clock.current_dispclk;
- else if (ASIC_IS_DCE41(rdev))
- radeon_atombios_get_dentist_vco_freq(rdev);
- else
- rdev->clock.vco_freq = rdev->clock.current_dispclk;
-
- if (rdev->clock.vco_freq == 0)
- rdev->clock.vco_freq = 360000; /* 3.6 GHz */
-
return true;
}
return false;
}
+union igp_info {
+ struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+};
+
bool radeon_atombios_sideport_present(struct radeon_device *rdev)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
@@ -1391,7 +1365,6 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
uint16_t data_offset, size;
struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
- struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT *ss_assign;
uint8_t frev, crev;
int i, num_indices;
@@ -1403,21 +1376,18 @@ bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
- ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
- ((u8 *)&ss_info->asSS_Info[0]);
+
for (i = 0; i < num_indices; i++) {
- if (ss_assign->ucSS_Id == id) {
+ if (ss_info->asSS_Info[i].ucSS_Id == id) {
ss->percentage =
- le16_to_cpu(ss_assign->usSpreadSpectrumPercentage);
- ss->type = ss_assign->ucSpreadSpectrumType;
- ss->step = ss_assign->ucSS_Step;
- ss->delay = ss_assign->ucSS_Delay;
- ss->range = ss_assign->ucSS_Range;
- ss->refdiv = ss_assign->ucRecommendedRef_Div;
+ le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
+ ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
+ ss->step = ss_info->asSS_Info[i].ucSS_Step;
+ ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
+ ss->range = ss_info->asSS_Info[i].ucSS_Range;
+ ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
return true;
}
- ss_assign = (struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT*)
- ((u8 *)ss_assign + sizeof(struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT));
}
}
return false;
@@ -1472,22 +1442,6 @@ static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev,
break;
}
break;
- case 8:
- switch (id) {
- case ASIC_INTERNAL_SS_ON_TMDS:
- percentage = le16_to_cpu(igp_info->info_8.usDVISSPercentage);
- rate = le16_to_cpu(igp_info->info_8.usDVISSpreadRateIn10Hz);
- break;
- case ASIC_INTERNAL_SS_ON_HDMI:
- percentage = le16_to_cpu(igp_info->info_8.usHDMISSPercentage);
- rate = le16_to_cpu(igp_info->info_8.usHDMISSpreadRateIn10Hz);
- break;
- case ASIC_INTERNAL_SS_ON_LVDS:
- percentage = le16_to_cpu(igp_info->info_8.usLvdsSSPercentage);
- rate = le16_to_cpu(igp_info->info_8.usLvdsSSpreadRateIn10Hz);
- break;
- }
- break;
default:
DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
break;
@@ -1505,12 +1459,6 @@ union asic_ss_info {
struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
};
-union asic_ss_assignment {
- struct _ATOM_ASIC_SS_ASSIGNMENT v1;
- struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2;
- struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3;
-};
-
bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
struct radeon_atom_ss *ss,
int id, u32 clock)
@@ -1519,19 +1467,9 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
uint16_t data_offset, size;
union asic_ss_info *ss_info;
- union asic_ss_assignment *ss_assign;
uint8_t frev, crev;
int i, num_indices;
- if (id == ASIC_INTERNAL_MEMORY_SS) {
- if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT))
- return false;
- }
- if (id == ASIC_INTERNAL_ENGINE_SS) {
- if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT))
- return false;
- }
-
memset(ss, 0, sizeof(struct radeon_atom_ss));
if (atom_parse_data_header(mode_info->atom_context, index, &size,
&frev, &crev, &data_offset)) {
@@ -1544,68 +1482,45 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_ASIC_SS_ASSIGNMENT);
- ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]);
for (i = 0; i < num_indices; i++) {
- if ((ss_assign->v1.ucClockIndication == id) &&
- (clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) {
+ if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
+ (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) {
ss->percentage =
- le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
- ss->type = ss_assign->v1.ucSpreadSpectrumMode;
- ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
- ss->percentage_divider = 100;
+ le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
+ ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
+ ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz);
return true;
}
- ss_assign = (union asic_ss_assignment *)
- ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT));
}
break;
case 2:
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
- ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]);
for (i = 0; i < num_indices; i++) {
- if ((ss_assign->v2.ucClockIndication == id) &&
- (clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) {
+ if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
+ (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) {
ss->percentage =
- le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
- ss->type = ss_assign->v2.ucSpreadSpectrumMode;
- ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
- ss->percentage_divider = 100;
- if ((crev == 2) &&
- ((id == ASIC_INTERNAL_ENGINE_SS) ||
- (id == ASIC_INTERNAL_MEMORY_SS)))
- ss->rate /= 100;
+ le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
+ ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
+ ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz);
return true;
}
- ss_assign = (union asic_ss_assignment *)
- ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2));
}
break;
case 3:
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
- ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]);
for (i = 0; i < num_indices; i++) {
- if ((ss_assign->v3.ucClockIndication == id) &&
- (clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) {
+ if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
+ (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) {
ss->percentage =
- le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
- ss->type = ss_assign->v3.ucSpreadSpectrumMode;
- ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
- if (ss_assign->v3.ucSpreadSpectrumMode &
- SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK)
- ss->percentage_divider = 1000;
- else
- ss->percentage_divider = 100;
- if ((id == ASIC_INTERNAL_ENGINE_SS) ||
- (id == ASIC_INTERNAL_MEMORY_SS))
- ss->rate /= 100;
+ le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
+ ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
+ ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
if (rdev->flags & RADEON_IS_IGP)
radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
return true;
}
- ss_assign = (union asic_ss_assignment *)
- ((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3));
}
break;
default:
@@ -1840,8 +1755,7 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
if (misc & ATOM_DOUBLE_CLOCK_MODE)
mode->flags |= DRM_MODE_FLAG_DBLSCAN;
- mode->crtc_clock = mode->clock =
- le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10;
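+ /* usPixelClock is in 10 kHz units; drm mode clocks are in kHz */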
+ mode->clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10;
if (index == 1) {
/* PAL timings appear to have wrong values for totals */
@@ -1884,8 +1798,7 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
if (misc & ATOM_DOUBLE_CLOCK_MODE)
mode->flags |= DRM_MODE_FLAG_DBLSCAN;
- mode->crtc_clock = mode->clock =
- le16_to_cpu(dtd_timings->usPixClk) * 10;
+ mode->clock = le16_to_cpu(dtd_timings->usPixClk) * 10;
break;
}
return true;
@@ -1996,7 +1909,7 @@ static const char *thermal_controller_names[] = {
"adm1032",
"adm1030",
"max6649",
- "lm63", /* lm64 */
+ "lm64",
"f75375",
"asc7xxx",
};
@@ -2007,7 +1920,7 @@ static const char *pp_lib_thermal_controller_names[] = {
"adm1032",
"adm1030",
"max6649",
- "lm63", /* lm64 */
+ "lm64",
"f75375",
"RV6xx",
"RV770",
@@ -2020,7 +1933,6 @@ static const char *pp_lib_thermal_controller_names[] = {
"Northern Islands",
"Southern Islands",
"lm96163",
- "Sea Islands",
};
union power_info {
@@ -2038,7 +1950,6 @@ union pplib_clock_info {
struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
struct _ATOM_PPLIB_SI_CLOCK_INFO si;
- struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
};
union pplib_power_state {
@@ -2156,7 +2067,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
- radeon_atombios_lookup_gpio(rdev,
+ radeon_lookup_gpio(rdev,
power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
@@ -2192,7 +2103,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
- radeon_atombios_lookup_gpio(rdev,
+ radeon_lookup_gpio(rdev,
power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
@@ -2228,7 +2139,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
rdev->pm.power_state[state_index].clock_info[0].voltage.type =
VOLTAGE_GPIO;
rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
- radeon_atombios_lookup_gpio(rdev,
+ radeon_lookup_gpio(rdev,
power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
@@ -2276,14 +2187,6 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
/* add the i2c bus for thermal/fan chip */
if (controller->ucType > 0) {
- if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
- rdev->pm.no_fan = true;
- rdev->pm.fan_pulses_per_revolution =
- controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
- if (rdev->pm.fan_pulses_per_revolution) {
- rdev->pm.fan_min_rpm = controller->ucFanMinRPM;
- rdev->pm.fan_max_rpm = controller->ucFanMaxRPM;
- }
if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
DRM_INFO("Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
@@ -2314,41 +2217,19 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_SI;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- rdev->pm.int_thermal_type = THERMAL_TYPE_CI;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- rdev->pm.int_thermal_type = THERMAL_TYPE_KV;
- } else if (controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
- DRM_INFO("External GPIO thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
- } else if (controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
- DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- rdev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
- } else if (controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
- DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- rdev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
+ } else if ((controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
+ (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
+ (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
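+ /* external GPIO and combo controllers share one path; no int_thermal_type is set */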
+ DRM_INFO("Special thermal controller config\n");
} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
pp_lib_thermal_controller_names[controller->ucType],
controller->ucI2cAddress >> 1,
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
#ifdef notyet
@@ -2370,8 +2251,8 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
}
}
-void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
- u16 *vddc, u16 *vddci, u16 *mvdd)
+static void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
+ u16 *vddc, u16 *vddci)
{
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
@@ -2381,7 +2262,6 @@ void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
*vddc = 0;
*vddci = 0;
- *mvdd = 0;
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
@@ -2389,10 +2269,8 @@ void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
(union firmware_info *)(mode_info->atom_context->bios +
data_offset);
*vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
- if ((frev == 2) && (crev >= 2)) {
+ if ((frev == 2) && (crev >= 2))
*vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
- *mvdd = le16_to_cpu(firmware_info->info_22.usBootUpMVDDCVoltage);
- }
}
}
@@ -2403,9 +2281,9 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
int j;
u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
- u16 vddc, vddci, mvdd;
+ u16 vddc, vddci;
- radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd);
+ radeon_atombios_get_default_voltages(rdev, &vddc, &vddci);
rdev->pm.power_state[state_index].misc = misc;
rdev->pm.power_state[state_index].misc2 = misc2;
@@ -2441,20 +2319,14 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
- if ((rdev->family >= CHIP_BARTS) && !(rdev->flags & RADEON_IS_IGP)) {
+ if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
/* NI chips post without MC ucode, so default clocks are strobe mode only */
rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci;
} else {
- u16 max_vddci = 0;
-
- if (ASIC_IS_DCE4(rdev))
- radeon_atom_get_max_voltage(rdev,
- SET_VOLTAGE_TYPE_ASIC_VDDCI,
- &max_vddci);
/* patch the table values with the default sclk/mclk from firmware info */
for (j = 0; j < mode_index; j++) {
rdev->pm.power_state[state_index].clock_info[j].mclk =
rdev->clock.default_mclk;
@@ -2463,9 +2335,6 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
if (vddc)
rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
vddc;
- if (max_vddci)
- rdev->pm.power_state[state_index].clock_info[j].voltage.vddci =
- max_vddci;
}
}
}
@@ -2488,16 +2357,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
}
- } else if (rdev->family >= CHIP_BONAIRE) {
- sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
- sclk |= clock_info->ci.ucEngineClockHigh << 16;
- mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
- mclk |= clock_info->ci.ucMemoryClockHigh << 16;
- rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
- rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
- rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
- VOLTAGE_NONE;
- } else if (rdev->family >= CHIP_TAHITI) {
+ } else if (ASIC_IS_DCE6(rdev)) {
sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
sclk |= clock_info->si.ucEngineClockHigh << 16;
mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
@@ -2510,7 +2370,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
le16_to_cpu(clock_info->si.usVDDC);
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
le16_to_cpu(clock_info->si.usVDDCI);
- } else if (rdev->family >= CHIP_CEDAR) {
+ } else if (ASIC_IS_DCE4(rdev)) {
sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
@@ -2542,10 +2402,6 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
case ATOM_VIRTUAL_VOLTAGE_ID1:
case ATOM_VIRTUAL_VOLTAGE_ID2:
case ATOM_VIRTUAL_VOLTAGE_ID3:
- case ATOM_VIRTUAL_VOLTAGE_ID4:
- case ATOM_VIRTUAL_VOLTAGE_ID5:
- case ATOM_VIRTUAL_VOLTAGE_ID6:
- case ATOM_VIRTUAL_VOLTAGE_ID7:
if (radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC,
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage,
&vddc) == 0)
@@ -2815,184 +2671,6 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.current_vddc = 0;
}
-union get_clock_dividers {
- struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS v1;
- struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 v2;
- struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 v3;
- struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 v4;
- struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 v5;
- struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6 v6_in;
- struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 v6_out;
-};
-
-int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
- u8 clock_type,
- u32 clock,
- bool strobe_mode,
- struct atom_clock_dividers *dividers)
-{
- union get_clock_dividers args;
- int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL);
- u8 frev, crev;
-
- memset(&args, 0, sizeof(args));
- memset(dividers, 0, sizeof(struct atom_clock_dividers));
-
- if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
- return -EINVAL;
-
- switch (crev) {
- case 1:
- /* r4xx, r5xx */
- args.v1.ucAction = clock_type;
- args.v1.ulClock = cpu_to_le32(clock); /* 10 khz */
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
- dividers->post_div = args.v1.ucPostDiv;
- dividers->fb_div = args.v1.ucFbDiv;
- dividers->enable_post_div = true;
- break;
- case 2:
- case 3:
- case 5:
- /* r6xx, r7xx, evergreen, ni, si */
- if (rdev->family <= CHIP_RV770) {
- args.v2.ucAction = clock_type;
- args.v2.ulClock = cpu_to_le32(clock); /* 10 khz */
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
- dividers->post_div = args.v2.ucPostDiv;
- dividers->fb_div = le16_to_cpu(args.v2.usFbDiv);
- dividers->ref_div = args.v2.ucAction;
- if (rdev->family == CHIP_RV770) {
- dividers->enable_post_div = (le32_to_cpu(args.v2.ulClock) & (1 << 24)) ?
- true : false;
- dividers->vco_mode = (le32_to_cpu(args.v2.ulClock) & (1 << 25)) ? 1 : 0;
- } else
- dividers->enable_post_div = (dividers->fb_div & 1) ? true : false;
- } else {
- if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
- args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
- dividers->post_div = args.v3.ucPostDiv;
- dividers->enable_post_div = (args.v3.ucCntlFlag &
- ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
- dividers->enable_dithen = (args.v3.ucCntlFlag &
- ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
- dividers->whole_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
- dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
- dividers->ref_div = args.v3.ucRefDiv;
- dividers->vco_mode = (args.v3.ucCntlFlag &
- ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
- } else {
- /* for SI we use ComputeMemoryClockParam for memory plls */
- if (rdev->family >= CHIP_TAHITI)
- return -EINVAL;
- args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
- if (strobe_mode)
- args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
- dividers->post_div = args.v5.ucPostDiv;
- dividers->enable_post_div = (args.v5.ucCntlFlag &
- ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
- dividers->enable_dithen = (args.v5.ucCntlFlag &
- ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
- dividers->whole_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDiv);
- dividers->frac_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDivFrac);
- dividers->ref_div = args.v5.ucRefDiv;
- dividers->vco_mode = (args.v5.ucCntlFlag &
- ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
- }
- }
- break;
- case 4:
- /* fusion */
- args.v4.ulClock = cpu_to_le32(clock); /* 10 khz */
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
- dividers->post_divider = dividers->post_div = args.v4.ucPostDiv;
- dividers->real_clock = le32_to_cpu(args.v4.ulClock);
- break;
- case 6:
- /* CI */
- /* COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, COMPUTE_GPUCLK_INPUT_FLAG_SCLK */
- args.v6_in.ulClock.ulComputeClockFlag = clock_type;
- args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock); /* 10 khz */
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
- dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv);
- dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac);
- dividers->ref_div = args.v6_out.ucPllRefDiv;
- dividers->post_div = args.v6_out.ucPllPostDiv;
- dividers->flags = args.v6_out.ucPllCntlFlag;
- dividers->real_clock = le32_to_cpu(args.v6_out.ulClock.ulClock);
- dividers->post_divider = args.v6_out.ulClock.ucPostDiv;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-int radeon_atom_get_memory_pll_dividers(struct radeon_device *rdev,
- u32 clock,
- bool strobe_mode,
- struct atom_mpll_param *mpll_param)
-{
- COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 args;
- int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam);
- u8 frev, crev;
-
- memset(&args, 0, sizeof(args));
- memset(mpll_param, 0, sizeof(struct atom_mpll_param));
-
- if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
- return -EINVAL;
-
- switch (frev) {
- case 2:
- switch (crev) {
- case 1:
- /* SI */
- args.ulClock = cpu_to_le32(clock); /* 10 khz */
- args.ucInputFlag = 0;
- if (strobe_mode)
- args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN;
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
- mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac);
- mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv);
- mpll_param->post_div = args.ucPostDiv;
- mpll_param->dll_speed = args.ucDllSpeed;
- mpll_param->bwcntl = args.ucBWCntl;
- mpll_param->vco_mode =
- (args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK);
- mpll_param->yclk_sel =
- (args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0;
- mpll_param->qdr =
- (args.ucPllCntlFlag & MPLL_CNTL_FLAG_QDR_ENABLE) ? 1 : 0;
- mpll_param->half_rate =
- (args.ucPllCntlFlag & MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0;
- break;
- default:
- return -EINVAL;
- }
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
{
DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
@@ -3046,48 +2724,6 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev,
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
-void radeon_atom_set_engine_dram_timings(struct radeon_device *rdev,
- u32 eng_clock, u32 mem_clock)
-{
- SET_ENGINE_CLOCK_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
- u32 tmp;
-
- memset(&args, 0, sizeof(args));
-
- tmp = eng_clock & SET_CLOCK_FREQ_MASK;
- tmp |= (COMPUTE_ENGINE_PLL_PARAM << 24);
-
- args.ulTargetEngineClock = cpu_to_le32(tmp);
- if (mem_clock)
- args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK);
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
-void radeon_atom_update_memory_dll(struct radeon_device *rdev,
- u32 mem_clock)
-{
- u32 args;
- int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
-
- args = cpu_to_le32(mem_clock); /* 10 khz */
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
-void radeon_atom_set_ac_timing(struct radeon_device *rdev,
- u32 mem_clock)
-{
- SET_MEMORY_CLOCK_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
- u32 tmp = mem_clock | (COMPUTE_MEMORY_PLL_PARAM << 24);
-
- args.ulTargetMemoryClock = cpu_to_le32(tmp); /* 10 khz */
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
union set_voltage {
struct _SET_VOLTAGE_PS_ALLOCATION alloc;
struct _SET_VOLTAGE_PARAMETERS v1;
@@ -3132,8 +2768,8 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
-int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
- u16 voltage_id, u16 *voltage)
+static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
+ u16 voltage_id, u16 *voltage)
{
union set_voltage args;
int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
@@ -3171,899 +2807,6 @@ int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
return 0;
}
-int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev,
- u16 *voltage,
- u16 leakage_idx)
-{
- return radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
-}
-
-int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
- u16 *leakage_id)
-{
- union set_voltage args;
- int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
- u8 frev, crev;
-
- if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
- return -EINVAL;
-
- switch (crev) {
- case 3:
- case 4:
- args.v3.ucVoltageType = 0;
- args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID;
- args.v3.usVoltageLevel = 0;
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
- *leakage_id = le16_to_cpu(args.v3.usVoltageLevel);
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev,
- u16 *vddc, u16 *vddci,
- u16 virtual_voltage_id,
- u16 vbios_voltage_id)
-{
- int index = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
- u8 frev, crev;
- u16 data_offset, size;
- int i, j;
- ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
- u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
-
- *vddc = 0;
- *vddci = 0;
-
- if (!atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset))
- return -EINVAL;
-
- profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
- (rdev->mode_info.atom_context->bios + data_offset);
-
- switch (frev) {
- case 1:
- return -EINVAL;
- case 2:
- switch (crev) {
- case 1:
- if (size < sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))
- return -EINVAL;
- leakage_bin = (u16 *)
- (rdev->mode_info.atom_context->bios + data_offset +
- le16_to_cpu(profile->usLeakageBinArrayOffset));
- vddc_id_buf = (u16 *)
- (rdev->mode_info.atom_context->bios + data_offset +
- le16_to_cpu(profile->usElbVDDC_IdArrayOffset));
- vddc_buf = (u16 *)
- (rdev->mode_info.atom_context->bios + data_offset +
- le16_to_cpu(profile->usElbVDDC_LevelArrayOffset));
- vddci_id_buf = (u16 *)
- (rdev->mode_info.atom_context->bios + data_offset +
- le16_to_cpu(profile->usElbVDDCI_IdArrayOffset));
- vddci_buf = (u16 *)
- (rdev->mode_info.atom_context->bios + data_offset +
- le16_to_cpu(profile->usElbVDDCI_LevelArrayOffset));
-
- if (profile->ucElbVDDC_Num > 0) {
- for (i = 0; i < profile->ucElbVDDC_Num; i++) {
- if (vddc_id_buf[i] == virtual_voltage_id) {
- for (j = 0; j < profile->ucLeakageBinNum; j++) {
- if (vbios_voltage_id <= leakage_bin[j]) {
- *vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
- break;
- }
- }
- break;
- }
- }
- }
- if (profile->ucElbVDDCI_Num > 0) {
- for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
- if (vddci_id_buf[i] == virtual_voltage_id) {
- for (j = 0; j < profile->ucLeakageBinNum; j++) {
- if (vbios_voltage_id <= leakage_bin[j]) {
- *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
- break;
- }
- }
- break;
- }
- }
- }
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return -EINVAL;
- }
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return -EINVAL;
- }
-
- return 0;
-}
-
-union get_voltage_info {
- struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in;
- struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out;
-};
-
-int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
- u16 virtual_voltage_id,
- u16 *voltage)
-{
- int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo);
- u32 entry_id;
- u32 count = rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count;
- union get_voltage_info args;
-
- for (entry_id = 0; entry_id < count; entry_id++) {
- if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v ==
- virtual_voltage_id)
- break;
- }
-
- if (entry_id >= count)
- return -EINVAL;
-
- args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
- args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
- args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id);
- args.in.ulSCLKFreq =
- cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
- *voltage = le16_to_cpu(args.evv_out.usVoltageLevel);
-
- return 0;
-}
-
-int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
- u16 voltage_level, u8 voltage_type,
- u32 *gpio_value, u32 *gpio_mask)
-{
- union set_voltage args;
- int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
- u8 frev, crev;
-
- if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
- return -EINVAL;
-
- switch (crev) {
- case 1:
- return -EINVAL;
- case 2:
- args.v2.ucVoltageType = voltage_type;
- args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK;
- args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
- *gpio_mask = le32_to_cpu(*(u32 *)&args.v2);
-
- args.v2.ucVoltageType = voltage_type;
- args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL;
- args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
-
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
- *gpio_value = le32_to_cpu(*(u32 *)&args.v2);
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return -EINVAL;
- }
-
- return 0;
-}
-
-union voltage_object_info {
- struct _ATOM_VOLTAGE_OBJECT_INFO v1;
- struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
- struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3;
-};
-
-union voltage_object {
- struct _ATOM_VOLTAGE_OBJECT v1;
- struct _ATOM_VOLTAGE_OBJECT_V2 v2;
- union _ATOM_VOLTAGE_OBJECT_V3 v3;
-};
-
-static ATOM_VOLTAGE_OBJECT *atom_lookup_voltage_object_v1(ATOM_VOLTAGE_OBJECT_INFO *v1,
- u8 voltage_type)
-{
- u32 size = le16_to_cpu(v1->sHeader.usStructureSize);
- u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO, asVoltageObj[0]);
- u8 *start = (u8 *)v1;
-
- while (offset < size) {
- ATOM_VOLTAGE_OBJECT *vo = (ATOM_VOLTAGE_OBJECT *)(start + offset);
- if (vo->ucVoltageType == voltage_type)
- return vo;
- offset += offsetof(ATOM_VOLTAGE_OBJECT, asFormula.ucVIDAdjustEntries) +
- vo->asFormula.ucNumOfVoltageEntries;
- }
- return NULL;
-}
-
-static ATOM_VOLTAGE_OBJECT_V2 *atom_lookup_voltage_object_v2(ATOM_VOLTAGE_OBJECT_INFO_V2 *v2,
- u8 voltage_type)
-{
- u32 size = le16_to_cpu(v2->sHeader.usStructureSize);
- u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V2, asVoltageObj[0]);
- u8 *start = (u8*)v2;
-
- while (offset < size) {
- ATOM_VOLTAGE_OBJECT_V2 *vo = (ATOM_VOLTAGE_OBJECT_V2 *)(start + offset);
- if (vo->ucVoltageType == voltage_type)
- return vo;
- offset += offsetof(ATOM_VOLTAGE_OBJECT_V2, asFormula.asVIDAdjustEntries) +
- (vo->asFormula.ucNumOfVoltageEntries * sizeof(VOLTAGE_LUT_ENTRY));
- }
- return NULL;
-}
-
-static ATOM_VOLTAGE_OBJECT_V3 *atom_lookup_voltage_object_v3(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *v3,
- u8 voltage_type, u8 voltage_mode)
-{
- u32 size = le16_to_cpu(v3->sHeader.usStructureSize);
- u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]);
- u8 *start = (u8*)v3;
-
- while (offset < size) {
- ATOM_VOLTAGE_OBJECT_V3 *vo = (ATOM_VOLTAGE_OBJECT_V3 *)(start + offset);
- if ((vo->asGpioVoltageObj.sHeader.ucVoltageType == voltage_type) &&
- (vo->asGpioVoltageObj.sHeader.ucVoltageMode == voltage_mode))
- return vo;
- offset += le16_to_cpu(vo->asGpioVoltageObj.sHeader.usSize);
- }
- return NULL;
-}
-
-bool
-radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
- u8 voltage_type, u8 voltage_mode)
-{
- int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
- u8 frev, crev;
- u16 data_offset, size;
- union voltage_object_info *voltage_info;
- union voltage_object *voltage_object = NULL;
-
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- voltage_info = (union voltage_object_info *)
- (rdev->mode_info.atom_context->bios + data_offset);
-
- switch (frev) {
- case 1:
- case 2:
- switch (crev) {
- case 1:
- voltage_object = (union voltage_object *)
- atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type);
- if (voltage_object &&
- (voltage_object->v1.asControl.ucVoltageControlId == VOLTAGE_CONTROLLED_BY_GPIO))
- return true;
- break;
- case 2:
- voltage_object = (union voltage_object *)
- atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type);
- if (voltage_object &&
- (voltage_object->v2.asControl.ucVoltageControlId == VOLTAGE_CONTROLLED_BY_GPIO))
- return true;
- break;
- default:
- DRM_ERROR("unknown voltage object table\n");
- return false;
- }
- break;
- case 3:
- switch (crev) {
- case 1:
- if (atom_lookup_voltage_object_v3(&voltage_info->v3,
- voltage_type, voltage_mode))
- return true;
- break;
- default:
- DRM_ERROR("unknown voltage object table\n");
- return false;
- }
- break;
- default:
- DRM_ERROR("unknown voltage object table\n");
- return false;
- }
-
- }
- return false;
-}
-
-int radeon_atom_get_svi2_info(struct radeon_device *rdev,
- u8 voltage_type,
- u8 *svd_gpio_id, u8 *svc_gpio_id)
-{
- int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
- u8 frev, crev;
- u16 data_offset, size;
- union voltage_object_info *voltage_info;
- union voltage_object *voltage_object = NULL;
-
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- voltage_info = (union voltage_object_info *)
- (rdev->mode_info.atom_context->bios + data_offset);
-
- switch (frev) {
- case 3:
- switch (crev) {
- case 1:
- voltage_object = (union voltage_object *)
- atom_lookup_voltage_object_v3(&voltage_info->v3,
- voltage_type,
- VOLTAGE_OBJ_SVID2);
- if (voltage_object) {
- *svd_gpio_id = voltage_object->v3.asSVID2Obj.ucSVDGpioId;
- *svc_gpio_id = voltage_object->v3.asSVID2Obj.ucSVCGpioId;
- } else {
- return -EINVAL;
- }
- break;
- default:
- DRM_ERROR("unknown voltage object table\n");
- return -EINVAL;
- }
- break;
- default:
- DRM_ERROR("unknown voltage object table\n");
- return -EINVAL;
- }
-
- }
- return 0;
-}
-
-int radeon_atom_get_max_voltage(struct radeon_device *rdev,
- u8 voltage_type, u16 *max_voltage)
-{
- int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
- u8 frev, crev;
- u16 data_offset, size;
- union voltage_object_info *voltage_info;
- union voltage_object *voltage_object = NULL;
-
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- voltage_info = (union voltage_object_info *)
- (rdev->mode_info.atom_context->bios + data_offset);
-
- switch (crev) {
- case 1:
- voltage_object = (union voltage_object *)
- atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type);
- if (voltage_object) {
- ATOM_VOLTAGE_FORMULA *formula =
- &voltage_object->v1.asFormula;
- if (formula->ucFlag & 1)
- *max_voltage =
- le16_to_cpu(formula->usVoltageBaseLevel) +
- formula->ucNumOfVoltageEntries / 2 *
- le16_to_cpu(formula->usVoltageStep);
- else
- *max_voltage =
- le16_to_cpu(formula->usVoltageBaseLevel) +
- (formula->ucNumOfVoltageEntries - 1) *
- le16_to_cpu(formula->usVoltageStep);
- return 0;
- }
- break;
- case 2:
- voltage_object = (union voltage_object *)
- atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type);
- if (voltage_object) {
- ATOM_VOLTAGE_FORMULA_V2 *formula =
- &voltage_object->v2.asFormula;
- if (formula->ucNumOfVoltageEntries) {
- VOLTAGE_LUT_ENTRY *lut = (VOLTAGE_LUT_ENTRY *)
- ((u8 *)&formula->asVIDAdjustEntries[0] +
- (sizeof(VOLTAGE_LUT_ENTRY) * (formula->ucNumOfVoltageEntries - 1)));
- *max_voltage =
- le16_to_cpu(lut->usVoltageValue);
- return 0;
- }
- }
- break;
- default:
- DRM_ERROR("unknown voltage object table\n");
- return -EINVAL;
- }
-
- }
- return -EINVAL;
-}
-
-int radeon_atom_get_min_voltage(struct radeon_device *rdev,
- u8 voltage_type, u16 *min_voltage)
-{
- int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
- u8 frev, crev;
- u16 data_offset, size;
- union voltage_object_info *voltage_info;
- union voltage_object *voltage_object = NULL;
-
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- voltage_info = (union voltage_object_info *)
- (rdev->mode_info.atom_context->bios + data_offset);
-
- switch (crev) {
- case 1:
- voltage_object = (union voltage_object *)
- atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type);
- if (voltage_object) {
- ATOM_VOLTAGE_FORMULA *formula =
- &voltage_object->v1.asFormula;
- *min_voltage =
- le16_to_cpu(formula->usVoltageBaseLevel);
- return 0;
- }
- break;
- case 2:
- voltage_object = (union voltage_object *)
- atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type);
- if (voltage_object) {
- ATOM_VOLTAGE_FORMULA_V2 *formula =
- &voltage_object->v2.asFormula;
- if (formula->ucNumOfVoltageEntries) {
- *min_voltage =
- le16_to_cpu(formula->asVIDAdjustEntries[
- 0
- ].usVoltageValue);
- return 0;
- }
- }
- break;
- default:
- DRM_ERROR("unknown voltage object table\n");
- return -EINVAL;
- }
-
- }
- return -EINVAL;
-}
-
-int radeon_atom_get_voltage_step(struct radeon_device *rdev,
- u8 voltage_type, u16 *voltage_step)
-{
- int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
- u8 frev, crev;
- u16 data_offset, size;
- union voltage_object_info *voltage_info;
- union voltage_object *voltage_object = NULL;
-
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- voltage_info = (union voltage_object_info *)
- (rdev->mode_info.atom_context->bios + data_offset);
-
- switch (crev) {
- case 1:
- voltage_object = (union voltage_object *)
- atom_lookup_voltage_object_v1(&voltage_info->v1, voltage_type);
- if (voltage_object) {
- ATOM_VOLTAGE_FORMULA *formula =
- &voltage_object->v1.asFormula;
- if (formula->ucFlag & 1)
- *voltage_step =
- (le16_to_cpu(formula->usVoltageStep) + 1) / 2;
- else
- *voltage_step =
- le16_to_cpu(formula->usVoltageStep);
- return 0;
- }
- break;
- case 2:
- return -EINVAL;
- default:
- DRM_ERROR("unknown voltage object table\n");
- return -EINVAL;
- }
-
- }
- return -EINVAL;
-}
-
-int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
- u8 voltage_type,
- u16 nominal_voltage,
- u16 *true_voltage)
-{
- u16 min_voltage, max_voltage, voltage_step;
-
- if (radeon_atom_get_max_voltage(rdev, voltage_type, &max_voltage))
- return -EINVAL;
- if (radeon_atom_get_min_voltage(rdev, voltage_type, &min_voltage))
- return -EINVAL;
- if (radeon_atom_get_voltage_step(rdev, voltage_type, &voltage_step))
- return -EINVAL;
-
- if (nominal_voltage <= min_voltage)
- *true_voltage = min_voltage;
- else if (nominal_voltage >= max_voltage)
- *true_voltage = max_voltage;
- else
- *true_voltage = min_voltage +
- ((nominal_voltage - min_voltage) / voltage_step) *
- voltage_step;
-
- return 0;
-}
-
-int radeon_atom_get_voltage_table(struct radeon_device *rdev,
- u8 voltage_type, u8 voltage_mode,
- struct atom_voltage_table *voltage_table)
-{
- int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
- u8 frev, crev;
- u16 data_offset, size;
- int i, ret;
- union voltage_object_info *voltage_info;
- union voltage_object *voltage_object = NULL;
-
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- voltage_info = (union voltage_object_info *)
- (rdev->mode_info.atom_context->bios + data_offset);
-
- switch (frev) {
- case 1:
- case 2:
- switch (crev) {
- case 1:
- DRM_ERROR("old table version %d, %d\n", frev, crev);
- return -EINVAL;
- case 2:
- voltage_object = (union voltage_object *)
- atom_lookup_voltage_object_v2(&voltage_info->v2, voltage_type);
- if (voltage_object) {
- ATOM_VOLTAGE_FORMULA_V2 *formula =
- &voltage_object->v2.asFormula;
- VOLTAGE_LUT_ENTRY *lut;
- if (formula->ucNumOfVoltageEntries > MAX_VOLTAGE_ENTRIES)
- return -EINVAL;
- lut = &formula->asVIDAdjustEntries[0];
- for (i = 0; i < formula->ucNumOfVoltageEntries; i++) {
- voltage_table->entries[i].value =
- le16_to_cpu(lut->usVoltageValue);
- ret = radeon_atom_get_voltage_gpio_settings(rdev,
- voltage_table->entries[i].value,
- voltage_type,
- &voltage_table->entries[i].smio_low,
- &voltage_table->mask_low);
- if (ret)
- return ret;
- lut = (VOLTAGE_LUT_ENTRY *)
- ((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY));
- }
- voltage_table->count = formula->ucNumOfVoltageEntries;
- return 0;
- }
- break;
- default:
- DRM_ERROR("unknown voltage object table\n");
- return -EINVAL;
- }
- break;
- case 3:
- switch (crev) {
- case 1:
- voltage_object = (union voltage_object *)
- atom_lookup_voltage_object_v3(&voltage_info->v3,
- voltage_type, voltage_mode);
- if (voltage_object) {
- ATOM_GPIO_VOLTAGE_OBJECT_V3 *gpio =
- &voltage_object->v3.asGpioVoltageObj;
- VOLTAGE_LUT_ENTRY_V2 *lut;
- if (gpio->ucGpioEntryNum > MAX_VOLTAGE_ENTRIES)
- return -EINVAL;
- lut = &gpio->asVolGpioLut[0];
- for (i = 0; i < gpio->ucGpioEntryNum; i++) {
- voltage_table->entries[i].value =
- le16_to_cpu(lut->usVoltageValue);
- voltage_table->entries[i].smio_low =
- le32_to_cpu(lut->ulVoltageId);
- lut = (VOLTAGE_LUT_ENTRY_V2 *)
- ((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY_V2));
- }
- voltage_table->mask_low = le32_to_cpu(gpio->ulGpioMaskVal);
- voltage_table->count = gpio->ucGpioEntryNum;
- voltage_table->phase_delay = gpio->ucPhaseDelay;
- return 0;
- }
- break;
- default:
- DRM_ERROR("unknown voltage object table\n");
- return -EINVAL;
- }
- break;
- default:
- DRM_ERROR("unknown voltage object table\n");
- return -EINVAL;
- }
- }
- return -EINVAL;
-}
-
-union vram_info {
- struct _ATOM_VRAM_INFO_V3 v1_3;
- struct _ATOM_VRAM_INFO_V4 v1_4;
- struct _ATOM_VRAM_INFO_HEADER_V2_1 v2_1;
-};
-
-int radeon_atom_get_memory_info(struct radeon_device *rdev,
- u8 module_index, struct atom_memory_info *mem_info)
-{
- int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
- u8 frev, crev, i;
- u16 data_offset, size;
- union vram_info *vram_info;
-
- memset(mem_info, 0, sizeof(struct atom_memory_info));
-
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- vram_info = (union vram_info *)
- (rdev->mode_info.atom_context->bios + data_offset);
- switch (frev) {
- case 1:
- switch (crev) {
- case 3:
- /* r6xx */
- if (module_index < vram_info->v1_3.ucNumOfVRAMModule) {
- ATOM_VRAM_MODULE_V3 *vram_module =
- (ATOM_VRAM_MODULE_V3 *)vram_info->v1_3.aVramInfo;
-
- for (i = 0; i < module_index; i++) {
- if (le16_to_cpu(vram_module->usSize) == 0)
- return -EINVAL;
- vram_module = (ATOM_VRAM_MODULE_V3 *)
- ((u8 *)vram_module + le16_to_cpu(vram_module->usSize));
- }
- mem_info->mem_vendor = vram_module->asMemory.ucMemoryVenderID & 0xf;
- mem_info->mem_type = vram_module->asMemory.ucMemoryType & 0xf0;
- } else
- return -EINVAL;
- break;
- case 4:
- /* r7xx, evergreen */
- if (module_index < vram_info->v1_4.ucNumOfVRAMModule) {
- ATOM_VRAM_MODULE_V4 *vram_module =
- (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo;
-
- for (i = 0; i < module_index; i++) {
- if (le16_to_cpu(vram_module->usModuleSize) == 0)
- return -EINVAL;
- vram_module = (ATOM_VRAM_MODULE_V4 *)
- ((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize));
- }
- mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf;
- mem_info->mem_type = vram_module->ucMemoryType & 0xf0;
- } else
- return -EINVAL;
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return -EINVAL;
- }
- break;
- case 2:
- switch (crev) {
- case 1:
- /* ni */
- if (module_index < vram_info->v2_1.ucNumOfVRAMModule) {
- ATOM_VRAM_MODULE_V7 *vram_module =
- (ATOM_VRAM_MODULE_V7 *)vram_info->v2_1.aVramInfo;
-
- for (i = 0; i < module_index; i++) {
- if (le16_to_cpu(vram_module->usModuleSize) == 0)
- return -EINVAL;
- vram_module = (ATOM_VRAM_MODULE_V7 *)
- ((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize));
- }
- mem_info->mem_vendor = vram_module->ucMemoryVenderID & 0xf;
- mem_info->mem_type = vram_module->ucMemoryType & 0xf0;
- } else
- return -EINVAL;
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return -EINVAL;
- }
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return -EINVAL;
- }
- return 0;
- }
- return -EINVAL;
-}
-
-int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
- bool gddr5, u8 module_index,
- struct atom_memory_clock_range_table *mclk_range_table)
-{
- int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
- u8 frev, crev, i;
- u16 data_offset, size;
- union vram_info *vram_info;
- u32 mem_timing_size = gddr5 ?
- sizeof(ATOM_MEMORY_TIMING_FORMAT_V2) : sizeof(ATOM_MEMORY_TIMING_FORMAT);
-
- memset(mclk_range_table, 0, sizeof(struct atom_memory_clock_range_table));
-
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- vram_info = (union vram_info *)
- (rdev->mode_info.atom_context->bios + data_offset);
- switch (frev) {
- case 1:
- switch (crev) {
- case 3:
- DRM_ERROR("old table version %d, %d\n", frev, crev);
- return -EINVAL;
- case 4:
- /* r7xx, evergreen */
- if (module_index < vram_info->v1_4.ucNumOfVRAMModule) {
- ATOM_VRAM_MODULE_V4 *vram_module =
- (ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo;
- ATOM_MEMORY_TIMING_FORMAT *format;
-
- for (i = 0; i < module_index; i++) {
- if (le16_to_cpu(vram_module->usModuleSize) == 0)
- return -EINVAL;
- vram_module = (ATOM_VRAM_MODULE_V4 *)
- ((u8 *)vram_module + le16_to_cpu(vram_module->usModuleSize));
- }
- mclk_range_table->num_entries = (u8)
- ((le16_to_cpu(vram_module->usModuleSize) - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) /
- mem_timing_size);
- format = &vram_module->asMemTiming[0];
- for (i = 0; i < mclk_range_table->num_entries; i++) {
- mclk_range_table->mclk[i] = le32_to_cpu(format->ulClkRange);
- format = (ATOM_MEMORY_TIMING_FORMAT *)
- ((u8 *)format + mem_timing_size);
- }
- } else
- return -EINVAL;
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return -EINVAL;
- }
- break;
- case 2:
- DRM_ERROR("new table version %d, %d\n", frev, crev);
- return -EINVAL;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return -EINVAL;
- }
- return 0;
- }
- return -EINVAL;
-}
-
-#define MEM_ID_MASK 0xff000000
-#define MEM_ID_SHIFT 24
-#define CLOCK_RANGE_MASK 0x00ffffff
-#define CLOCK_RANGE_SHIFT 0
-#define LOW_NIBBLE_MASK 0xf
-#define DATA_EQU_PREV 0
-#define DATA_FROM_TABLE 4
-
-int radeon_atom_init_mc_reg_table(struct radeon_device *rdev,
- u8 module_index,
- struct atom_mc_reg_table *reg_table)
-{
- int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
- u8 frev, crev, num_entries, t_mem_id, num_ranges = 0;
- u32 i = 0, j;
- u16 data_offset, size;
- union vram_info *vram_info;
-
- memset(reg_table, 0, sizeof(struct atom_mc_reg_table));
-
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- vram_info = (union vram_info *)
- (rdev->mode_info.atom_context->bios + data_offset);
- switch (frev) {
- case 1:
- DRM_ERROR("old table version %d, %d\n", frev, crev);
- return -EINVAL;
- case 2:
- switch (crev) {
- case 1:
- if (module_index < vram_info->v2_1.ucNumOfVRAMModule) {
- ATOM_INIT_REG_BLOCK *reg_block =
- (ATOM_INIT_REG_BLOCK *)
- ((u8 *)vram_info + le16_to_cpu(vram_info->v2_1.usMemClkPatchTblOffset));
- ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data =
- (ATOM_MEMORY_SETTING_DATA_BLOCK *)
- ((u8 *)reg_block + (2 * sizeof(u16)) +
- le16_to_cpu(reg_block->usRegIndexTblSize));
- ATOM_INIT_REG_INDEX_FORMAT *format = &reg_block->asRegIndexBuf[0];
- num_entries = (u8)((le16_to_cpu(reg_block->usRegIndexTblSize)) /
- sizeof(ATOM_INIT_REG_INDEX_FORMAT)) - 1;
- if (num_entries > VBIOS_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
- while (i < num_entries) {
- if (format->ucPreRegDataLength & ACCESS_PLACEHOLDER)
- break;
- reg_table->mc_reg_address[i].s1 =
- (u16)(le16_to_cpu(format->usRegIndex));
- reg_table->mc_reg_address[i].pre_reg_data =
- (u8)(format->ucPreRegDataLength);
- i++;
- format = (ATOM_INIT_REG_INDEX_FORMAT *)
- ((u8 *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
- }
- reg_table->last = i;
- while ((le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK) &&
- (num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES)) {
- t_mem_id = (u8)((le32_to_cpu(*(u32 *)reg_data) & MEM_ID_MASK)
- >> MEM_ID_SHIFT);
- if (module_index == t_mem_id) {
- reg_table->mc_reg_table_entry[num_ranges].mclk_max =
- (u32)((le32_to_cpu(*(u32 *)reg_data) & CLOCK_RANGE_MASK)
- >> CLOCK_RANGE_SHIFT);
- for (i = 0, j = 1; i < reg_table->last; i++) {
- if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
- reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
- (u32)le32_to_cpu(*((u32 *)reg_data + j));
- j++;
- } else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
- reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
- reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1];
- }
- }
- num_ranges++;
- }
- reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
- ((u8 *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize));
- }
- if (le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK)
- return -EINVAL;
- reg_table->num_entries = num_ranges;
- } else
- return -EINVAL;
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return -EINVAL;
- }
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return -EINVAL;
- }
- return 0;
- }
- return -EINVAL;
-}
-
void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
diff --git a/sys/dev/pci/drm/radeon/radeon_benchmark.c b/sys/dev/pci/drm/radeon/radeon_benchmark.c
index 33a574967b3..0d561fe5d97 100644
--- a/sys/dev/pci/drm/radeon/radeon_benchmark.c
+++ b/sys/dev/pci/drm/radeon/radeon_benchmark.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_benchmark.c,v 1.7 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2009 Jerome Glisse.
*
@@ -34,8 +35,7 @@
static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
uint64_t saddr, uint64_t daddr,
- int flag, int n,
- struct reservation_object *resv)
+ int flag, int n)
{
unsigned long start_jiffies;
unsigned long end_jiffies;
@@ -46,29 +46,33 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
for (i = 0; i < n; i++) {
switch (flag) {
case RADEON_BENCHMARK_COPY_DMA:
- fence = radeon_copy_dma(rdev, saddr, daddr,
- size / RADEON_GPU_PAGE_SIZE,
- resv);
+ r = radeon_copy_dma(rdev, saddr, daddr,
+ size / RADEON_GPU_PAGE_SIZE,
+ &fence);
break;
case RADEON_BENCHMARK_COPY_BLIT:
- fence = radeon_copy_blit(rdev, saddr, daddr,
- size / RADEON_GPU_PAGE_SIZE,
- resv);
+ r = radeon_copy_blit(rdev, saddr, daddr,
+ size / RADEON_GPU_PAGE_SIZE,
+ &fence);
break;
default:
DRM_ERROR("Unknown copy method\n");
- return -EINVAL;
+ r = -EINVAL;
}
- if (IS_ERR(fence))
- return PTR_ERR(fence);
-
+ if (r)
+ goto exit_do_move;
r = radeon_fence_wait(fence, false);
- radeon_fence_unref(&fence);
if (r)
- return r;
+ goto exit_do_move;
+ radeon_fence_unref(&fence);
}
end_jiffies = jiffies;
- return jiffies_to_msecs(end_jiffies - start_jiffies);
+ r = jiffies_to_msecs(end_jiffies - start_jiffies);
+
+exit_do_move:
+ if (fence)
+ radeon_fence_unref(&fence);
+ return r;
}
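
The reworked radeon_benchmark_do_move() above trades early returns for a single cleanup label, so the fence reference is dropped exactly once on every path. A compilable sketch of that shape, using stand-in types and helpers rather than the driver's:

#include <stddef.h>

struct fence;
extern int copy_op(struct fence **out);	/* stand-in for radeon_copy_dma/blit */
extern int fence_wait(struct fence *f);
extern void fence_unref(struct fence **f);	/* drops ref, NULLs the pointer */

int do_move(int iterations)
{
	struct fence *fence = NULL;
	int i, r = 0;

	for (i = 0; i < iterations; i++) {
		r = copy_op(&fence);	/* on error, fence stays NULL */
		if (r)
			goto exit_do_move;
		r = fence_wait(fence);
		if (r)
			goto exit_do_move;
		fence_unref(&fence);
	}

exit_do_move:
	if (fence)
		fence_unref(&fence);
	return r;
}
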
@@ -96,7 +100,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
int time;
n = RADEON_BENCHMARK_ITERATIONS;
- r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, NULL, &sobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, NULL, &sobj);
if (r) {
goto out_cleanup;
}
@@ -108,7 +112,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
if (r) {
goto out_cleanup;
}
- r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, NULL, &dobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, NULL, &dobj);
if (r) {
goto out_cleanup;
}
@@ -121,10 +125,12 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
goto out_cleanup;
}
- if (rdev->asic->copy.dma) {
+	/* r100 doesn't have a dma engine, so skip the test; a
+	 * VRAM-to-VRAM copy also doesn't make much sense for DMA,
+	 * so skip it as well when the domains are the same */
+ if ((rdev->asic->copy.dma) && (sdomain != ddomain)) {
time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
- RADEON_BENCHMARK_COPY_DMA, n,
- dobj->tbo.resv);
+ RADEON_BENCHMARK_COPY_DMA, n);
if (time < 0)
goto out_cleanup;
if (time > 0)
@@ -134,8 +140,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
if (rdev->asic->copy.blit) {
time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
- RADEON_BENCHMARK_COPY_BLIT, n,
- dobj->tbo.resv);
+ RADEON_BENCHMARK_COPY_BLIT, n);
if (time < 0)
goto out_cleanup;
if (time > 0)
diff --git a/sys/dev/pci/drm/radeon/radeon_bios.c b/sys/dev/pci/drm/radeon/radeon_bios.c
index 370322c27b9..3fdfdd39916 100644
--- a/sys/dev/pci/drm/radeon/radeon_bios.c
+++ b/sys/dev/pci/drm/radeon/radeon_bios.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_bios.c,v 1.12 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -26,6 +27,7 @@
* Jerome Glisse
*/
#include <dev/pci/drm/drmP.h>
+#include <dev/pci/pcidevs.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
@@ -33,7 +35,6 @@
#if defined(__amd64__) || defined(__i386__)
#include <dev/isa/isareg.h>
#include <dev/isa/isavar.h>
-#include "acpi.h"
#endif
#if defined (__loongson__)
@@ -44,44 +45,61 @@
* BIOS.
*/
-/* If you boot an IGP board with a discrete card as the primary,
- * the IGP rom is not accessible via the rom bar as the IGP rom is
- * part of the system bios. On boot, the system bios puts a
- * copy of the igp rom at the start of vram if a discrete card is
- * present.
- */
-#ifdef __linux__
-static bool igp_read_bios_from_vram(struct radeon_device *rdev)
+bool radeon_read_platform_bios(struct radeon_device *);
+
+bool
+radeon_read_platform_bios(struct radeon_device *rdev)
{
+#if defined(__amd64__) || defined(__i386__) || defined(__loongson__)
uint8_t __iomem *bios;
- resource_size_t vram_base;
- resource_size_t size = 256 * 1024; /* ??? */
-
+ bus_size_t size = 256 * 1024; /* ??? */
+ uint8_t *found = NULL;
+ int i;
+
if (!(rdev->flags & RADEON_IS_IGP))
if (!radeon_card_posted(rdev))
return false;
rdev->bios = NULL;
- vram_base = pci_resource_start(rdev->pdev, 0);
- bios = ioremap(vram_base, size);
- if (!bios) {
+
+#if defined(__loongson__)
+ if (loongson_videobios == NULL)
return false;
- }
+ bios = loongson_videobios;
+#else
+ bios = (u8 *)ISA_HOLE_VADDR(0xc0000);
+#endif
- if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
- iounmap(bios);
+ for (i = 0; i + 2 < size; i++) {
+ if (bios[i] == 0x55 && bios[i + 1] == 0xaa) {
+ found = bios + i;
+ break;
+ }
+ }
+ if (found == NULL) {
+ DRM_ERROR("bios size zero or checksum mismatch\n");
return false;
}
+
rdev->bios = kmalloc(size, GFP_KERNEL);
- if (rdev->bios == NULL) {
- iounmap(bios);
+ if (rdev->bios == NULL)
return false;
- }
- memcpy_fromio(rdev->bios, bios, size);
- iounmap(bios);
+
+ memcpy(rdev->bios, found, size);
+
return true;
+#endif
+ return false;
}
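
The scan in the function above walks the legacy video-BIOS window looking for the 0x55 0xaa option-ROM signature instead of trusting a mapped ROM BAR. A self-contained sketch of the same search, with the window pointer and size supplied by the caller:

#include <stddef.h>
#include <stdint.h>

static const uint8_t *find_option_rom(const uint8_t *window, size_t size)
{
	size_t i;

	for (i = 0; i + 2 < size; i++) {
		if (window[i] == 0x55 && window[i + 1] == 0xaa)
			return window + i;	/* signature found */
	}
	return NULL;	/* no option-ROM header in the window */
}
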
-#else
+
+/* If you boot an IGP board with a discrete card as the primary,
+ * the IGP rom is not accessible via the rom bar as the IGP rom is
+ * part of the system bios. On boot, the system bios puts a
+ * copy of the igp rom at the start of vram if a discrete card is
+ * present.
+ */
static bool igp_read_bios_from_vram(struct radeon_device *rdev)
{
uint8_t __iomem *bios;
@@ -117,38 +135,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
bus_space_unmap(bst, bsh, size);
return true;
}
-#endif
-#ifdef __linux__
-static bool radeon_read_bios(struct radeon_device *rdev)
-{
- uint8_t __iomem *bios, val1, val2;
- size_t size;
-
- rdev->bios = NULL;
- /* XXX: some cards may return 0 for rom size? ddx has a workaround */
- bios = pci_map_rom(rdev->pdev, &size);
- if (!bios) {
- return false;
- }
-
- val1 = readb(&bios[0]);
- val2 = readb(&bios[1]);
-
- if (size == 0 || val1 != 0x55 || val2 != 0xaa) {
- pci_unmap_rom(rdev->pdev, bios);
- return false;
- }
- rdev->bios = kzalloc(size, GFP_KERNEL);
- if (rdev->bios == NULL) {
- pci_unmap_rom(rdev->pdev, bios);
- return false;
- }
- memcpy_fromio(rdev->bios, bios, size);
- pci_unmap_rom(rdev->pdev, bios);
- return true;
-}
-#else
static bool radeon_read_bios(struct radeon_device *rdev)
{
uint8_t __iomem *bios;
@@ -192,78 +179,6 @@ fail:
return false;
}
-#endif
-
-#ifdef __linux__
-static bool radeon_read_platform_bios(struct radeon_device *rdev)
-{
- uint8_t __iomem *bios;
- size_t size;
-
- rdev->bios = NULL;
-
- bios = pci_platform_rom(rdev->pdev, &size);
- if (!bios) {
- return false;
- }
-
- if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
- return false;
- }
- rdev->bios = kmemdup(bios, size, GFP_KERNEL);
- if (rdev->bios == NULL) {
- return false;
- }
-
- return true;
-}
-#else
-static bool radeon_read_platform_bios(struct radeon_device *rdev)
-{
-#if defined(__amd64__) || defined(__i386__) || defined(__loongson__)
- uint8_t __iomem *bios;
- bus_size_t size = 256 * 1024; /* ??? */
- uint8_t *found = NULL;
- int i;
-
- if (!(rdev->flags & RADEON_IS_IGP))
- if (!radeon_card_posted(rdev))
- return false;
-
- rdev->bios = NULL;
-
-#if defined(__loongson__)
- if (loongson_videobios == NULL)
- return false;
- bios = loongson_videobios;
-#else
- bios = (u8 *)ISA_HOLE_VADDR(0xc0000);
-#endif
-
- for (i = 0; i + 2 < size; i++) {
- if (bios[i] == 0x55 && bios[i + 1] == 0xaa) {
- found = bios + i;
- break;
- }
-
- }
- if (found == NULL) {
- DRM_ERROR("bios size zero or checksum mismatch\n");
- return false;
- }
-
- rdev->bios = kmalloc(size, GFP_KERNEL);
- if (rdev->bios == NULL)
- return false;
-
- memcpy(rdev->bios, found, size);
-
- return true;
-#endif
- return false;
-}
-#endif
-
#ifdef CONFIG_ACPI
/* ATRM is used to get the BIOS on the discrete cards in
* dual-gpu systems.
@@ -327,7 +242,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
return false;
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
- dhandle = ACPI_HANDLE(&pdev->dev);
+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
if (!dhandle)
continue;
@@ -400,28 +315,24 @@ static bool ni_read_disabled_bios(struct radeon_device *rdev)
/* enable the rom */
WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
- if (!ASIC_IS_NODCE(rdev)) {
- /* Disable VGA mode */
- WREG32(AVIVO_D1VGA_CONTROL,
- (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
- AVIVO_DVGA_CONTROL_TIMING_SELECT)));
- WREG32(AVIVO_D2VGA_CONTROL,
- (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
- AVIVO_DVGA_CONTROL_TIMING_SELECT)));
- WREG32(AVIVO_VGA_RENDER_CONTROL,
- (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
- }
+ /* Disable VGA mode */
+ WREG32(AVIVO_D1VGA_CONTROL,
+ (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_D2VGA_CONTROL,
+ (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+ AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+ WREG32(AVIVO_VGA_RENDER_CONTROL,
+ (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
r = radeon_read_bios(rdev);
/* restore regs */
WREG32(R600_BUS_CNTL, bus_cntl);
- if (!ASIC_IS_NODCE(rdev)) {
- WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
- WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
- WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
- }
+ WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+ WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+ WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
WREG32(R600_ROM_CNTL, rom_cntl);
return r;
}
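
ni_read_disabled_bios() above follows a save/modify/restore discipline: every register it touches is read first and written back verbatim afterwards, so the ROM read leaves no trace. A sketch of the pattern with stand-in accessors (reg_read/reg_write in place of RREG32/WREG32, and an illustrative register layout, not the real one):

#include <stdint.h>

extern uint32_t reg_read(uint32_t reg);
extern void reg_write(uint32_t reg, uint32_t val);
extern int read_rom(void);

#define BUS_CNTL	0x1000u		/* illustrative offset */
#define ROM_DISABLE	(1u << 1)

int read_rom_with_override(void)
{
	uint32_t saved = reg_read(BUS_CNTL);
	int r;

	reg_write(BUS_CNTL, saved & ~ROM_DISABLE);	/* enable the ROM */
	r = read_rom();
	reg_write(BUS_CNTL, saved);			/* restore exactly */
	return r;
}
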
@@ -655,7 +566,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
fp2_gen_cntl = 0;
- if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
}
@@ -692,7 +603,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
(RADEON_CRTC_SYNC_TRISTAT |
RADEON_CRTC_DISPLAY_DIS)));
- if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
}
@@ -710,7 +621,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
}
WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
- if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+ if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
}
return r;
@@ -732,6 +643,10 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
return legacy_read_disabled_bios(rdev);
}
+#if defined(__amd64__) || defined(__i386__)
+#include "acpi.h"
+#endif
+
#if NACPI > 0
#define CONFIG_ACPI
#endif
@@ -772,7 +687,7 @@ static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
vhdr->DeviceID != rdev->pdev->device) {
DRM_INFO("ACPI VFCT table is not for this card\n");
goto out_unmap;
- }
+	}
if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
DRM_ERROR("ACPI VFCT image truncated\n");
@@ -803,11 +718,12 @@ bool radeon_get_bios(struct radeon_device *rdev)
if (r == false)
r = igp_read_bios_from_vram(rdev);
if (r == false)
- r = radeon_read_bios(rdev);
+ r = radeon_read_platform_bios(rdev);
if (r == false)
+ r = radeon_read_bios(rdev);
+ if (r == false) {
r = radeon_read_disabled_bios(rdev);
- if (r == false)
- r = radeon_read_platform_bios(rdev);
+ }
if (r == false || rdev->bios == NULL) {
DRM_ERROR("Unable to locate a BIOS ROM\n");
rdev->bios = NULL;
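
The hunk above reorders radeon_get_bios() so the platform (ISA-hole) read is attempted before the PCI ROM read. The overall shape is a short-circuit fallback chain, sketched here with stand-in probes for each source:

#include <stdbool.h>

extern bool read_acpi_vfct(void);
extern bool read_igp_vram(void);
extern bool read_platform_hole(void);	/* now tried before the ROM BAR */
extern bool read_pci_rom(void);
extern bool read_disabled_rom(void);

bool get_bios(void)
{
	return read_acpi_vfct()     ||
	       read_igp_vram()      ||
	       read_platform_hole() ||
	       read_pci_rom()       ||
	       read_disabled_rom();
}
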
diff --git a/sys/dev/pci/drm/radeon/radeon_clocks.c b/sys/dev/pci/drm/radeon/radeon_clocks.c
index a2d1bd915ac..5cdee96d28c 100644
--- a/sys/dev/pci/drm/radeon/radeon_clocks.c
+++ b/sys/dev/pci/drm/radeon/radeon_clocks.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_clocks.c,v 1.5 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
diff --git a/sys/dev/pci/drm/radeon/radeon_combios.c b/sys/dev/pci/drm/radeon/radeon_combios.c
index 749d9ccfe99..3ce6db979e9 100644
--- a/sys/dev/pci/drm/radeon/radeon_combios.c
+++ b/sys/dev/pci/drm/radeon/radeon_combios.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_combios.c,v 1.13 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2004 ATI Technologies Inc., Markham, Ontario
* Copyright 2007-8 Advanced Micro Devices, Inc.
@@ -29,13 +30,21 @@
#include "radeon.h"
#include "atom.h"
-#ifdef CONFIG_PPC_PMAC
-/* not sure which of these are needed */
-#include <asm/machdep.h>
-#include <asm/pmac_feature.h>
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#endif /* CONFIG_PPC_PMAC */
+/* from radeon_encoder.c */
+extern uint32_t
+radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
+ uint8_t dac);
+extern void radeon_link_encoder_connector(struct drm_device *dev);
+
+/* from radeon_connector.c */
+extern void
+radeon_add_legacy_connector(struct drm_device *dev,
+ uint32_t connector_id,
+ uint32_t supported_device,
+ int connector_type,
+ struct radeon_i2c_bus_rec *i2c_bus,
+ uint16_t connector_object_id,
+ struct radeon_hpd *hpd);
/* from radeon_legacy_encoder.c */
extern void
@@ -116,7 +125,7 @@ enum radeon_combios_connector {
CONNECTOR_UNSUPPORTED_LEGACY
};
-static const int legacy_connector_convert[] = {
+const int legacy_connector_convert[] = {
DRM_MODE_CONNECTOR_Unknown,
DRM_MODE_CONNECTOR_DVID,
DRM_MODE_CONNECTOR_VGA,
@@ -355,13 +364,11 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
}
break;
default:
- check_offset = 0;
break;
}
size = RBIOS8(rdev->bios_header_start + 0x6);
- /* check absolute offset tables */
- if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size)
+ if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset < size)
offset = RBIOS16(rdev->bios_header_start + check_offset);
return offset;
@@ -414,9 +421,9 @@ radeon_bios_get_hardcoded_edid(struct radeon_device *rdev)
#ifdef __clang__
static inline struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
- enum radeon_combios_ddc ddc,
- u32 clk_mask,
- u32 data_mask)
+ enum radeon_combios_ddc ddc,
+ u32 clk_mask,
+ u32 data_mask)
#else
static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
enum radeon_combios_ddc ddc,
@@ -1262,15 +1269,10 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
(RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
- u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
-
- if (hss > lvds->native_mode.hdisplay)
- hss = (10 - 1) * 8;
-
lvds->native_mode.htotal = lvds->native_mode.hdisplay +
(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
- hss;
+ (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
(RBIOS8(tmp + 23) * 8);
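
The restored LVDS math above works in BIOS character clocks, i.e. units of 8 pixels, so each stored delta is scaled by 8. A worked example with made-up raw values (the RBIOS16 offsets are the ones read in the hunk):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t hdisplay   = 1024;
	uint16_t htotal_raw = 168;	/* RBIOS16(tmp + 17), hypothetical */
	uint16_t hblank_raw = 128;	/* RBIOS16(tmp + 19), hypothetical */
	uint16_t hss_raw    = 133;	/* RBIOS16(tmp + 21), hypothetical */

	unsigned htotal      = hdisplay + (htotal_raw - hblank_raw) * 8;
	unsigned hsync_start = hdisplay + (hss_raw - hblank_raw - 1) * 8;

	printf("htotal=%u hsync_start=%u\n", htotal, hsync_start);	/* 1344, 1056 */
	return 0;
}
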
@@ -3400,21 +3402,6 @@ void radeon_combios_asic_init(struct drm_device *dev)
rdev->pdev->subsystem_device == 0x30ae)
return;
- /* quirk for rs4xx HP Compaq dc5750 Small Form Factor to make it resume
- * - it hangs on resume inside the dynclk 1 table.
- */
- if (rdev->family == CHIP_RS480 &&
- rdev->pdev->subsystem_vendor == 0x103c &&
- rdev->pdev->subsystem_device == 0x280a)
- return;
-	/* quirk for rs4xx Toshiba Satellite L20-183 laptop to make it resume
- * - it hangs on resume inside the dynclk 1 table.
- */
- if (rdev->family == CHIP_RS400 &&
- rdev->pdev->subsystem_vendor == 0x1179 &&
- rdev->pdev->subsystem_device == 0xff31)
- return;
-
/* DYN CLK 1 */
table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
if (table)
diff --git a/sys/dev/pci/drm/radeon/radeon_connectors.c b/sys/dev/pci/drm/radeon/radeon_connectors.c
index fa852640736..28ce501f682 100644
--- a/sys/dev/pci/drm/radeon/radeon_connectors.c
+++ b/sys/dev/pci/drm/radeon/radeon_connectors.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_connectors.c,v 1.9 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -27,40 +28,25 @@
#include <dev/pci/drm/drm_edid.h>
#include <dev/pci/drm/drm_crtc_helper.h>
#include <dev/pci/drm/drm_fb_helper.h>
-#include <dev/pci/drm/drm_dp_mst_helper.h>
#include <dev/pci/drm/radeon_drm.h>
#include "radeon.h"
-#include "radeon_audio.h"
#include "atom.h"
+extern void
+radeon_combios_connected_scratch_regs(struct drm_connector *connector,
+ struct drm_encoder *encoder,
+ bool connected);
+extern void
+radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
+ struct drm_encoder *encoder,
+ bool connected);
-static int radeon_dp_handle_hpd(struct drm_connector *connector)
-{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- int ret;
-
- ret = radeon_dp_mst_check_status(radeon_connector);
- if (ret == -EINVAL)
- return 1;
- return 0;
-}
void radeon_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
- struct radeon_connector_atom_dig *dig_connector =
- radeon_connector->con_priv;
-
- if (radeon_connector->is_mst_connector)
- return;
- if (dig_connector->is_mst) {
- radeon_dp_handle_hpd(connector);
- return;
- }
- }
	/* bail if the connector does not have an hpd pin, e.g.,
* VGA, TV, etc.
*/
@@ -70,7 +56,6 @@ void radeon_connector_hotplug(struct drm_connector *connector)
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
/* if the connector is already off, don't turn it back on */
- /* FIXME: This access isn't protected by any locks. */
if (connector->dpms != DRM_MODE_DPMS_ON)
return;
@@ -88,18 +73,20 @@ void radeon_connector_hotplug(struct drm_connector *connector)
/* don't do anything if sink is not display port, i.e.,
* passive dp->(dvi|hdmi) adaptor
*/
- if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
- radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
- radeon_dp_needs_link_train(radeon_connector)) {
- /* Don't start link training before we have the DPCD */
- if (!radeon_dp_getdpcd(radeon_connector))
- return;
-
- /* Turn the connector off and back on immediately, which
- * will trigger link training
- */
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ int saved_dpms = connector->dpms;
+ /* Only turn off the display if it's physically disconnected */
+ if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ } else if (radeon_dp_needs_link_train(radeon_connector)) {
+ /* set it to OFF so that drm_helper_connector_dpms()
+ * won't return immediately since the current state
+ * is ON at this point.
+ */
+ connector->dpms = DRM_MODE_DPMS_OFF;
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ }
+ connector->dpms = saved_dpms;
}
}
}
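
The saved_dpms dance restored above exists because the dpms helper treats an ON-to-ON request as a no-op; recording OFF first forces the ON call to run a full modeset, which retriggers DP link training. A sketch with stub types in place of the drm structures:

enum dpms_state { DPMS_ON, DPMS_OFF };

struct connector {
	enum dpms_state dpms;
};

extern void set_dpms(struct connector *c, enum dpms_state s);	/* stand-in helper */

void retrain_link(struct connector *c)
{
	enum dpms_state saved = c->dpms;

	c->dpms = DPMS_OFF;	/* helper ignores ON->ON transitions */
	set_dpms(c, DPMS_ON);	/* full modeset, retrains the link */
	c->dpms = saved;
}
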
@@ -121,13 +108,12 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
int bpc = 8;
- int mode_clock, max_tmds_clock;
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB:
if (radeon_connector->use_digital) {
- if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
+ if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@@ -135,7 +121,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
break;
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
- if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
+ if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@@ -144,7 +130,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
- drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
+ drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@@ -167,73 +153,6 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
}
break;
}
-
- if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
- /* hdmi deep color only implemented on DCE4+ */
- if ((bpc > 8) && !ASIC_IS_DCE4(rdev)) {
- DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 8 bpc.\n",
- connector->name, bpc);
- bpc = 8;
- }
-
- /*
- * Pre DCE-8 hw can't handle > 12 bpc, and more than 12 bpc doesn't make
- * much sense without support for > 12 bpc framebuffers. RGB 4:4:4 at
- * 12 bpc is always supported on hdmi deep color sinks, as this is
- * required by the HDMI-1.3 spec. Clamp to a safe 12 bpc maximum.
- */
- if (bpc > 12) {
- DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 12 bpc.\n",
- connector->name, bpc);
- bpc = 12;
- }
-
- /* Any defined maximum tmds clock limit we must not exceed? */
- if (connector->max_tmds_clock > 0) {
- /* mode_clock is clock in kHz for mode to be modeset on this connector */
- mode_clock = radeon_connector->pixelclock_for_modeset;
-
- /* Maximum allowable input clock in kHz */
- max_tmds_clock = connector->max_tmds_clock * 1000;
-
- DRM_DEBUG("%s: hdmi mode dotclock %d kHz, max tmds input clock %d kHz.\n",
- connector->name, mode_clock, max_tmds_clock);
-
- /* Check if bpc is within clock limit. Try to degrade gracefully otherwise */
- if ((bpc == 12) && (mode_clock * 3/2 > max_tmds_clock)) {
- if ((connector->display_info.edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30) &&
- (mode_clock * 5/4 <= max_tmds_clock))
- bpc = 10;
- else
- bpc = 8;
-
- DRM_DEBUG("%s: HDMI deep color 12 bpc exceeds max tmds clock. Using %d bpc.\n",
- connector->name, bpc);
- }
-
- if ((bpc == 10) && (mode_clock * 5/4 > max_tmds_clock)) {
- bpc = 8;
- DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n",
- connector->name, bpc);
- }
- }
- else if (bpc > 8) {
- /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
- DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
- connector->name);
- bpc = 8;
- }
- }
-
- if ((radeon_deep_color == 0) && (bpc > 8)) {
- DRM_DEBUG("%s: Deep color disabled. Set radeon module param deep_color=1 to enable.\n",
- connector->name);
- bpc = 8;
- }
-
- DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n",
- connector->name, connector->display_info.bpc, bpc);
-
return bpc;
}
@@ -245,6 +164,7 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
struct drm_encoder *best_encoder = NULL;
struct drm_encoder *encoder = NULL;
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+ struct drm_mode_object *obj;
bool connected;
int i;
@@ -254,11 +174,14 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev,
- connector->encoder_ids[i]);
- if (!encoder)
+ obj = drm_mode_object_find(connector->dev,
+ connector->encoder_ids[i],
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
continue;
+ encoder = obj_to_encoder(obj);
+
if ((encoder == best_encoder) && (status == connector_status_connected))
connected = true;
else
@@ -274,6 +197,7 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
{
+ struct drm_mode_object *obj;
struct drm_encoder *encoder;
int i;
@@ -281,138 +205,32 @@ static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector,
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
- if (!encoder)
+ obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
continue;
+ encoder = obj_to_encoder(obj);
if (encoder->encoder_type == encoder_type)
return encoder;
}
return NULL;
}
-struct edid *radeon_connector_edid(struct drm_connector *connector)
-{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct drm_property_blob *edid_blob = connector->edid_blob_ptr;
-
- if (radeon_connector->edid) {
- return radeon_connector->edid;
- } else if (edid_blob) {
- struct edid *edid = kmemdup(edid_blob->data, edid_blob->length, GFP_KERNEL);
- if (edid)
- radeon_connector->edid = edid;
- }
- return radeon_connector->edid;
-}
-
-static void radeon_connector_get_edid(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-
- if (radeon_connector->edid)
- return;
-
- /* on hw with routers, select right port */
- if (radeon_connector->router.ddc_valid)
- radeon_router_select_ddc_port(radeon_connector);
-
- if ((radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
- ENCODER_OBJECT_ID_NONE) &&
- radeon_connector->ddc_bus->has_aux) {
- radeon_connector->edid = drm_get_edid(connector,
- &radeon_connector->ddc_bus->aux.ddc);
- } else if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
- (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
- struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
-
- if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
- dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
- radeon_connector->ddc_bus->has_aux)
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->aux.ddc);
- else if (radeon_connector->ddc_bus)
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->adapter);
- } else if (radeon_connector->ddc_bus) {
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->adapter);
- }
-
- if (!radeon_connector->edid) {
- /* don't fetch the edid from the vbios if ddc fails and runpm is
- * enabled so we report disconnected.
- */
- if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
- return;
-
- if (rdev->is_atom_bios) {
- /* some laptops provide a hardcoded edid in rom for LCDs */
- if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
- (connector->connector_type == DRM_MODE_CONNECTOR_eDP)))
- radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
- } else {
- /* some servers provide a hardcoded edid in rom for KVMs */
- radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
- }
- }
-}
-
-static void radeon_connector_free_edid(struct drm_connector *connector)
-{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-
- if (radeon_connector->edid) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
- }
-}
-
-static int radeon_ddc_get_modes(struct drm_connector *connector)
-{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- int ret;
-
- if (radeon_connector->edid) {
- drm_mode_connector_update_edid_property(connector, radeon_connector->edid);
- ret = drm_add_edid_modes(connector, radeon_connector->edid);
- drm_edid_to_eld(connector, radeon_connector->edid);
- return ret;
- }
- drm_mode_connector_update_edid_property(connector, NULL);
- return 0;
-}
-
static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
- /* pick the encoder ids */
- if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
- return NULL;
-}
-
-static void radeon_get_native_mode(struct drm_connector *connector)
-{
- struct drm_encoder *encoder = radeon_best_single_encoder(connector);
- struct radeon_encoder *radeon_encoder;
-
- if (encoder == NULL)
- return;
-
- radeon_encoder = to_radeon_encoder(encoder);
-
- if (!list_empty(&connector->probed_modes)) {
- struct drm_display_mode *preferred_mode =
- list_first_entry(&connector->probed_modes,
- struct drm_display_mode, head);
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
- radeon_encoder->native_mode = *preferred_mode;
- } else {
- radeon_encoder->native_mode.clock = 0;
+ /* pick the encoder ids */
+ if (enc_id) {
+ obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
}
+ return NULL;
}
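
The revert replaces drm_encoder_find() throughout with the older two-step lookup: resolve the generic mode-object id, then convert to the encoder. A sketch of that pattern; the final cast models what obj_to_encoder() expands to, under the assumption that the embedded base object is the first member:

#include <stddef.h>

struct drm_mode_object { int type; };
struct drm_encoder { struct drm_mode_object base; int encoder_type; };

#define OBJECT_ENCODER	1

extern struct drm_mode_object *mode_object_find(int id, int type);	/* stand-in */

static struct drm_encoder *encoder_by_id(int id)
{
	struct drm_mode_object *obj;

	if (!id)
		return NULL;
	obj = mode_object_find(id, OBJECT_ENCODER);
	if (!obj)
		return NULL;
	return (struct drm_encoder *)obj;	/* base is first, cast suffices */
}
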
/*
@@ -450,17 +268,13 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
continue;
if (priority == true) {
- DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n",
- conflict->name);
- DRM_DEBUG_KMS("in favor of %s\n",
- connector->name);
+ DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", conflict->name);
+ DRM_DEBUG_KMS("in favor of %s\n", connector->name);
conflict->status = connector_status_disconnected;
radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
} else {
- DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n",
- connector->name);
- DRM_DEBUG_KMS("in favor of %s\n",
- conflict->name);
+ DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", connector->name);
+ DRM_DEBUG_KMS("in favor of %s\n", conflict->name);
current_status = connector_status_disconnected;
}
break;
@@ -583,36 +397,6 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
}
}
- if (property == rdev->mode_info.audio_property) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- /* need to find digital encoder on connector */
- encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
- if (!encoder)
- return 0;
-
- radeon_encoder = to_radeon_encoder(encoder);
-
- if (radeon_connector->audio != val) {
- radeon_connector->audio = val;
- radeon_property_change_mode(&radeon_encoder->base);
- }
- }
-
- if (property == rdev->mode_info.dither_property) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- /* need to find digital encoder on connector */
- encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
- if (!encoder)
- return 0;
-
- radeon_encoder = to_radeon_encoder(encoder);
-
- if (radeon_connector->dither != val) {
- radeon_connector->dither = val;
- radeon_property_change_mode(&radeon_encoder->base);
- }
- }
-
if (property == rdev->mode_info.underscan_property) {
/* need to find digital encoder on connector */
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
@@ -715,59 +499,6 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
radeon_property_change_mode(&radeon_encoder->base);
}
- if (property == dev->mode_config.scaling_mode_property) {
- enum radeon_rmx_type rmx_type;
-
- if (connector->encoder)
- radeon_encoder = to_radeon_encoder(connector->encoder);
- else {
- const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
- radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector));
- }
-
- switch (val) {
- default:
- case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break;
- case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break;
- case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break;
- case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break;
- }
- if (radeon_encoder->rmx_type == rmx_type)
- return 0;
-
- if ((rmx_type != DRM_MODE_SCALE_NONE) &&
- (radeon_encoder->native_mode.clock == 0))
- return 0;
-
- radeon_encoder->rmx_type = rmx_type;
-
- radeon_property_change_mode(&radeon_encoder->base);
- }
-
- if (property == rdev->mode_info.output_csc_property) {
- if (connector->encoder)
- radeon_encoder = to_radeon_encoder(connector->encoder);
- else {
- const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
- radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector));
- }
-
- if (radeon_encoder->output_csc == val)
- return 0;
-
- radeon_encoder->output_csc = val;
-
- if (connector->encoder->crtc) {
- struct drm_crtc *crtc = connector->encoder->crtc;
- const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-
- radeon_crtc->output_csc = radeon_encoder->output_csc;
-
- (*crtc_funcs->load_lut)(crtc);
- }
- }
-
return 0;
}
@@ -808,20 +539,22 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
static int radeon_lvds_get_modes(struct drm_connector *connector)
{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
int ret = 0;
struct drm_display_mode *mode;
- radeon_connector_get_edid(connector);
- ret = radeon_ddc_get_modes(connector);
- if (ret > 0) {
- encoder = radeon_best_single_encoder(connector);
- if (encoder) {
- radeon_fixup_lvds_native_mode(encoder, connector);
- /* add scaled modes */
- radeon_add_common_modes(encoder, connector);
+ if (radeon_connector->ddc_bus) {
+ ret = radeon_ddc_get_modes(radeon_connector);
+ if (ret > 0) {
+ encoder = radeon_best_single_encoder(connector);
+ if (encoder) {
+ radeon_fixup_lvds_native_mode(encoder, connector);
+ /* add scaled modes */
+ radeon_add_common_modes(encoder, connector);
+ }
+ return ret;
}
- return ret;
}
encoder = radeon_best_single_encoder(connector);
@@ -876,16 +609,9 @@ static int radeon_lvds_mode_valid(struct drm_connector *connector,
static enum drm_connector_status
radeon_lvds_detect(struct drm_connector *connector, bool force)
{
- struct drm_device *dev = connector->dev;
- struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
enum drm_connector_status ret = connector_status_disconnected;
- int r;
-
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
if (encoder) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -894,22 +620,23 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
/* check if panel is valid */
if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
ret = connector_status_connected;
- /* don't fetch the edid from the vbios if ddc fails and runpm is
- * enabled so we report disconnected.
- */
- if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
- ret = connector_status_disconnected;
}
/* check for edid as well */
- radeon_connector_get_edid(connector);
if (radeon_connector->edid)
ret = connector_status_connected;
+ else {
+ if (radeon_connector->ddc_bus) {
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->adapter);
+ if (radeon_connector->edid)
+ ret = connector_status_connected;
+ }
+ }
/* check acpi lid status ??? */
radeon_connector_update_scratch_regs(connector, ret);
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
return ret;
}
@@ -917,7 +644,8 @@ static void radeon_connector_destroy(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- radeon_connector_free_edid(connector);
+ if (radeon_connector->edid)
+ kfree(radeon_connector->edid);
kfree(radeon_connector->con_priv);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
@@ -976,12 +704,10 @@ static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
static int radeon_vga_get_modes(struct drm_connector *connector)
{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int ret;
- radeon_connector_get_edid(connector);
- ret = radeon_ddc_get_modes(connector);
-
- radeon_get_native_mode(connector);
+ ret = radeon_ddc_get_modes(radeon_connector);
return ret;
}
@@ -1010,11 +736,6 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
const struct drm_encoder_helper_funcs *encoder_funcs;
bool dret = false;
enum drm_connector_status ret = connector_status_disconnected;
- int r;
-
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
encoder = radeon_best_single_encoder(connector);
if (!encoder)
@@ -1024,26 +745,28 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
dret = radeon_ddc_probe(radeon_connector, false);
if (dret) {
radeon_connector->detected_by_load = false;
- radeon_connector_free_edid(connector);
- radeon_connector_get_edid(connector);
+ if (radeon_connector->edid) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ }
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
connector->name);
ret = connector_status_connected;
} else {
- radeon_connector->use_digital =
- !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+ radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
/* some oems have boards with separate digital and analog connectors
* with a shared ddc line (often vga + hdmi)
*/
if (radeon_connector->use_digital && radeon_connector->shared_ddc) {
- radeon_connector_free_edid(connector);
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
ret = connector_status_disconnected;
- } else {
+ } else
ret = connector_status_connected;
- }
}
} else {
@@ -1053,8 +776,9 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
* detected a monitor via load.
*/
if (radeon_connector->detected_by_load)
- ret = connector->status;
- goto out;
+ return connector->status;
+ else
+ return ret;
}
if (radeon_connector->dac_load_detect && encoder) {
@@ -1079,11 +803,6 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
}
radeon_connector_update_scratch_regs(connector, ret);
-
-out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
-
return ret;
}
@@ -1140,15 +859,10 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
const struct drm_encoder_helper_funcs *encoder_funcs;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
enum drm_connector_status ret = connector_status_disconnected;
- int r;
if (!radeon_connector->dac_load_detect)
return ret;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
-
encoder = radeon_best_single_encoder(connector);
if (!encoder)
ret = connector_status_disconnected;
@@ -1159,8 +873,6 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
if (ret == connector_status_connected)
ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
radeon_connector_update_scratch_regs(connector, ret);
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
return ret;
}
@@ -1178,6 +890,15 @@ static const struct drm_connector_funcs radeon_tv_connector_funcs = {
.set_property = radeon_connector_set_property,
};
+static int radeon_dvi_get_modes(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ int ret;
+
+ ret = radeon_ddc_get_modes(radeon_connector);
+ return ret;
+}
+
static bool radeon_check_hpd_status_unchanged(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -1218,73 +939,50 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = NULL;
const struct drm_encoder_helper_funcs *encoder_funcs;
- int i, r;
+ struct drm_mode_object *obj;
+ int i;
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
-
- if (radeon_connector->detected_hpd_without_ddc) {
- force = true;
- radeon_connector->detected_hpd_without_ddc = false;
- }
-
- if (!force && radeon_check_hpd_status_unchanged(connector)) {
- ret = connector->status;
- goto exit;
- }
+ if (!force && radeon_check_hpd_status_unchanged(connector))
+ return connector->status;
- if (radeon_connector->ddc_bus) {
+ if (radeon_connector->ddc_bus)
dret = radeon_ddc_probe(radeon_connector, false);
-
- /* Sometimes the pins required for the DDC probe on DVI
- * connectors don't make contact at the same time that the ones
- * for HPD do. If the DDC probe fails even though we had an HPD
- * signal, try again later */
- if (!dret && !force &&
- connector->status != connector_status_connected) {
- DRM_DEBUG_KMS("hpd detected without ddc, retrying in 1 second\n");
- radeon_connector->detected_hpd_without_ddc = true;
- schedule_delayed_work(&rdev->hotplug_work,
- msecs_to_jiffies(1000));
- goto exit;
- }
- }
if (dret) {
radeon_connector->detected_by_load = false;
- radeon_connector_free_edid(connector);
- radeon_connector_get_edid(connector);
+ if (radeon_connector->edid) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ }
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
connector->name);
/* rs690 seems to have a problem with connectors not existing and always
* return a block of 0's. If we see this just stop polling on this output */
- if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) &&
- radeon_connector->base.null_edid_counter) {
+ if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) {
ret = connector_status_disconnected;
- DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n",
- connector->name);
+ DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", connector->name);
radeon_connector->ddc_bus = NULL;
} else {
ret = connector_status_connected;
broken_edid = true; /* defer use_digital to later */
}
} else {
- radeon_connector->use_digital =
- !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+ radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
/* some oems have boards with separate digital and analog connectors
* with a shared ddc line (often vga + hdmi)
*/
if ((!radeon_connector->use_digital) && radeon_connector->shared_ddc) {
- radeon_connector_free_edid(connector);
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
ret = connector_status_disconnected;
- } else {
+ } else
ret = connector_status_connected;
- }
+
/* This gets complicated. We have boards with VGA + HDMI with a
* shared DDC line and we have boards with DVI-D + HDMI with a shared
* DDC line. The latter is more complex because with DVI<->HDMI adapters
@@ -1304,7 +1002,8 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
/* hpd is our only option in this case */
if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
- radeon_connector_free_edid(connector);
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
ret = connector_status_disconnected;
}
}
@@ -1338,11 +1037,14 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev,
- connector->encoder_ids[i]);
- if (!encoder)
+ obj = drm_mode_object_find(connector->dev,
+ connector->encoder_ids[i],
+ DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
continue;
+ encoder = obj_to_encoder(obj);
+
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
continue;
@@ -1394,22 +1096,6 @@ out:
/* updated in get modes as well since we need to know if it's analog or digital */
radeon_connector_update_scratch_regs(connector, ret);
-
- if ((radeon_audio != 0) && radeon_connector->use_digital) {
- const struct drm_connector_helper_funcs *connector_funcs =
- connector->helper_private;
-
- encoder = connector_funcs->best_encoder(connector);
- if (encoder && (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)) {
- radeon_connector_get_edid(connector);
- radeon_audio_detect(connector, encoder, ret);
- }
- }
-
-exit:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
-
return ret;
}
@@ -1418,16 +1104,19 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_mode_object *obj;
struct drm_encoder *encoder;
int i;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
- if (!encoder)
+ obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
continue;
+ encoder = obj_to_encoder(obj);
+
if (radeon_connector->use_digital == true) {
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
return encoder;
@@ -1442,8 +1131,13 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
	/* then check use digital */
/* pick the first one */
- if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ if (enc_id) {
+ obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
+ return NULL;
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
return NULL;
}
@@ -1476,15 +1170,17 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
return MODE_OK;
- else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
- /* HDMI 1.3+ supports max clock of 340 Mhz */
- if (mode->clock > 340000)
+ else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) {
+ if (ASIC_IS_DCE6(rdev)) {
+			/* HDMI 1.3+ supports max clock of 340 MHz */
+ if (mode->clock > 340000)
+ return MODE_CLOCK_HIGH;
+ else
+ return MODE_OK;
+ } else
return MODE_CLOCK_HIGH;
- else
- return MODE_OK;
- } else {
+ } else
return MODE_CLOCK_HIGH;
- }
}
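
The restored branch above gates HDMI modes by TMDS clock: parts with DCE6 handle HDMI 1.3 rates up to 340 MHz, while older parts reject anything past the single-link limit checked earlier in the function. Condensed into one predicate (clocks in kHz, as in the mode structs):

enum mode_status { MODE_OK, MODE_CLOCK_HIGH };

/* Caller has already ruled out clocks under the single-link limit. */
static enum mode_status hdmi_a_high_clock(int clock_khz, int has_dce6)
{
	if (has_dce6 && clock_khz <= 340000)
		return MODE_OK;		/* HDMI 1.3+ capable */
	return MODE_CLOCK_HIGH;
}
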
/* check against the max pixel clock */
@@ -1495,7 +1191,7 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
}
static const struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
- .get_modes = radeon_vga_get_modes,
+ .get_modes = radeon_dvi_get_modes,
.mode_valid = radeon_dvi_mode_valid,
.best_encoder = radeon_dvi_encoder,
};
@@ -1509,6 +1205,21 @@ static const struct drm_connector_funcs radeon_dvi_connector_funcs = {
.force = radeon_dvi_force,
};
+static void radeon_dp_connector_destroy(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+
+ if (radeon_connector->edid)
+ kfree(radeon_connector->edid);
+ if (radeon_dig_connector->dp_i2c_bus)
+ radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
+ kfree(radeon_connector->con_priv);
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
static int radeon_dp_get_modes(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -1524,8 +1235,7 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_ON);
- radeon_connector_get_edid(connector);
- ret = radeon_ddc_get_modes(connector);
+ ret = radeon_ddc_get_modes(radeon_connector);
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
@@ -1536,8 +1246,7 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
}
- radeon_connector_get_edid(connector);
- ret = radeon_ddc_get_modes(connector);
+ ret = radeon_ddc_get_modes(radeon_connector);
}
if (ret > 0) {
@@ -1570,10 +1279,7 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
}
- radeon_connector_get_edid(connector);
- ret = radeon_ddc_get_modes(connector);
-
- radeon_get_native_mode(connector);
+ ret = radeon_ddc_get_modes(radeon_connector);
}
return ret;
@@ -1581,6 +1287,7 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector)
{
+ struct drm_mode_object *obj;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
int i;
@@ -1589,10 +1296,11 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
- if (!encoder)
+ obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
continue;
+ encoder = obj_to_encoder(obj);
radeon_encoder = to_radeon_encoder(encoder);
switch (radeon_encoder->encoder_id) {
@@ -1607,8 +1315,9 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
return ENCODER_OBJECT_ID_NONE;
}
-static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
+bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
{
+ struct drm_mode_object *obj;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
int i;
@@ -1618,10 +1327,11 @@ static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
if (connector->encoder_ids[i] == 0)
break;
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
- if (!encoder)
+ obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
+ if (!obj)
continue;
+ encoder = obj_to_encoder(obj);
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
found = true;
@@ -1653,22 +1363,15 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
- int r;
- if (radeon_dig_connector->is_mst)
- return connector_status_disconnected;
+ if (!force && radeon_check_hpd_status_unchanged(connector))
+ return connector->status;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
-
- if (!force && radeon_check_hpd_status_unchanged(connector)) {
- ret = connector->status;
- goto out;
+ if (radeon_connector->edid) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
}
- radeon_connector_free_edid(connector);
-
if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
(connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
if (encoder) {
@@ -1678,11 +1381,6 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
/* check if panel is valid */
if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
ret = connector_status_connected;
- /* don't fetch the edid from the vbios if ddc fails and runpm is
- * enabled so we report disconnected.
- */
- if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
- ret = connector_status_disconnected;
}
/* eDP is always DP */
radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
@@ -1716,23 +1414,14 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
ret = connector_status_connected;
- if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
radeon_dp_getdpcd(radeon_connector);
- r = radeon_dp_mst_probe(radeon_connector);
- if (r == 1)
- ret = connector_status_disconnected;
- }
} else {
if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
- if (radeon_dp_getdpcd(radeon_connector)) {
- r = radeon_dp_mst_probe(radeon_connector);
- if (r == 1)
- ret = connector_status_disconnected;
- else
- ret = connector_status_connected;
- }
+ if (radeon_dp_getdpcd(radeon_connector))
+ ret = connector_status_connected;
} else {
- /* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
+				/* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
if (radeon_ddc_probe(radeon_connector, false))
ret = connector_status_connected;
}
@@ -1740,24 +1429,12 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
}
radeon_connector_update_scratch_regs(connector, ret);
-
- if ((radeon_audio != 0) && encoder) {
- radeon_connector_get_edid(connector);
- radeon_audio_detect(connector, encoder, ret);
- }
-
-out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
-
return ret;
}
static int radeon_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_device *dev = connector->dev;
- struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
@@ -1788,23 +1465,14 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}
}
+ return MODE_OK;
} else {
if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
- (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
+ (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return radeon_dp_mode_valid_helper(connector, mode);
- } else {
- if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
- /* HDMI 1.3+ supports max clock of 340 Mhz */
- if (mode->clock > 340000)
- return MODE_CLOCK_HIGH;
- } else {
- if (mode->clock > 165000)
- return MODE_CLOCK_HIGH;
- }
- }
+ else
+ return MODE_OK;
}
-
- return MODE_OK;
}
static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
@@ -1818,7 +1486,7 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = {
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_connector_set_property,
- .destroy = radeon_connector_destroy,
+ .destroy = radeon_dp_connector_destroy,
.force = radeon_dvi_force,
};
@@ -1827,7 +1495,7 @@ static const struct drm_connector_funcs radeon_edp_connector_funcs = {
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_lvds_set_property,
- .destroy = radeon_connector_destroy,
+ .destroy = radeon_dp_connector_destroy,
.force = radeon_dvi_force,
};
@@ -1836,7 +1504,7 @@ static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_lvds_set_property,
- .destroy = radeon_connector_destroy,
+ .destroy = radeon_dp_connector_destroy,
.force = radeon_dvi_force,
};
@@ -1953,13 +1621,6 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
- drm_object_attach_property(&radeon_connector->base.base,
- dev->mode_config.scaling_mode_property,
- DRM_MODE_SCALE_NONE);
- if (ASIC_IS_DCE5(rdev))
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.output_csc_property,
- RADEON_OUTPUT_CSC_BYPASS);
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
@@ -1979,26 +1640,6 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
-
- drm_object_attach_property(&radeon_connector->base.base,
- dev->mode_config.scaling_mode_property,
- DRM_MODE_SCALE_NONE);
-
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.dither_property,
- RADEON_FMT_DITHER_DISABLE);
-
- if (radeon_audio != 0) {
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.audio_property,
- RADEON_AUDIO_AUTO);
- radeon_connector->audio = RADEON_AUDIO_AUTO;
- }
- if (ASIC_IS_DCE5(rdev))
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.output_csc_property,
- RADEON_OUTPUT_CSC_BYPASS);
-
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -2040,16 +1681,9 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
- if (ASIC_IS_AVIVO(rdev))
- drm_object_attach_property(&radeon_connector->base.base,
- dev->mode_config.scaling_mode_property,
- DRM_MODE_SCALE_NONE);
- if (ASIC_IS_DCE5(rdev))
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.output_csc_property,
- RADEON_OUTPUT_CSC_BYPASS);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
@@ -2065,14 +1699,6 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
- if (ASIC_IS_AVIVO(rdev))
- drm_object_attach_property(&radeon_connector->base.base,
- dev->mode_config.scaling_mode_property,
- DRM_MODE_SCALE_NONE);
- if (ASIC_IS_DCE5(rdev))
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.output_csc_property,
- RADEON_OUTPUT_CSC_BYPASS);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->interlace_allowed = true;
@@ -2106,18 +1732,6 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.dither_property,
- RADEON_FMT_DITHER_DISABLE);
- drm_object_attach_property(&radeon_connector->base.base,
- dev->mode_config.scaling_mode_property,
- DRM_MODE_SCALE_NONE);
- }
- if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.audio_property,
- RADEON_AUDIO_AUTO);
- radeon_connector->audio = RADEON_AUDIO_AUTO;
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
@@ -2125,10 +1739,6 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.load_detect_property,
1);
}
- if (ASIC_IS_DCE5(rdev))
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.output_csc_property,
- RADEON_OUTPUT_CSC_BYPASS);
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_DVII)
connector->doublescan_allowed = true;
@@ -2162,23 +1772,7 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.dither_property,
- RADEON_FMT_DITHER_DISABLE);
- drm_object_attach_property(&radeon_connector->base.base,
- dev->mode_config.scaling_mode_property,
- DRM_MODE_SCALE_NONE);
- }
- if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.audio_property,
- RADEON_AUDIO_AUTO);
- radeon_connector->audio = RADEON_AUDIO_AUTO;
}
- if (ASIC_IS_DCE5(rdev))
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.output_csc_property,
- RADEON_OUTPUT_CSC_BYPASS);
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -2215,23 +1809,7 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.dither_property,
- RADEON_FMT_DITHER_DISABLE);
- drm_object_attach_property(&radeon_connector->base.base,
- dev->mode_config.scaling_mode_property,
- DRM_MODE_SCALE_NONE);
- }
- if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.audio_property,
- RADEON_AUDIO_AUTO);
- radeon_connector->audio = RADEON_AUDIO_AUTO;
}
- if (ASIC_IS_DCE5(rdev))
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.output_csc_property,
- RADEON_OUTPUT_CSC_BYPASS);
connector->interlace_allowed = true;
/* in theory with a DP to VGA converter... */
connector->doublescan_allowed = false;
@@ -2299,10 +1877,8 @@ radeon_add_atom_connector(struct drm_device *dev,
}
if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
- if (i2c_bus->valid) {
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
- }
+ if (i2c_bus->valid)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
@@ -2378,6 +1954,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
break;
@@ -2462,37 +2039,10 @@ radeon_add_legacy_connector(struct drm_device *dev,
}
if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
- if (i2c_bus->valid) {
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
- }
+ if (i2c_bus->valid)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
-
connector->display_info.subpixel_order = subpixel_order;
drm_connector_register(connector);
}
-
-void radeon_setup_mst_connector(struct drm_device *dev)
-{
- struct radeon_device *rdev = dev->dev_private;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
-
- if (!ASIC_IS_DCE5(rdev))
- return;
-
- if (radeon_mst == 0)
- return;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- int ret;
-
- radeon_connector = to_radeon_connector(connector);
-
- if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- ret = radeon_dp_mst_init(radeon_connector);
- }
-}
diff --git a/sys/dev/pci/drm/radeon/radeon_cs.c b/sys/dev/pci/drm/radeon/radeon_cs.c
index 6f35c794d11..8a784fe813c 100644
--- a/sys/dev/pci/drm/radeon/radeon_cs.c
+++ b/sys/dev/pci/drm/radeon/radeon_cs.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_cs.c,v 1.6 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Jerome Glisse.
* All Rights Reserved.
@@ -28,166 +29,68 @@
#include <dev/pci/drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
-#include "radeon_trace.h"
-#define RADEON_CS_MAX_PRIORITY 32u
-#define RADEON_CS_NUM_BUCKETS (RADEON_CS_MAX_PRIORITY + 1)
-
-/* This is based on bucket sort with O(n) time complexity.
- * An item with priority "i" is added to bucket[i]. The lists are then
- * concatenated in descending order.
- */
-struct radeon_cs_buckets {
- struct list_head bucket[RADEON_CS_NUM_BUCKETS];
-};
-
-static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
-{
- unsigned i;
-
- for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
- INIT_LIST_HEAD(&b->bucket[i]);
-}
-
-static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
- struct list_head *item, unsigned priority)
-{
- /* Since buffers which appear sooner in the relocation list are
- * likely to be used more often than buffers which appear later
- * in the list, the sort mustn't change the ordering of buffers
- * with the same priority, i.e. it must be stable.
- */
- list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
-}
-
-static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
- struct list_head *out_list)
-{
- unsigned i;
-
- /* Connect the sorted buckets in the output list. */
- for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
- list_splice(&b->bucket[i], out_list);
- }
-}
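
The helpers removed above implement a stable bucket sort keyed on relocation priority: each entry is appended to bucket[min(priority, MAX)], and the buckets are then spliced together so the highest priorities come out first. A minimal standalone sketch of the same idea, using simplified array types rather than the driver's list_head machinery:

    #include <stdio.h>

    #define MAX_PRIORITY 3
    #define NBUCKETS (MAX_PRIORITY + 1)

    struct item { int priority; const char *name; };

    int main(void)
    {
        /* input order matters: the sort must be stable */
        struct item in[] = {
            { 1, "a" }, { 3, "b" }, { 1, "c" }, { 0, "d" }, { 3, "e" },
        };
        int n = sizeof(in) / sizeof(in[0]);
        int bucket[NBUCKETS][8], count[NBUCKETS] = { 0 }, p, i;

        /* distribute: appending preserves relative order per bucket */
        for (i = 0; i < n; i++) {
            p = in[i].priority > MAX_PRIORITY ? MAX_PRIORITY : in[i].priority;
            bucket[p][count[p]++] = i;
        }
        /* concatenate in descending priority, like the splice loop */
        for (p = MAX_PRIORITY; p >= 0; p--)
            for (i = 0; i < count[p]; i++)
                printf("%s (prio %d)\n", in[bucket[p][i]].name,
                    in[bucket[p][i]].priority);
        return 0;   /* prints b, e, a, c, d */
    }
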
+void r100_cs_dump_packet(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt);
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
struct drm_device *ddev = p->rdev->ddev;
struct radeon_cs_chunk *chunk;
- struct radeon_cs_buckets buckets;
- unsigned i;
- bool need_mmap_lock = false;
- int r;
+ unsigned i, j;
+ bool duplicate;
- if (p->chunk_relocs == NULL) {
+ if (p->chunk_relocs_idx == -1) {
return 0;
}
- chunk = p->chunk_relocs;
+ chunk = &p->chunks[p->chunk_relocs_idx];
p->dma_reloc_idx = 0;
	/* FIXME: we assume that each reloc uses 4 dwords */
p->nrelocs = chunk->length_dw / 4;
- p->relocs = drm_calloc_large(p->nrelocs, sizeof(struct radeon_bo_list));
+ p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
+ if (p->relocs_ptr == NULL) {
+ return -ENOMEM;
+ }
+ p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
if (p->relocs == NULL) {
return -ENOMEM;
}
-
- radeon_cs_buckets_init(&buckets);
-
for (i = 0; i < p->nrelocs; i++) {
struct drm_radeon_cs_reloc *r;
- struct drm_gem_object *gobj;
- unsigned priority;
+ duplicate = false;
r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
- gobj = drm_gem_object_lookup(ddev, p->filp, r->handle);
- if (gobj == NULL) {
- DRM_ERROR("gem object lookup failed 0x%x\n",
- r->handle);
- return -ENOENT;
- }
- p->relocs[i].robj = gem_to_radeon_bo(gobj);
-
- /* The userspace buffer priorities are from 0 to 15. A higher
- * number means the buffer is more important.
- * Also, the buffers used for write have a higher priority than
- * the buffers used for read only, which doubles the range
- * to 0 to 31. 32 is reserved for the kernel driver.
- */
- priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
- + !!r->write_domain;
-
-	/* the first reloc of a UVD job is the msg and that must be in
-	   VRAM; also put everything into VRAM on AGP cards and older
-	   IGP chips to avoid image corruption */
- if (p->ring == R600_RING_TYPE_UVD_INDEX &&
- (i == 0 || (p->rdev->flags & RADEON_IS_AGP) ||
- p->rdev->family == CHIP_RS780 ||
- p->rdev->family == CHIP_RS880)) {
-
- /* TODO: is this still needed for NI+ ? */
- p->relocs[i].prefered_domains =
- RADEON_GEM_DOMAIN_VRAM;
-
- p->relocs[i].allowed_domains =
- RADEON_GEM_DOMAIN_VRAM;
-
- /* prioritize this over any other relocation */
- priority = RADEON_CS_MAX_PRIORITY;
- } else {
- uint32_t domain = r->write_domain ?
- r->write_domain : r->read_domains;
-
- if (domain & RADEON_GEM_DOMAIN_CPU) {
- DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
- "for command submission\n");
- return -EINVAL;
+ for (j = 0; j < i; j++) {
+ if (r->handle == p->relocs[j].handle) {
+ p->relocs_ptr[i] = &p->relocs[j];
+ duplicate = true;
+ break;
}
-
- p->relocs[i].prefered_domains = domain;
- if (domain == RADEON_GEM_DOMAIN_VRAM)
- domain |= RADEON_GEM_DOMAIN_GTT;
- p->relocs[i].allowed_domains = domain;
}
-
- if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
- uint32_t domain = p->relocs[i].prefered_domains;
- if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
- DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
- "allowed for userptr BOs\n");
- return -EINVAL;
+ if (!duplicate) {
+ p->relocs[i].gobj = drm_gem_object_lookup(ddev,
+ p->filp,
+ r->handle);
+ if (p->relocs[i].gobj == NULL) {
+ DRM_ERROR("gem object lookup failed 0x%x\n",
+ r->handle);
+ return -ENOENT;
}
- need_mmap_lock = true;
- domain = RADEON_GEM_DOMAIN_GTT;
- p->relocs[i].prefered_domains = domain;
- p->relocs[i].allowed_domains = domain;
- }
+ p->relocs_ptr[i] = &p->relocs[i];
+ p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
+ p->relocs[i].lobj.bo = p->relocs[i].robj;
+ p->relocs[i].lobj.wdomain = r->write_domain;
+ p->relocs[i].lobj.rdomain = r->read_domains;
+ p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
+ p->relocs[i].handle = r->handle;
+ p->relocs[i].flags = r->flags;
+ radeon_bo_list_add_object(&p->relocs[i].lobj,
+ &p->validated);
- p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
- p->relocs[i].tv.shared = !r->write_domain;
-
- radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
- priority);
+ } else
+ p->relocs[i].handle = 0;
}
-
- radeon_cs_buckets_get_list(&buckets, &p->validated);
-
- if (p->cs_flags & RADEON_CS_USE_VM)
- p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
- &p->validated);
-#ifdef notyet
- if (need_mmap_lock)
- down_read(&current->mm->mmap_sem);
-#endif
-
- r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
-
-#ifdef notyet
- if (need_mmap_lock)
- up_read(&current->mm->mmap_sem);
-#endif
-
- return r;
+ return radeon_bo_list_validate(&p->validated);
}
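
In the removed path above, each relocation's scheduling priority packed the 0-15 userspace value and the write flag into a 0-31 range, with 32 reserved for the kernel. A hedged sketch of that encoding; the mask value is inferred from the removed lines rather than taken from a header:

    #include <stdint.h>
    #include <stdio.h>

    #define RELOC_PRIO_MASK 0xf     /* assumed: userspace priority is 0-15 */
    #define CS_MAX_PRIORITY 32u     /* reserved for the kernel driver */

    static unsigned reloc_priority(uint32_t flags, uint32_t write_domain)
    {
        /* write buffers outrank read-only ones at the same level */
        return (flags & RELOC_PRIO_MASK) * 2 + !!write_domain;
    }

    int main(void)
    {
        printf("ro prio 5  -> %u\n", reloc_priority(5, 0));   /* 10 */
        printf("rw prio 5  -> %u\n", reloc_priority(5, 1));   /* 11 */
        printf("kernel max -> %u\n", CS_MAX_PRIORITY);        /* 32 */
        return 0;
    }
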
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
@@ -216,38 +119,38 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
p->ring = R600_RING_TYPE_DMA_INDEX;
else
p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
- } else if (p->rdev->family >= CHIP_RV770) {
+ } else if (p->rdev->family >= CHIP_R600) {
p->ring = R600_RING_TYPE_DMA_INDEX;
} else {
return -EINVAL;
}
break;
- case RADEON_CS_RING_UVD:
- p->ring = R600_RING_TYPE_UVD_INDEX;
- break;
- case RADEON_CS_RING_VCE:
- /* TODO: only use the low priority ring for now */
- p->ring = TN_RING_TYPE_VCE1_INDEX;
- break;
}
return 0;
}
-static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
+static void radeon_cs_sync_to(struct radeon_cs_parser *p,
+ struct radeon_fence *fence)
{
- struct radeon_bo_list *reloc;
- int r;
+ struct radeon_fence *other;
- list_for_each_entry(reloc, &p->validated, tv.head) {
- struct reservation_object *resv;
+ if (!fence)
+ return;
- resv = reloc->robj->tbo.resv;
- r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
- reloc->tv.shared);
- if (r)
- return r;
+ other = p->ib.sync_to[fence->ring];
+ p->ib.sync_to[fence->ring] = radeon_fence_later(fence, other);
+}
+
+static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
+{
+ int i;
+
+ for (i = 0; i < p->nrelocs; i++) {
+ if (!p->relocs[i].robj)
+ continue;
+
+ radeon_cs_sync_to(p, p->relocs[i].robj->tbo.sync_obj);
}
- return 0;
}
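
The replacement radeon_cs_sync_to() keeps at most one fence per ring, the one that completes last, presumably by comparing sequence numbers inside radeon_fence_later(). A simplified sketch of that bookkeeping with hypothetical types, not the driver's own:

    #include <stddef.h>
    #include <stdio.h>

    #define NUM_RINGS 8

    struct fence { int ring; unsigned long seq; };

    /* keep whichever fence completes later; NULL means nothing to wait on */
    static struct fence *fence_later(struct fence *a, struct fence *b)
    {
        if (a == NULL)
            return b;
        if (b == NULL)
            return a;
        return a->seq >= b->seq ? a : b;
    }

    int main(void)
    {
        struct fence *sync_to[NUM_RINGS] = { NULL };
        struct fence f1 = { 2, 100 }, f2 = { 2, 90 };

        sync_to[f1.ring] = fence_later(&f1, sync_to[f1.ring]);
        sync_to[f2.ring] = fence_later(&f2, sync_to[f2.ring]);

        printf("ring 2 waits on seq %lu\n", sync_to[2]->seq); /* 100 */
        return 0;
    }
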
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
@@ -259,26 +162,26 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
u32 ring = RADEON_CS_RING_GFX;
s32 priority = 0;
- INIT_LIST_HEAD(&p->validated);
-
if (!cs->num_chunks) {
return 0;
}
-
/* get chunks */
+ INIT_LIST_HEAD(&p->validated);
p->idx = 0;
p->ib.sa_bo = NULL;
+ p->ib.semaphore = NULL;
p->const_ib.sa_bo = NULL;
- p->chunk_ib = NULL;
- p->chunk_relocs = NULL;
- p->chunk_flags = NULL;
- p->chunk_const_ib = NULL;
+ p->const_ib.semaphore = NULL;
+ p->chunk_ib_idx = -1;
+ p->chunk_relocs_idx = -1;
+ p->chunk_flags_idx = -1;
+ p->chunk_const_ib_idx = -1;
p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (p->chunks_array == NULL) {
return -ENOMEM;
}
chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
- if (copy_from_user(p->chunks_array, chunk_array_ptr,
+ if (DRM_COPY_FROM_USER(p->chunks_array, chunk_array_ptr,
sizeof(uint64_t)*cs->num_chunks)) {
return -EFAULT;
}
@@ -294,58 +197,58 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
uint32_t __user *cdata;
chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
- if (copy_from_user(&user_chunk, chunk_ptr,
+ if (DRM_COPY_FROM_USER(&user_chunk, chunk_ptr,
sizeof(struct drm_radeon_cs_chunk))) {
return -EFAULT;
}
p->chunks[i].length_dw = user_chunk.length_dw;
- if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
- p->chunk_relocs = &p->chunks[i];
+ p->chunks[i].kdata = NULL;
+ p->chunks[i].chunk_id = user_chunk.chunk_id;
+
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
+ p->chunk_relocs_idx = i;
}
- if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
- p->chunk_ib = &p->chunks[i];
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
+ p->chunk_ib_idx = i;
/* zero length IB isn't useful */
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
- if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
- p->chunk_const_ib = &p->chunks[i];
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
+ p->chunk_const_ib_idx = i;
/* zero length CONST IB isn't useful */
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
- if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
- p->chunk_flags = &p->chunks[i];
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+ p->chunk_flags_idx = i;
/* zero length flags aren't useful */
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
- size = p->chunks[i].length_dw;
- cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
- p->chunks[i].user_ptr = cdata;
- if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
- continue;
-
- if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
- if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
- continue;
- }
-
- p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
- size *= sizeof(uint32_t);
- if (p->chunks[i].kdata == NULL) {
- return -ENOMEM;
- }
- if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
- return -EFAULT;
- }
- if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
- p->cs_flags = p->chunks[i].kdata[0];
- if (p->chunks[i].length_dw > 1)
- ring = p->chunks[i].kdata[1];
- if (p->chunks[i].length_dw > 2)
- priority = (s32)p->chunks[i].kdata[2];
+ p->chunks[i].length_dw = user_chunk.length_dw;
+ p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
+
+ cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
+ if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
+ (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
+ size = p->chunks[i].length_dw * sizeof(uint32_t);
+ p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
+ if (p->chunks[i].kdata == NULL) {
+ return -ENOMEM;
+ }
+ if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
+ p->chunks[i].user_ptr, size)) {
+ return -EFAULT;
+ }
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+ p->cs_flags = p->chunks[i].kdata[0];
+ if (p->chunks[i].length_dw > 1)
+ ring = p->chunks[i].kdata[1];
+ if (p->chunks[i].length_dw > 2)
+ priority = (s32)p->chunks[i].kdata[2];
+ }
}
}
@@ -357,37 +260,48 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EINVAL;
}
+ /* we only support VM on SI+ */
+ if ((p->rdev->family >= CHIP_TAHITI) &&
+ ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
+ DRM_ERROR("VM required on SI+!\n");
+ return -EINVAL;
+ }
+
if (radeon_cs_get_ring(p, ring, priority))
return -EINVAL;
+ }
- /* we only support VM on some SI+ rings */
- if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
- if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
- DRM_ERROR("Ring %d requires VM!\n", p->ring);
- return -EINVAL;
- }
- } else {
- if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
- DRM_ERROR("VM not supported on ring %d!\n",
- p->ring);
- return -EINVAL;
+ /* deal with non-vm */
+ if ((p->chunk_ib_idx != -1) &&
+ ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
+ (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
+ if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
+ DRM_ERROR("cs IB too big: %d\n",
+ p->chunks[p->chunk_ib_idx].length_dw);
+ return -EINVAL;
+ }
+ if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
+ p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
+ p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
+ kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
+ kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
+ p->chunks[p->chunk_ib_idx].kpage[0] = NULL;
+ p->chunks[p->chunk_ib_idx].kpage[1] = NULL;
+ return -ENOMEM;
}
}
+ p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
+ p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
+ p->chunks[p->chunk_ib_idx].last_copied_page = -1;
+ p->chunks[p->chunk_ib_idx].last_page_index =
+ ((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
}
return 0;
}
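
The AGP bounce-buffer setup above finishes by precomputing the index of the last IB page; radeon_cs_finish_pages() later needs the size of that final, possibly partial, page. The arithmetic in isolation:

    #include <stdio.h>

    #define PAGE_SIZE 4096

    int main(void)
    {
        unsigned length_dw = 2049;          /* 8196 bytes of IB */
        unsigned bytes = length_dw * 4;

        /* last page index, as computed above */
        unsigned last = (bytes - 1) / PAGE_SIZE;    /* 2 */

        /* size of the final page; an exact multiple still fills a page */
        unsigned tail = bytes % PAGE_SIZE;
        if (tail == 0)
            tail = PAGE_SIZE;

        printf("last page %u, tail %u bytes\n", last, tail);  /* 2, 4 */
        return 0;
    }
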
-static int cmp_size_smaller_first(void *priv, struct list_head *a,
- struct list_head *b)
-{
- struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
- struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
-
- /* Sort A before B if A is smaller. */
- return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
-}
-
/**
* cs_parser_fini() - clean parser states
* @parser: parser structure holding parsing context.
@@ -396,45 +310,33 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a,
 * If error is set then unvalidate the buffers, otherwise just free the
 * memory used by the parsing context.
**/
-static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
+static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
unsigned i;
if (!error) {
- /* Sort the buffer list from the smallest to largest buffer,
- * which affects the order of buffers in the LRU list.
-	 * This ensures that the smallest buffers are added first
-	 * to the LRU list, so they are likely to be evicted
-	 * first later on, instead of large buffers whose eviction
-	 * is more expensive.
- *
- * This slightly lowers the number of bytes moved by TTM
- * per frame under memory pressure.
- */
- list_sort(NULL, &parser->validated, cmp_size_smaller_first);
-
- ttm_eu_fence_buffer_objects(&parser->ticket,
- &parser->validated,
- &parser->ib.fence->base);
- } else if (backoff) {
- ttm_eu_backoff_reservation(&parser->ticket,
- &parser->validated);
+ ttm_eu_fence_buffer_objects(&parser->validated,
+ parser->ib.fence);
+ } else {
+ ttm_eu_backoff_reservation(&parser->validated);
}
if (parser->relocs != NULL) {
for (i = 0; i < parser->nrelocs; i++) {
- struct radeon_bo *bo = parser->relocs[i].robj;
- if (bo == NULL)
- continue;
-
- drm_gem_object_unreference_unlocked(&bo->gem_base);
+ if (parser->relocs[i].gobj)
+ drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
}
}
kfree(parser->track);
- drm_free_large(parser->relocs);
- drm_free_large(parser->vm_bos);
- for (i = 0; i < parser->nchunks; i++)
- drm_free_large(parser->chunks[i].kdata);
+ kfree(parser->relocs);
+ kfree(parser->relocs_ptr);
+ for (i = 0; i < parser->nchunks; i++) {
+ kfree(parser->chunks[i].kdata);
+ if ((parser->rdev->flags & RADEON_IS_AGP)) {
+ kfree(parser->chunks[i].kpage[0]);
+ kfree(parser->chunks[i].kpage[1]);
+ }
+ }
kfree(parser->chunks);
kfree(parser->chunks_array);
radeon_ib_free(parser->rdev, &parser->ib);
@@ -444,134 +346,157 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
struct radeon_cs_parser *parser)
{
+ struct radeon_cs_chunk *ib_chunk;
int r;
- if (parser->chunk_ib == NULL)
+ if (parser->chunk_ib_idx == -1)
return 0;
if (parser->cs_flags & RADEON_CS_USE_VM)
return 0;
+ ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+	/* Copy the packet into the IB; the parser will read from the
+ * input memory (cached) and write to the IB (which can be
+ * uncached).
+ */
+ r = radeon_ib_get(rdev, parser->ring, &parser->ib,
+ NULL, ib_chunk->length_dw * 4);
+ if (r) {
+ DRM_ERROR("Failed to get ib !\n");
+ return r;
+ }
+ parser->ib.length_dw = ib_chunk->length_dw;
r = radeon_cs_parse(rdev, parser->ring, parser);
if (r || parser->parser_error) {
DRM_ERROR("Invalid command stream !\n");
return r;
}
-
- r = radeon_cs_sync_rings(parser);
+ r = radeon_cs_finish_pages(parser);
if (r) {
- if (r != -ERESTARTSYS)
- DRM_ERROR("Failed to sync rings: %i\n", r);
+ DRM_ERROR("Invalid command stream !\n");
return r;
}
-
- if (parser->ring == R600_RING_TYPE_UVD_INDEX)
- radeon_uvd_note_usage(rdev);
- else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
- (parser->ring == TN_RING_TYPE_VCE2_INDEX))
- radeon_vce_note_usage(rdev);
-
- r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
+ radeon_cs_sync_rings(parser);
+ r = radeon_ib_schedule(rdev, &parser->ib, NULL);
if (r) {
DRM_ERROR("Failed to schedule IB !\n");
}
return r;
}
-static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
+static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
struct radeon_vm *vm)
{
- struct radeon_device *rdev = p->rdev;
- struct radeon_bo_va *bo_va;
- int i, r;
-
- r = radeon_vm_update_page_directory(rdev, vm);
- if (r)
- return r;
+ struct radeon_device *rdev = parser->rdev;
+ struct radeon_bo_list *lobj;
+ struct radeon_bo *bo;
+ int r;
- r = radeon_vm_clear_freed(rdev, vm);
- if (r)
+ r = radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
+ if (r) {
return r;
-
- if (vm->ib_bo_va == NULL) {
- DRM_ERROR("Tmp BO not in VM!\n");
- return -EINVAL;
}
-
- r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
- &rdev->ring_tmp_bo.bo->tbo.mem);
- if (r)
- return r;
-
- for (i = 0; i < p->nrelocs; i++) {
- struct radeon_bo *bo;
-
- bo = p->relocs[i].robj;
- bo_va = radeon_vm_bo_find(vm, bo);
- if (bo_va == NULL) {
- dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
- return -EINVAL;
- }
-
- r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
- if (r)
+ list_for_each_entry(lobj, &parser->validated, tv.head) {
+ bo = lobj->bo;
+ r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
+ if (r) {
return r;
-
- radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
+ }
}
-
- return radeon_vm_clear_invalids(rdev, vm);
+ return 0;
}
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
struct radeon_cs_parser *parser)
{
+ struct radeon_cs_chunk *ib_chunk;
struct radeon_fpriv *fpriv = parser->filp->driver_priv;
struct radeon_vm *vm = &fpriv->vm;
int r;
- if (parser->chunk_ib == NULL)
+ if (parser->chunk_ib_idx == -1)
return 0;
if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
return 0;
- if (parser->const_ib.length_dw) {
+ if ((rdev->family >= CHIP_TAHITI) &&
+ (parser->chunk_const_ib_idx != -1)) {
+ ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
+ if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+ DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
+ vm, ib_chunk->length_dw * 4);
+ if (r) {
+ DRM_ERROR("Failed to get const ib !\n");
+ return r;
+ }
+ parser->const_ib.is_const_ib = true;
+ parser->const_ib.length_dw = ib_chunk->length_dw;
+ /* Copy the packet into the IB */
+ if (DRM_COPY_FROM_USER(parser->const_ib.ptr, ib_chunk->user_ptr,
+ ib_chunk->length_dw * 4)) {
+ return -EFAULT;
+ }
r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
if (r) {
return r;
}
}
+ ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+ if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+ DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ r = radeon_ib_get(rdev, parser->ring, &parser->ib,
+ vm, ib_chunk->length_dw * 4);
+ if (r) {
+ DRM_ERROR("Failed to get ib !\n");
+ return r;
+ }
+ parser->ib.length_dw = ib_chunk->length_dw;
+ /* Copy the packet into the IB */
+ if (DRM_COPY_FROM_USER(parser->ib.ptr, ib_chunk->user_ptr,
+ ib_chunk->length_dw * 4)) {
+ return -EFAULT;
+ }
r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
if (r) {
return r;
}
- if (parser->ring == R600_RING_TYPE_UVD_INDEX)
- radeon_uvd_note_usage(rdev);
-
+ mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex);
- r = radeon_bo_vm_update_pte(parser, vm);
+ r = radeon_vm_alloc_pt(rdev, vm);
if (r) {
goto out;
}
-
- r = radeon_cs_sync_rings(parser);
+ r = radeon_bo_vm_update_pte(parser, vm);
if (r) {
- if (r != -ERESTARTSYS)
- DRM_ERROR("Failed to sync rings: %i\n", r);
goto out;
}
+ radeon_cs_sync_rings(parser);
+ radeon_cs_sync_to(parser, vm->fence);
+ radeon_cs_sync_to(parser, radeon_vm_grab_id(rdev, vm, parser->ring));
if ((rdev->family >= CHIP_TAHITI) &&
- (parser->chunk_const_ib != NULL)) {
- r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
+ (parser->chunk_const_ib_idx != -1)) {
+ r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
} else {
- r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
+ r = radeon_ib_schedule(rdev, &parser->ib, NULL);
+ }
+
+ if (!r) {
+ radeon_vm_fence(rdev, vm, parser->ib.fence);
}
out:
+ radeon_vm_add_to_lru(rdev, vm);
mutex_unlock(&vm->mutex);
+ mutex_unlock(&rdev->vm_manager.lock);
return r;
}
@@ -585,62 +510,6 @@ static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
return r;
}
-static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
-{
- struct radeon_cs_chunk *ib_chunk;
- struct radeon_vm *vm = NULL;
- int r;
-
- if (parser->chunk_ib == NULL)
- return 0;
-
- if (parser->cs_flags & RADEON_CS_USE_VM) {
- struct radeon_fpriv *fpriv = parser->filp->driver_priv;
- vm = &fpriv->vm;
-
- if ((rdev->family >= CHIP_TAHITI) &&
- (parser->chunk_const_ib != NULL)) {
- ib_chunk = parser->chunk_const_ib;
- if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
- DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
- return -EINVAL;
- }
- r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
- vm, ib_chunk->length_dw * 4);
- if (r) {
- DRM_ERROR("Failed to get const ib !\n");
- return r;
- }
- parser->const_ib.is_const_ib = true;
- parser->const_ib.length_dw = ib_chunk->length_dw;
- if (copy_from_user(parser->const_ib.ptr,
- ib_chunk->user_ptr,
- ib_chunk->length_dw * 4))
- return -EFAULT;
- }
-
- ib_chunk = parser->chunk_ib;
- if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
- DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
- return -EINVAL;
- }
- }
- ib_chunk = parser->chunk_ib;
-
- r = radeon_ib_get(rdev, parser->ring, &parser->ib,
- vm, ib_chunk->length_dw * 4);
- if (r) {
- DRM_ERROR("Failed to get ib !\n");
- return r;
- }
- parser->ib.length_dw = ib_chunk->length_dw;
- if (ib_chunk->kdata)
- memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
- else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
- return -EFAULT;
- return 0;
-}
-
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
@@ -652,44 +521,31 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
up_read(&rdev->exclusive_lock);
return -EBUSY;
}
- if (rdev->in_reset) {
- up_read(&rdev->exclusive_lock);
- r = radeon_gpu_reset(rdev);
- if (!r)
- r = -EAGAIN;
- return r;
- }
/* initialize parser */
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
parser.rdev = rdev;
+#ifdef notyet
parser.dev = rdev->dev;
+#endif
parser.family = rdev->family;
r = radeon_cs_parser_init(&parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
- radeon_cs_parser_fini(&parser, r, false);
+ radeon_cs_parser_fini(&parser, r);
up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
return r;
}
-
- r = radeon_cs_ib_fill(rdev, &parser);
- if (!r) {
- r = radeon_cs_parser_relocs(&parser);
- if (r && r != -ERESTARTSYS)
- DRM_ERROR("Failed to parse relocation %d!\n", r);
- }
-
+ r = radeon_cs_parser_relocs(&parser);
if (r) {
- radeon_cs_parser_fini(&parser, r, false);
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to parse relocation %d!\n", r);
+ radeon_cs_parser_fini(&parser, r);
up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
return r;
}
-
- trace_radeon_cs(&parser);
-
r = radeon_cs_ib_chunk(rdev, &parser);
if (r) {
goto out;
@@ -699,169 +555,99 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
goto out;
}
out:
- radeon_cs_parser_fini(&parser, r, true);
+ radeon_cs_parser_fini(&parser, r);
up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
return r;
}
-/**
- * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
- * @parser: parser structure holding parsing context.
- * @pkt: where to store packet information
- *
- * Assume that chunk_ib_index is properly set. Will return -EINVAL
- * if the packet is bigger than the remaining ib size or its type is unknown.
- **/
-int radeon_cs_packet_parse(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx)
+int radeon_cs_finish_pages(struct radeon_cs_parser *p)
{
- struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
- struct radeon_device *rdev = p->rdev;
- uint32_t header;
- int ret = 0, i;
-
- if (idx >= ib_chunk->length_dw) {
- DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
- idx, ib_chunk->length_dw);
- return -EINVAL;
- }
- header = radeon_get_ib_value(p, idx);
- pkt->idx = idx;
- pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
- pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
- pkt->one_reg_wr = 0;
- switch (pkt->type) {
- case RADEON_PACKET_TYPE0:
- if (rdev->family < CHIP_R600) {
- pkt->reg = R100_CP_PACKET0_GET_REG(header);
- pkt->one_reg_wr =
- RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
- } else
- pkt->reg = R600_CP_PACKET0_GET_REG(header);
- break;
- case RADEON_PACKET_TYPE3:
- pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
- break;
- case RADEON_PACKET_TYPE2:
- pkt->count = -1;
- break;
- default:
- DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
- ret = -EINVAL;
- goto dump_ib;
- }
- if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
- DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
- pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
- ret = -EINVAL;
- goto dump_ib;
+ struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+ int i;
+ int size = PAGE_SIZE;
+
+ for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
+ if (i == ibc->last_page_index) {
+ size = (ibc->length_dw * 4) % PAGE_SIZE;
+ if (size == 0)
+ size = PAGE_SIZE;
+ }
+
+ if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
+ ibc->user_ptr + (i * PAGE_SIZE),
+ size))
+ return -EFAULT;
}
return 0;
-
-dump_ib:
- for (i = 0; i < ib_chunk->length_dw; i++) {
- if (i == idx)
- printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
- else
- printk("\t0x%08x\n", radeon_get_ib_value(p, i));
- }
- return ret;
}
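
The removed radeon_cs_packet_parse() decoded a 32-bit PM4 command header into type, count and, for type-3 packets, an opcode via the RADEON_CP_PACKET_* macros. A hedged sketch of that decoding; the bit positions follow the common PM4 layout and are assumed here rather than copied from the driver's headers:

    #include <stdint.h>
    #include <stdio.h>

    /* assumed PM4 layout: type in bits 31:30, count in 29:16, opcode in 15:8 */
    #define PKT_TYPE(h)    (((h) >> 30) & 0x3)
    #define PKT_COUNT(h)   (((h) >> 16) & 0x3fff)
    #define PKT3_OPCODE(h) (((h) >> 8) & 0xff)

    int main(void)
    {
        uint32_t header = 0xC0001000; /* type 3, count 0, opcode 0x10 */

        printf("type   %u\n", PKT_TYPE(header));
        printf("count  %u\n", PKT_COUNT(header));
        if (PKT_TYPE(header) == 3)
            printf("opcode 0x%02x\n", PKT3_OPCODE(header));
        return 0;
    }
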
-/**
- * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
- * @p: structure holding the parser context.
- *
- * Check if the next packet is NOP relocation packet3.
- **/
-bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
{
- struct radeon_cs_packet p3reloc;
- int r;
+ int new_page;
+ struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+ int i;
+ int size = PAGE_SIZE;
+ bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
+ false : true;
+
+ for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
+ if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
+ ibc->user_ptr + (i * PAGE_SIZE),
+ PAGE_SIZE)) {
+ p->parser_error = -EFAULT;
+ return 0;
+ }
+ }
- r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
- if (r)
- return false;
- if (p3reloc.type != RADEON_PACKET_TYPE3)
- return false;
- if (p3reloc.opcode != RADEON_PACKET3_NOP)
- return false;
- return true;
-}
+ if (pg_idx == ibc->last_page_index) {
+ size = (ibc->length_dw * 4) % PAGE_SIZE;
+ if (size == 0)
+ size = PAGE_SIZE;
+ }
-/**
- * radeon_cs_dump_packet() - dump raw packet context
- * @p: structure holding the parser context.
- * @pkt: structure holding the packet.
- *
- * Used mostly for debugging and error reporting.
- **/
-void radeon_cs_dump_packet(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt)
-{
- volatile uint32_t *ib;
- unsigned i;
- unsigned idx;
+ new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
+ if (copy1)
+ ibc->kpage[new_page] = p->ib.ptr + (pg_idx * (PAGE_SIZE / 4));
+
+ if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
+ ibc->user_ptr + (pg_idx * PAGE_SIZE),
+ size)) {
+ p->parser_error = -EFAULT;
+ return 0;
+ }
+
+	/* copy to the IB in the non-single-copy case */
+ if (!copy1)
+ memcpy((void *)(p->ib.ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
- ib = p->ib.ptr;
- idx = pkt->idx;
- for (i = 0; i <= (pkt->count + 1); i++, idx++)
- DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
+ ibc->last_copied_page = pg_idx;
+ ibc->kpage_idx[new_page] = pg_idx;
+
+ return new_page;
}
-/**
- * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
- * @parser: parser structure holding parsing context.
- * @data: pointer to relocation data
- * @offset_start: starting offset
- * @offset_mask: offset mask (to align start offset on)
- * @reloc: reloc information
- *
- * Check if next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_bo_list **cs_reloc,
- int nomm)
+u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
- struct radeon_cs_chunk *relocs_chunk;
- struct radeon_cs_packet p3reloc;
- unsigned idx;
- int r;
-
- if (p->chunk_relocs == NULL) {
- DRM_ERROR("No relocation chunk !\n");
- return -EINVAL;
- }
- *cs_reloc = NULL;
- relocs_chunk = p->chunk_relocs;
- r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
- if (r)
- return r;
- p->idx += p3reloc.count + 2;
- if (p3reloc.type != RADEON_PACKET_TYPE3 ||
- p3reloc.opcode != RADEON_PACKET3_NOP) {
- DRM_ERROR("No packet3 for relocation for packet at %d.\n",
- p3reloc.idx);
- radeon_cs_dump_packet(p, &p3reloc);
- return -EINVAL;
- }
- idx = radeon_get_ib_value(p, p3reloc.idx + 1);
- if (idx >= relocs_chunk->length_dw) {
- DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
- idx, relocs_chunk->length_dw);
- radeon_cs_dump_packet(p, &p3reloc);
- return -EINVAL;
+ struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+ u32 pg_idx, pg_offset;
+ u32 idx_value = 0;
+ int new_page;
+
+ pg_idx = (idx * 4) / PAGE_SIZE;
+ pg_offset = (idx * 4) % PAGE_SIZE;
+
+ if (ibc->kpage_idx[0] == pg_idx)
+ return ibc->kpage[0][pg_offset/4];
+ if (ibc->kpage_idx[1] == pg_idx)
+ return ibc->kpage[1][pg_offset/4];
+
+ new_page = radeon_cs_update_pages(p, pg_idx);
+ if (new_page < 0) {
+ p->parser_error = new_page;
+ return 0;
}
- /* FIXME: we assume reloc size is 4 dwords */
- if (nomm) {
- *cs_reloc = p->relocs;
- (*cs_reloc)->gpu_offset =
- (u64)relocs_chunk->kdata[idx + 3] << 32;
- (*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
- } else
- *cs_reloc = &p->relocs[(idx / 4)];
- return 0;
+
+ idx_value = ibc->kpage[new_page][pg_offset/4];
+ return idx_value;
}
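
radeon_get_ib_value() above reads the IB through a two-entry page cache: kpage_idx[] records which user page each slot holds, and on a miss the slot holding the older page is recycled, which works because IBs are parsed in roughly increasing order. A compact sketch of that lookup-and-replace policy, with simplified types and a stand-in fetch:

    #include <stdio.h>

    #define PAGE_WORDS 1024 /* assumed: 4 KiB pages of u32 */

    struct page_cache {
        int idx[2];                    /* page held by each slot, -1 = empty */
        unsigned vals[2][PAGE_WORDS];
    };

    /* stand-in for copying one IB page from userspace */
    static void fetch_page(struct page_cache *c, int slot, int pg)
    {
        for (int i = 0; i < PAGE_WORDS; i++)
            c->vals[slot][i] = (unsigned)(pg * PAGE_WORDS + i);
        c->idx[slot] = pg;
    }

    static unsigned get_value(struct page_cache *c, int word)
    {
        int pg = word / PAGE_WORDS, off = word % PAGE_WORDS;

        if (c->idx[0] == pg)
            return c->vals[0][off];
        if (c->idx[1] == pg)
            return c->vals[1][off];
        /* evict the slot holding the older page, as the driver does */
        int slot = c->idx[0] < c->idx[1] ? 0 : 1;
        fetch_page(c, slot, pg);
        return c->vals[slot][off];
    }

    int main(void)
    {
        struct page_cache c = { .idx = { -1, -1 } };

        printf("%u\n", get_value(&c, 5));     /* miss, fills a slot: 5 */
        printf("%u\n", get_value(&c, 1030));  /* second page: 1030 */
        printf("%u\n", get_value(&c, 5));     /* hit: 5 */
        return 0;
    }
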
diff --git a/sys/dev/pci/drm/radeon/radeon_cursor.c b/sys/dev/pci/drm/radeon/radeon_cursor.c
index 2f4e73496d0..88a75e3b259 100644
--- a/sys/dev/pci/drm/radeon/radeon_cursor.c
+++ b/sys/dev/pci/drm/radeon/radeon_cursor.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_cursor.c,v 1.5 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -27,6 +28,9 @@
#include <dev/pci/drm/radeon_drm.h>
#include "radeon.h"
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+
static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
{
struct radeon_device *rdev = crtc->dev->dev_private;
@@ -90,38 +94,16 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
- if (radeon_crtc->cursor_out_of_bounds)
- return;
-
if (ASIC_IS_DCE4(rdev)) {
- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
- upper_32_bits(radeon_crtc->cursor_addr));
- WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
- lower_32_bits(radeon_crtc->cursor_addr));
WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
} else if (ASIC_IS_AVIVO(rdev)) {
- if (rdev->family >= CHIP_RV770) {
- if (radeon_crtc->crtc_id)
- WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH,
- upper_32_bits(radeon_crtc->cursor_addr));
- else
- WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH,
- upper_32_bits(radeon_crtc->cursor_addr));
- }
-
- WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
- lower_32_bits(radeon_crtc->cursor_addr));
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
(AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
} else {
- /* offset is from DISP(2)_BASE_ADDRESS */
- WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
- radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr);
-
switch (radeon_crtc->crtc_id) {
case 0:
WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
@@ -139,150 +121,44 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
}
}
-static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
+static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
+ uint64_t gpu_addr)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
- int xorigin = 0, yorigin = 0;
- int w = radeon_crtc->cursor_width;
-
- radeon_crtc->cursor_x = x;
- radeon_crtc->cursor_y = y;
-
- if (ASIC_IS_AVIVO(rdev)) {
-	/* avivo cursors are offset into the total surface */
- x += crtc->x;
- y += crtc->y;
- }
-
- if (x < 0)
- xorigin = min(-x, radeon_crtc->max_cursor_width - 1);
- if (y < 0)
- yorigin = min(-y, radeon_crtc->max_cursor_height - 1);
-
- if (!ASIC_IS_AVIVO(rdev)) {
- x += crtc->x;
- y += crtc->y;
- }
- DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
-
- /* fixed on DCE6 and newer */
- if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) {
- int i = 0;
- struct drm_crtc *crtc_p;
-
- /*
-		 * avivo cursor images can't end on a 128 pixel boundary or
-		 * go past the end of the frame if both crtcs are enabled
- *
- * NOTE: It is safe to access crtc->enabled of other crtcs
- * without holding either the mode_config lock or the other
- * crtc's lock as long as write access to this flag _always_
- * grabs all locks.
- */
- list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
- if (crtc_p->enabled)
- i++;
- }
- if (i > 1) {
- int cursor_end, frame_end;
-
- cursor_end = x + w;
- frame_end = crtc->x + crtc->mode.crtc_hdisplay;
- if (cursor_end >= frame_end) {
- w = w - (cursor_end - frame_end);
- if (!(frame_end & 0x7f))
- w--;
- } else if (cursor_end <= 0) {
- goto out_of_bounds;
- } else if (!(cursor_end & 0x7f)) {
- w--;
- }
- if (w <= 0) {
- goto out_of_bounds;
- }
- }
- }
-
- if (x <= (crtc->x - w) || y <= (crtc->y - radeon_crtc->cursor_height) ||
- x >= (crtc->x + crtc->mode.hdisplay) ||
- y >= (crtc->y + crtc->mode.vdisplay))
- goto out_of_bounds;
-
- x += xorigin;
- y += yorigin;
if (ASIC_IS_DCE4(rdev)) {
- WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
- WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
- WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
- ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(gpu_addr));
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ gpu_addr & 0xffffffff);
} else if (ASIC_IS_AVIVO(rdev)) {
- WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
- WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
- WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
- ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
+ if (rdev->family >= CHIP_RV770) {
+ if (radeon_crtc->crtc_id)
+ WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
+ else
+ WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
+ }
+ WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ gpu_addr & 0xffffffff);
} else {
- x -= crtc->x;
- y -= crtc->y;
-
- if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
- y *= 2;
-
- WREG32(RADEON_CUR_HORZ_VERT_OFF + radeon_crtc->crtc_offset,
- (RADEON_CUR_LOCK
- | (xorigin << 16)
- | yorigin));
- WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
- (RADEON_CUR_LOCK
- | (x << 16)
- | y));
+ radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
/* offset is from DISP(2)_BASE_ADDRESS */
- WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
- radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr +
- yorigin * 256);
- }
-
- if (radeon_crtc->cursor_out_of_bounds) {
- radeon_crtc->cursor_out_of_bounds = false;
- if (radeon_crtc->cursor_bo)
- radeon_show_cursor(crtc);
+ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
}
-
- return 0;
-
- out_of_bounds:
- if (!radeon_crtc->cursor_out_of_bounds) {
- radeon_hide_cursor(crtc);
- radeon_crtc->cursor_out_of_bounds = true;
- }
- return 0;
}
-int radeon_crtc_cursor_move(struct drm_crtc *crtc,
- int x, int y)
-{
- int ret;
-
- radeon_lock_cursor(crtc, true);
- ret = radeon_cursor_move_locked(crtc, x, y);
- radeon_lock_cursor(crtc, false);
-
- return ret;
-}
-
-int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width,
- uint32_t height,
- int32_t hot_x,
- int32_t hot_y)
+int radeon_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct radeon_bo *robj;
+ uint64_t gpu_addr;
int ret;
if (!handle) {
@@ -292,8 +168,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
goto unpin;
}
- if ((width > radeon_crtc->max_cursor_width) ||
- (height > radeon_crtc->max_cursor_height)) {
+ if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
return -EINVAL;
}
@@ -306,47 +181,27 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
robj = gem_to_radeon_bo(obj);
ret = radeon_bo_reserve(robj, false);
- if (ret != 0) {
- drm_gem_object_unreference_unlocked(obj);
- return ret;
- }
+ if (unlikely(ret != 0))
+ goto fail;
/* Only 27 bit offset for legacy cursor */
ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
- &radeon_crtc->cursor_addr);
+ &gpu_addr);
radeon_bo_unreserve(robj);
- if (ret) {
- DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_unreference_unlocked(obj);
- return ret;
- }
+ if (ret)
+ goto fail;
- radeon_lock_cursor(crtc, true);
-
- if (width != radeon_crtc->cursor_width ||
- height != radeon_crtc->cursor_height ||
- hot_x != radeon_crtc->cursor_hot_x ||
- hot_y != radeon_crtc->cursor_hot_y) {
- int x, y;
-
- x = radeon_crtc->cursor_x + radeon_crtc->cursor_hot_x - hot_x;
- y = radeon_crtc->cursor_y + radeon_crtc->cursor_hot_y - hot_y;
-
- radeon_crtc->cursor_width = width;
- radeon_crtc->cursor_height = height;
- radeon_crtc->cursor_hot_x = hot_x;
- radeon_crtc->cursor_hot_y = hot_y;
-
- radeon_cursor_move_locked(crtc, x, y);
- }
+ radeon_crtc->cursor_width = width;
+ radeon_crtc->cursor_height = height;
+ radeon_lock_cursor(crtc, true);
+ radeon_set_cursor(crtc, obj, gpu_addr);
radeon_show_cursor(crtc);
-
radeon_lock_cursor(crtc, false);
unpin:
if (radeon_crtc->cursor_bo) {
- struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+ robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
ret = radeon_bo_reserve(robj, false);
if (likely(ret == 0)) {
radeon_bo_unpin(robj);
@@ -357,28 +212,100 @@ unpin:
radeon_crtc->cursor_bo = obj;
return 0;
+fail:
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
}
-/**
- * radeon_cursor_reset - Re-set the current cursor, if any.
- *
- * @crtc: drm crtc
- *
- * If the CRTC passed in currently has a cursor assigned, this function
- * makes sure it's visible.
- */
-void radeon_cursor_reset(struct drm_crtc *crtc)
+int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+ int x, int y)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct radeon_device *rdev = crtc->dev->dev_private;
+ int xorigin = 0, yorigin = 0;
+ int w = radeon_crtc->cursor_width;
- if (radeon_crtc->cursor_bo) {
- radeon_lock_cursor(crtc, true);
+ if (ASIC_IS_AVIVO(rdev)) {
+	/* avivo cursors are offset into the total surface */
+ x += crtc->x;
+ y += crtc->y;
+ }
+ DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
+ if (x < 0) {
+ xorigin = min(-x, CURSOR_WIDTH - 1);
+ x = 0;
+ }
+ if (y < 0) {
+ yorigin = min(-y, CURSOR_HEIGHT - 1);
+ y = 0;
+ }
+
+ /* fixed on DCE6 and newer */
+ if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) {
+ int i = 0;
+ struct drm_crtc *crtc_p;
+
+		/* avivo cursor images can't end on a 128 pixel boundary or
+		 * go past the end of the frame if both crtcs are enabled
+ */
+ list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
+ if (crtc_p->enabled)
+ i++;
+ }
+ if (i > 1) {
+ int cursor_end, frame_end;
- radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,
- radeon_crtc->cursor_y);
+ cursor_end = x - xorigin + w;
+ frame_end = crtc->x + crtc->mode.crtc_hdisplay;
+ if (cursor_end >= frame_end) {
+ w = w - (cursor_end - frame_end);
+ if (!(frame_end & 0x7f))
+ w--;
+ } else {
+ if (!(cursor_end & 0x7f))
+ w--;
+ }
+ if (w <= 0) {
+ w = 1;
+ cursor_end = x - xorigin + w;
+ if (!(cursor_end & 0x7f)) {
+ x--;
+ WARN_ON_ONCE(x < 0);
+ }
+ }
+ }
+ }
- radeon_show_cursor(crtc);
+ radeon_lock_cursor(crtc, true);
+ if (ASIC_IS_DCE4(rdev)) {
+ WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
+ WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
+ ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
+ WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
+ ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
+ } else {
+ if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
+ y *= 2;
- radeon_lock_cursor(crtc, false);
+ WREG32(RADEON_CUR_HORZ_VERT_OFF + radeon_crtc->crtc_offset,
+ (RADEON_CUR_LOCK
+ | (xorigin << 16)
+ | yorigin));
+ WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset,
+ (RADEON_CUR_LOCK
+ | (x << 16)
+ | y));
+ /* offset is from DISP(2)_BASE_ADDRESS */
+ WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
+ (yorigin * 256)));
}
+ radeon_lock_cursor(crtc, false);
+
+ return 0;
}
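
The re-added radeon_crtc_cursor_move() carries an AVIVO workaround: with both CRTCs enabled, the cursor image may neither end exactly on a 128-pixel boundary nor run past the frame. The width adjustment extracted as pure arithmetic, no register writes:

    #include <stdio.h>

    /* trim cursor width so its right edge avoids 128-px multiples
     * and stays inside the frame; mirrors the logic above */
    static int clamp_cursor_width(int x, int w, int frame_end)
    {
        int cursor_end = x + w;

        if (cursor_end >= frame_end) {
            w -= cursor_end - frame_end;
            if (!(frame_end & 0x7f))
                w--;
        } else if (!(cursor_end & 0x7f)) {
            w--;
        }
        return w > 0 ? w : 1;
    }

    int main(void)
    {
        printf("%d\n", clamp_cursor_width(1000, 64, 1024)); /* trimmed to 23 */
        printf("%d\n", clamp_cursor_width(100, 28, 1024));  /* 128 edge: 27 */
        printf("%d\n", clamp_cursor_width(100, 27, 1024));  /* untouched: 27 */
        return 0;
    }
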
diff --git a/sys/dev/pci/drm/radeon/radeon_device.c b/sys/dev/pci/drm/radeon/radeon_device.c
index ec631f66cd9..b9d1e72d15f 100644
--- a/sys/dev/pci/drm/radeon/radeon_device.c
+++ b/sys/dev/pci/drm/radeon/radeon_device.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_device.c,v 1.16 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -88,119 +89,9 @@ static const char radeon_family_name[][16] = {
"TAHITI",
"PITCAIRN",
"VERDE",
- "OLAND",
- "HAINAN",
- "BONAIRE",
- "KAVERI",
- "KABINI",
- "HAWAII",
- "MULLINS",
"LAST",
};
-#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
-#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
-
-struct radeon_px_quirk {
- u32 chip_vendor;
- u32 chip_device;
- u32 subsys_vendor;
- u32 subsys_device;
- u32 px_quirk_flags;
-};
-
-static struct radeon_px_quirk radeon_px_quirk_list[] = {
- /* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
- * https://bugzilla.kernel.org/show_bug.cgi?id=74551
- */
- { PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
- /* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
- * https://bugzilla.kernel.org/show_bug.cgi?id=51381
- */
- { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
- /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
- * https://bugzilla.kernel.org/show_bug.cgi?id=51381
- */
- { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
- /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
- * https://bugs.freedesktop.org/show_bug.cgi?id=101491
- */
- { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
- /* macbook pro 8.2 */
- { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
- { 0, 0, 0, 0, 0 },
-};
-
-bool radeon_is_px(struct drm_device *dev)
-{
- struct radeon_device *rdev = dev->dev_private;
-
- if (rdev->flags & RADEON_IS_PX)
- return true;
- return false;
-}
-
-static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
-{
- struct radeon_px_quirk *p = radeon_px_quirk_list;
-
- /* Apply PX quirks */
- while (p && p->chip_device != 0) {
- if (rdev->pdev->vendor == p->chip_vendor &&
- rdev->pdev->device == p->chip_device &&
- rdev->pdev->subsystem_vendor == p->subsys_vendor &&
- rdev->pdev->subsystem_device == p->subsys_device) {
- rdev->px_quirk_flags = p->px_quirk_flags;
- break;
- }
- ++p;
- }
-
- if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
- rdev->flags &= ~RADEON_IS_PX;
-}
-
-/**
- * radeon_program_register_sequence - program an array of registers.
- *
- * @rdev: radeon_device pointer
- * @registers: pointer to the register array
- * @array_size: size of the register array
- *
- * Programs an array of registers with AND and OR masks.
- * This is a helper for setting golden registers.
- */
-void radeon_program_register_sequence(struct radeon_device *rdev,
- const u32 *registers,
- const u32 array_size)
-{
- u32 tmp, reg, and_mask, or_mask;
- int i;
-
- if (array_size % 3)
- return;
-
- for (i = 0; i < array_size; i +=3) {
- reg = registers[i + 0];
- and_mask = registers[i + 1];
- or_mask = registers[i + 2];
-
- if (and_mask == 0xffffffff) {
- tmp = or_mask;
- } else {
- tmp = RREG32(reg);
- tmp &= ~and_mask;
- tmp |= or_mask;
- }
- WREG32(reg, tmp);
- }
-}
-
-void radeon_pci_config_reset(struct radeon_device *rdev)
-{
- pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
-}
-
/**
* radeon_surface_init - Clear GPU surface registers.
*
@@ -296,129 +187,6 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
}
/*
- * GPU doorbell aperture helpers function.
- */
-/**
- * radeon_doorbell_init - Init doorbell driver information.
- *
- * @rdev: radeon_device pointer
- *
- * Init doorbell driver information (CIK)
- * Returns 0 on success, error on failure.
- */
-static int radeon_doorbell_init(struct radeon_device *rdev)
-{
- /* doorbell bar mapping */
-#ifdef __linux__
- rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
- rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
-#endif
-
- rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
- if (rdev->doorbell.num_doorbells == 0)
- return -EINVAL;
-
-#ifdef __linux__
- rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
- if (rdev->doorbell.ptr == NULL) {
- return -ENOMEM;
- }
-#endif
- DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
- DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
-
- memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
-
- return 0;
-}
-
-/**
- * radeon_doorbell_fini - Tear down doorbell driver information.
- *
- * @rdev: radeon_device pointer
- *
- * Tear down doorbell driver information (CIK)
- */
-static void radeon_doorbell_fini(struct radeon_device *rdev)
-{
-#ifdef __linux__
- iounmap(rdev->doorbell.ptr);
- rdev->doorbell.ptr = NULL;
-#else
- if (rdev->doorbell.size > 0)
- bus_space_unmap(rdev->memt, rdev->doorbell.bsh,
- rdev->doorbell.size);
- rdev->doorbell.size = 0;
-#endif
-}
-
-/**
- * radeon_doorbell_get - Allocate a doorbell entry
- *
- * @rdev: radeon_device pointer
- * @doorbell: doorbell index
- *
- * Allocate a doorbell for use by the driver (all asics).
- * Returns 0 on success or -EINVAL on failure.
- */
-int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
-{
- unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
- if (offset < rdev->doorbell.num_doorbells) {
- __set_bit(offset, rdev->doorbell.used);
- *doorbell = offset;
- return 0;
- } else {
- return -EINVAL;
- }
-}
-
-/**
- * radeon_doorbell_free - Free a doorbell entry
- *
- * @rdev: radeon_device pointer
- * @doorbell: doorbell index
- *
- * Free a doorbell allocated for use by the driver (all asics)
- */
-void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
-{
- if (doorbell < rdev->doorbell.num_doorbells)
- __clear_bit(doorbell, rdev->doorbell.used);
-}
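
The removed doorbell helpers are a textbook bitmap allocator: find_first_zero_bit() picks the lowest free index, __set_bit() claims it, __clear_bit() releases it. A userspace sketch of the same scheme, with a fixed-size bitmap and simplified error handling:

    #include <stdio.h>
    #include <string.h>

    #define NUM_DOORBELLS 64
    #define BITS_PER_WORD (8 * sizeof(unsigned long))

    static unsigned long used[NUM_DOORBELLS / BITS_PER_WORD];

    static int doorbell_get(unsigned *out)
    {
        for (unsigned i = 0; i < NUM_DOORBELLS; i++) {
            unsigned long *w = &used[i / BITS_PER_WORD];
            unsigned long bit = 1ul << (i % BITS_PER_WORD);
            if (!(*w & bit)) {          /* first zero bit wins */
                *w |= bit;
                *out = i;
                return 0;
            }
        }
        return -1;                      /* aperture exhausted */
    }

    static void doorbell_free(unsigned i)
    {
        if (i < NUM_DOORBELLS)
            used[i / BITS_PER_WORD] &= ~(1ul << (i % BITS_PER_WORD));
    }

    int main(void)
    {
        unsigned a, b;

        memset(used, 0, sizeof(used));
        doorbell_get(&a);   /* 0 */
        doorbell_get(&b);   /* 1 */
        doorbell_free(a);
        doorbell_get(&a);   /* 0 again: lowest free index */
        printf("a=%u b=%u\n", a, b);
        return 0;
    }
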
-
-/**
- * radeon_doorbell_get_kfd_info - Report doorbell configuration required to
- * setup KFD
- *
- * @rdev: radeon_device pointer
- * @aperture_base: output returning doorbell aperture base physical address
- * @aperture_size: output returning doorbell aperture size in bytes
- * @start_offset: output returning # of doorbell bytes reserved for radeon.
- *
- * Radeon and the KFD share the doorbell aperture. Radeon sets it up,
- * takes doorbells required for its own rings and reports the setup to KFD.
- * Radeon reserved doorbells are at the start of the doorbell aperture.
- */
-void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
- phys_addr_t *aperture_base,
- size_t *aperture_size,
- size_t *start_offset)
-{
- /* The first num_doorbells are used by radeon.
- * KFD takes whatever's left in the aperture. */
- if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
- *aperture_base = rdev->doorbell.base;
- *aperture_size = rdev->doorbell.size;
- *start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
- } else {
- *aperture_base = 0;
- *aperture_size = 0;
- *start_offset = 0;
- }
-}
-
-/*
* radeon_wb_*()
 * Writeback is the method by which the GPU updates special pages
* in memory with the status of certain GPU events (fences, ring pointers,
@@ -434,6 +202,16 @@ void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
*/
void radeon_wb_disable(struct radeon_device *rdev)
{
+ int r;
+
+ if (rdev->wb.wb_obj) {
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0))
+ return;
+ radeon_bo_kunmap(rdev->wb.wb_obj);
+ radeon_bo_unpin(rdev->wb.wb_obj);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
+ }
rdev->wb.enabled = false;
}
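
Writeback pages are ordinary GTT memory the GPU DMA-writes status words into, so the CPU side reduces to a little-endian load at a fixed word offset. A hedged sketch of such a consumer; the slot offset and conversion helper are illustrative, not the driver's accessors:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* assumed conversion: writeback words are stored little-endian */
    static uint32_t le32_to_host(uint32_t v)
    {
        uint8_t b[4];

        memcpy(b, &v, sizeof(b));
        return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
            (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
    }

    int main(void)
    {
        /* stands in for the pinned GTT page the GPU DMA-writes into */
        uint32_t wb_page[1024] = { 0 };
        unsigned fence_slot = 16;              /* hypothetical word offset */
        uint8_t le_bytes[4] = { 42, 0, 0, 0 }; /* GPU wrote 42, LE */

        memcpy(&wb_page[fence_slot], le_bytes, sizeof(le_bytes));
        printf("fence seq: %u\n", le32_to_host(wb_page[fence_slot])); /* 42 */
        return 0;
    }
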
@@ -449,11 +227,6 @@ void radeon_wb_fini(struct radeon_device *rdev)
{
radeon_wb_disable(rdev);
if (rdev->wb.wb_obj) {
- if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
- radeon_bo_kunmap(rdev->wb.wb_obj);
- radeon_bo_unpin(rdev->wb.wb_obj);
- radeon_bo_unreserve(rdev->wb.wb_obj);
- }
radeon_bo_unref(&rdev->wb.wb_obj);
rdev->wb.wb = NULL;
rdev->wb.wb_obj = NULL;
@@ -475,32 +248,31 @@ int radeon_wb_init(struct radeon_device *rdev)
if (rdev->wb.wb_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
- &rdev->wb.wb_obj);
+ RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
return r;
}
- r = radeon_bo_reserve(rdev->wb.wb_obj, false);
- if (unlikely(r != 0)) {
- radeon_wb_fini(rdev);
- return r;
- }
- r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
- &rdev->wb.gpu_addr);
- if (r) {
- radeon_bo_unreserve(rdev->wb.wb_obj);
- dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
- radeon_wb_fini(rdev);
- return r;
- }
- r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ }
+ r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+ if (unlikely(r != 0)) {
+ radeon_wb_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+ &rdev->wb.gpu_addr);
+ if (r) {
radeon_bo_unreserve(rdev->wb.wb_obj);
- if (r) {
- dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
- radeon_wb_fini(rdev);
- return r;
- }
+ dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
+ radeon_wb_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+ radeon_bo_unreserve(rdev->wb.wb_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
+ radeon_wb_fini(rdev);
+ return r;
}
/* clear wb memory */
@@ -531,7 +303,9 @@ int radeon_wb_init(struct radeon_device *rdev)
rdev->wb.use_event = true;
}
+#ifdef DRMDEBUG
dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
+#endif
return 0;
}
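
The hunk above hoists the reserve/pin/kmap steps out of the allocation branch because radeon_wb_disable() now unpins the BO: on re-init the object may already exist yet still needs to be pinned and mapped again. The unwind ladder it relies on, sketched with hypothetical stand-ins for the radeon_bo_* helpers:

/* reserve -> pin -> kmap with step-by-step unwinding on error;
 * the bo_* prototypes below are hypothetical, not the driver's API. */
#include <stdint.h>

struct bo;
int bo_reserve(struct bo *);
int bo_pin(struct bo *, uint64_t *);
int bo_kmap(struct bo *, void **);
void bo_unreserve(struct bo *);

static int wb_map(struct bo *obj, uint64_t *gpu_addr, void **cpu_ptr)
{
	int r;

	r = bo_reserve(obj);		/* take the object lock */
	if (r)
		return r;
	r = bo_pin(obj, gpu_addr);	/* make the BO GPU-resident */
	if (r) {
		bo_unreserve(obj);
		return r;
	}
	r = bo_kmap(obj, cpu_ptr);	/* CPU mapping of the page */
	bo_unreserve(obj);		/* lock dropped on both paths */
	return r;			/* caller tears down on failure */
}
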
@@ -582,7 +356,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
uint64_t limit = (uint64_t)radeon_vram_limit << 20;
mc->vram_start = base;
- if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
+ if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
mc->real_vram_size = mc->aper_size;
mc->mc_vram_size = mc->aper_size;
@@ -617,7 +391,7 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
u64 size_af, size_bf;
- size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
+ size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
size_bf = mc->vram_start & ~mc->gtt_base_align;
if (size_bf > size_af) {
if (mc->gtt_size > size_bf) {
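
Both hunks above pin the top of the MC address space back to 0xFFFFFFFF (a 32-bit MC) now that the per-family mc_mask is gone, and the GTT is then placed in the larger gap around VRAM. The placement decision, condensed into a sketch (not driver code):

/* Pick the GTT start: compare aligned space above VRAM against space
 * below it within a 32-bit MC address space; align is the driver's
 * gtt_base_align, an alignment-minus-one mask. */
#include <stdint.h>

static uint64_t gtt_start(uint64_t vram_start, uint64_t vram_end,
    uint64_t align)
{
	uint64_t size_af = ((0xFFFFFFFFULL - vram_end) + align) & ~align;
	uint64_t size_bf = vram_start & ~align;

	if (size_bf > size_af)
		return 0;		/* below VRAM, from address 0 */
	return (vram_end + 1 + align) & ~align;	/* above VRAM */
}
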
@@ -640,23 +414,6 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
/*
* GPU helpers function.
*/
-
-/**
- * radeon_device_is_virtual - check if we are running in a virtual environment
- *
- * Check if the asic has been passed through to a VM (all asics).
- * Used at driver startup.
- * Returns true if virtual or false if not.
- */
-static bool radeon_device_is_virtual(void)
-{
-#ifdef CONFIG_X86
- return boot_cpu_has(X86_FEATURE_HYPERVISOR);
-#else
- return false;
-#endif
-}
-
/**
* radeon_card_posted - check if the hw has already been initialized
*
@@ -670,22 +427,12 @@ bool radeon_card_posted(struct radeon_device *rdev)
{
uint32_t reg;
- /* for pass through, always force asic_init for CI */
- if (rdev->family >= CHIP_BONAIRE &&
- radeon_device_is_virtual())
- return false;
-
- /* required for EFI mode on macbook2,1 which uses an r5xx asic */
#ifdef notyet
if (efi_enabled(EFI_BOOT) &&
- (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
- (rdev->family < CHIP_R600))
+ rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
return false;
#endif
- if (ASIC_IS_NODCE(rdev))
- goto check_memsize;
-
/* first check CRTCs */
if (ASIC_IS_DCE4(rdev)) {
reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
@@ -714,7 +461,6 @@ bool radeon_card_posted(struct radeon_device *rdev)
}
}
-check_memsize:
/* then check MEM_SIZE, in case the crtcs are off */
if (rdev->family >= CHIP_R600)
reg = RREG32(R600_CONFIG_MEMSIZE);
@@ -802,8 +548,6 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
if (!rdev->dummy_page.dmah)
return -ENOMEM;
rdev->dummy_page.addr = (bus_addr_t)rdev->dummy_page.dmah->map->dm_segs[0].ds_addr;
- rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
- RADEON_GART_PAGE_DUMMY);
return 0;
}
@@ -1007,13 +751,7 @@ int radeon_atombios_init(struct radeon_device *rdev)
atom_card_info->pll_write = cail_pll_write;
rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
- if (!rdev->mode_info.atom_context) {
- radeon_atombios_fini(rdev);
- return -ENOMEM;
- }
-
rw_init(&rdev->mode_info.atom_context->mutex, "atomcon");
- rw_init(&rdev->mode_info.atom_context->scratch_mutex, "atomscr");
radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
atom_allocate_fb_scratch(rdev->mode_info.atom_context);
return 0;
@@ -1032,11 +770,9 @@ void radeon_atombios_fini(struct radeon_device *rdev)
{
if (rdev->mode_info.atom_context) {
kfree(rdev->mode_info.atom_context->scratch);
+ kfree(rdev->mode_info.atom_context);
}
- kfree(rdev->mode_info.atom_context);
- rdev->mode_info.atom_context = NULL;
kfree(rdev->mode_info.atom_card_info);
- rdev->mode_info.atom_card_info = NULL;
}
/* COMBIOS */
@@ -1110,22 +846,6 @@ static bool radeon_check_pot_argument(int arg)
}
/**
- * Determine a sensible default GART size according to ASIC family.
- *
- * @family ASIC family name
- */
-static int radeon_gart_size_auto(enum radeon_family family)
-{
- /* default to a larger gart size on newer asics */
- if (family >= CHIP_TAHITI)
- return 2048;
- else if (family >= CHIP_RV770)
- return 1024;
- else
- return 512;
-}
-
-/**
* radeon_check_arguments - validate module params
*
* @rdev: radeon_device pointer
@@ -1142,18 +862,16 @@ static void radeon_check_arguments(struct radeon_device *rdev)
radeon_vram_limit = 0;
}
- if (radeon_gart_size == -1) {
- radeon_gart_size = radeon_gart_size_auto(rdev->family);
- }
/* gtt size must be power of two and greater or equal to 32M */
if (radeon_gart_size < 32) {
- dev_warn(rdev->dev, "gart size (%d) too small\n",
+ dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
radeon_gart_size);
- radeon_gart_size = radeon_gart_size_auto(rdev->family);
+ radeon_gart_size = 512;
+
} else if (!radeon_check_pot_argument(radeon_gart_size)) {
dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
radeon_gart_size);
- radeon_gart_size = radeon_gart_size_auto(rdev->family);
+ radeon_gart_size = 512;
}
rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
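
The validation above requires the module-supplied GART size to be a power of two and at least 32 before converting megabytes to bytes with a 20-bit shift. The two checks reduce to:

/* Power-of-two test behind radeon_check_pot_argument(), plus the
 * MB -> bytes conversion applied to the accepted value. */
#include <stdbool.h>
#include <stdint.h>

static bool is_pot(int arg)
{
	return arg > 0 && (arg & (arg - 1)) == 0;
}

static uint64_t mb_to_bytes(int mb)
{
	return (uint64_t)mb << 20;	/* e.g. 512 -> 0x20000000 */
}
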
@@ -1172,75 +890,41 @@ static void radeon_check_arguments(struct radeon_device *rdev)
radeon_agpmode = 0;
break;
}
+}
- if (!radeon_check_pot_argument(radeon_vm_size)) {
- dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
- radeon_vm_size);
- radeon_vm_size = 4;
- }
-
- if (radeon_vm_size < 1) {
-		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
- radeon_vm_size);
- radeon_vm_size = 4;
- }
-
- /*
- * Max GPUVM size for Cayman, SI and CI are 40 bits.
- */
- if (radeon_vm_size > 1024) {
- dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
- radeon_vm_size);
- radeon_vm_size = 4;
- }
-
- /* defines number of bits in page table versus page directory,
- * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
- * page table and the remaining bits are in the page directory */
- if (radeon_vm_block_size == -1) {
-
- /* Total bits covered by PD + PTs */
- unsigned bits = ilog2(radeon_vm_size) + 18;
-
- /* Make sure the PD is 4K in size up to 8GB address space.
- Above that split equal between PD and PTs */
- if (radeon_vm_size <= 8)
- radeon_vm_block_size = bits - 9;
- else
- radeon_vm_block_size = (bits + 3) / 2;
+#ifdef notyet
+/**
+ * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
+ * needed for waking up.
+ *
+ * @pdev: pci dev pointer
+ */
+static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
+{
- } else if (radeon_vm_block_size < 9) {
- dev_warn(rdev->dev, "VM page table size (%d) too small\n",
- radeon_vm_block_size);
- radeon_vm_block_size = 9;
+ /* 6600m in a macbook pro */
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
+ pdev->subsystem_device == 0x00e2) {
+ printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
+ return true;
}
- if (radeon_vm_block_size > 24 ||
- (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
- dev_warn(rdev->dev, "VM page table size (%d) too large\n",
- radeon_vm_block_size);
- radeon_vm_block_size = 9;
- }
+ return false;
}
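
The removed VM sizing code above also derived the page-directory/page-table split for a 4 KB-page GPU VA space: 12 bits of page offset, at least 9 bits of page table, remainder in the directory. Its arithmetic as a standalone sketch (VM size in GB, as in the removed module parameter):

/* Removed block-size computation: ilog2(size_gb) + 30 total VA bits,
 * minus the 12-bit page offset, leaves ilog2(size_gb) + 18 bits to
 * split between page directory and page tables. */
static unsigned ilog2_u(unsigned v)
{
	unsigned r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static unsigned vm_block_size(unsigned vm_size_gb)
{
	unsigned bits = ilog2_u(vm_size_gb) + 18;

	/* keep the PD one 4K page (9 PT bits) up to 8 GB of VA space,
	 * above that split roughly evenly between PD and PTs */
	if (vm_size_gb <= 8)
		return bits - 9;
	return (bits + 3) / 2;
}
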
/**
* radeon_switcheroo_set_state - set switcheroo state
*
* @pdev: pci dev pointer
- * @state: vga_switcheroo state
+ * @state: vga switcheroo state
*
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after they are powered up using ACPI methods.
*/
-#ifdef notyet
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct radeon_device *rdev = dev->dev_private;
-
- if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
- return;
-
+ pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (state == VGA_SWITCHEROO_ON) {
unsigned d3_delay = dev->pdev->d3_delay;
@@ -1248,10 +932,10 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP))
+ if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
dev->pdev->d3_delay = 20;
- radeon_resume_kms(dev, true, true);
+ radeon_resume_kms(dev);
dev->pdev->d3_delay = d3_delay;
@@ -1261,7 +945,7 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
printk(KERN_INFO "radeon: switched off\n");
drm_kms_helper_poll_disable(dev);
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- radeon_suspend_kms(dev, true, true);
+ radeon_suspend_kms(dev, pmm);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
@@ -1278,13 +962,12 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ bool can_switch;
- /*
- * FIXME: open_count is protected by drm_global_mutex but that would lead to
- * locking inversion with the driver load path. And the access here is
- * completely racy anyway. So don't bother with locking for now.
- */
- return dev->open_count == 0;
+ spin_lock(&dev->count_lock);
+ can_switch = (dev->open_count == 0);
+ spin_unlock(&dev->count_lock);
+ return can_switch;
}
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
@@ -1307,30 +990,26 @@ static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
* Called at driver startup.
*/
int radeon_device_init(struct radeon_device *rdev,
- struct drm_device *ddev,
- struct pci_dev *pdev,
- uint32_t flags)
+ struct drm_device *ddev)
{
+#ifdef DRMDEBUG
+ struct pci_dev *pdev = ddev->pdev;
+#endif
int r, i;
int dma_bits;
- bool runtime = false;
rdev->shutdown = false;
- rdev->ddev = ddev;
- rdev->pdev = pdev;
- rdev->flags = flags;
- rdev->family = flags & RADEON_FAMILY_MASK;
+ rdev->family = rdev->flags & RADEON_FAMILY_MASK;
rdev->is_atom_bios = false;
rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
- rdev->mc.gtt_size = 512 * 1024 * 1024;
+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
rdev->accel_working = false;
/* set up ring ids */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
rdev->ring[i].idx = i;
}
- rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
- printf("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
+ DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
radeon_family_name[rdev->family], pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device);
@@ -1342,27 +1021,26 @@ int radeon_device_init(struct radeon_device *rdev,
rw_init(&rdev->gem.mutex, "gem");
rw_init(&rdev->pm.mutex, "pm");
rw_init(&rdev->gpu_clock_mutex, "gpuclk");
- rw_init(&rdev->srbm_mutex, "srbm");
- rw_init(&rdev->grbm_idx_mutex, "grbm");
rw_init(&rdev->pm.mclk_lock, "mclk");
rw_init(&rdev->exclusive_lock, "rdnexc");
init_waitqueue_head(&rdev->irq.vblank_queue);
- rw_init(&rdev->mn_lock, "mnlk");
- hash_init(rdev->mn_hash);
r = radeon_gem_init(rdev);
if (r)
return r;
-
- radeon_check_arguments(rdev);
+ /* initialize vm here */
+ rw_init(&rdev->vm_manager.lock, "vmmgr");
/* Adjust VM size here.
- * Max GPUVM size for cayman+ is 40 bits.
+ * Currently set to 4GB ((1 << 20) 4k pages).
+ * Max GPUVM size for cayman and SI is 40 bits.
*/
- rdev->vm_manager.max_pfn = radeon_vm_size << 18;
+ rdev->vm_manager.max_pfn = 1 << 20;
+ INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
/* Set asic functions */
r = radeon_asic_init(rdev);
if (r)
return r;
+ radeon_check_arguments(rdev);
/* all of the newer IGP chips have an internal gart
* However some rs4xx report as AGP, so remove that here.
@@ -1376,17 +1054,6 @@ int radeon_device_init(struct radeon_device *rdev,
radeon_agp_disable(rdev);
}
- /* Set the internal MC address mask
- * This is the max address of the GPU's
- * internal address space.
- */
- if (rdev->family >= CHIP_CAYMAN)
- rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
- else if (rdev->family >= CHIP_CEDAR)
- rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
- else
- rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
-
/* set DMA mask + need_dma32 flags.
* PCIE - can handle 40-bits.
* IGP - can handle 40-bits
@@ -1413,44 +1080,22 @@ int radeon_device_init(struct radeon_device *rdev,
pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
printk(KERN_WARNING "radeon: No coherent DMA available.\n");
}
-#endif
/* Registers mapping */
/* TODO: block userspace mapping of io register */
+#endif
mtx_init(&rdev->mmio_idx_lock, IPL_TTY);
- mtx_init(&rdev->smc_idx_lock, IPL_TTY);
- mtx_init(&rdev->pll_idx_lock, IPL_TTY);
- mtx_init(&rdev->mc_idx_lock, IPL_TTY);
- mtx_init(&rdev->pcie_idx_lock, IPL_TTY);
- mtx_init(&rdev->pciep_idx_lock, IPL_TTY);
- mtx_init(&rdev->pif_idx_lock, IPL_TTY);
- mtx_init(&rdev->cg_idx_lock, IPL_TTY);
- mtx_init(&rdev->uvd_idx_lock, IPL_TTY);
- mtx_init(&rdev->rcu_idx_lock, IPL_TTY);
- mtx_init(&rdev->didt_idx_lock, IPL_TTY);
- mtx_init(&rdev->end_idx_lock, IPL_TTY);
-#ifdef __linux__
- if (rdev->family >= CHIP_BONAIRE) {
- rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
- rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
- } else {
- rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
- rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
- }
+#ifdef notyet
+ rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
+ rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
if (rdev->rmmio == NULL) {
return -ENOMEM;
}
-#endif
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
- /* doorbell bar mapping */
- if (rdev->family >= CHIP_BONAIRE)
- radeon_doorbell_init(rdev);
-
/* io port mapping */
-#ifdef linux
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
@@ -1460,39 +1105,21 @@ int radeon_device_init(struct radeon_device *rdev,
}
if (rdev->rio_mem == NULL)
DRM_ERROR("Unable to find PCI I/O BAR\n");
-#endif
-
- if (rdev->flags & RADEON_IS_PX)
- radeon_device_handle_px_quirks(rdev);
/* if we have > 1 VGA cards, then disable the radeon VGA resources */
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
-#ifdef notyet
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
-#endif
-
- if (rdev->flags & RADEON_IS_PX)
- runtime = true;
-#ifdef notyet
- vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
- if (runtime)
- vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
+ vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);
#endif
r = radeon_init(rdev);
if (r)
- goto failed;
-
- r = radeon_gem_debugfs_init(rdev);
- if (r) {
- DRM_ERROR("registering gem debugfs failed (%d).\n", r);
- }
+ return r;
- r = radeon_mst_debugfs_init(rdev);
- if (r) {
- DRM_ERROR("registering mst debugfs failed (%d).\n", r);
- }
+ r = radeon_ib_ring_tests(rdev);
+ if (r)
+ DRM_ERROR("ib ring test failed (%d).\n", r);
if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
/* Acceleration not working on AGP card try again
@@ -1503,28 +1130,8 @@ int radeon_device_init(struct radeon_device *rdev,
radeon_agp_disable(rdev);
r = radeon_init(rdev);
if (r)
- goto failed;
- }
-
- r = radeon_ib_ring_tests(rdev);
- if (r)
- DRM_ERROR("ib ring test failed (%d).\n", r);
-
- /*
- * Turks/Thames GPU will freeze whole laptop if DPM is not restarted
- * after the CP ring have chew one packet at least. Hence here we stop
- * and restart DPM after the radeon_ib_ring_tests().
- */
- if (rdev->pm.dpm_enabled &&
- (rdev->pm.pm_method == PM_METHOD_DPM) &&
- (rdev->family == CHIP_TURKS) &&
- (rdev->flags & RADEON_IS_MOBILITY)) {
- mutex_lock(&rdev->pm.mutex);
- radeon_dpm_disable(rdev);
- radeon_dpm_enable(rdev);
- mutex_unlock(&rdev->pm.mutex);
+ return r;
}
-
if ((radeon_testing & 1)) {
if (rdev->accel_working)
radeon_test_moves(rdev);
@@ -1544,16 +1151,11 @@ int radeon_device_init(struct radeon_device *rdev,
DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
}
return 0;
-
-failed:
-#ifdef notyet
- if (runtime)
- vga_switcheroo_fini_domain_pm_ops(rdev->dev);
-#endif
- return r;
}
+#ifdef __linux__
static void radeon_debugfs_remove_files(struct radeon_device *rdev);
+#endif
/**
* radeon_device_fini - tear down the driver
@@ -1570,31 +1172,18 @@ void radeon_device_fini(struct radeon_device *rdev)
/* evict vram memory */
radeon_bo_evict_vram(rdev);
radeon_fini(rdev);
+#ifdef notyet
vga_switcheroo_unregister_client(rdev->pdev);
- if (rdev->flags & RADEON_IS_PX)
- vga_switcheroo_fini_domain_pm_ops(rdev->dev);
vga_client_register(rdev->pdev, NULL, NULL, NULL);
-#ifdef __linux__
if (rdev->rio_mem)
pci_iounmap(rdev->pdev, rdev->rio_mem);
rdev->rio_mem = NULL;
iounmap(rdev->rmmio);
rdev->rmmio = NULL;
-#else
- if (rdev->rio_mem_size > 0)
- bus_space_unmap(rdev->iot, rdev->rio_mem, rdev->rio_mem_size);
- rdev->rio_mem_size = 0;
-
- if (rdev->rmmio_size > 0)
- bus_space_unmap(rdev->memt, rdev->rmmio_bsh, rdev->rmmio_size);
- rdev->rmmio_size = 0;
-#endif
- if (rdev->family >= CHIP_BONAIRE)
- radeon_doorbell_fini(rdev);
radeon_debugfs_remove_files(rdev);
+#endif
}
-
/*
* Suspend & resume.
*/
@@ -1608,17 +1197,22 @@ void radeon_device_fini(struct radeon_device *rdev)
* Returns 0 for success or an error on failure.
* Called at driver suspend.
*/
-int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
+int radeon_suspend_kms(struct drm_device *dev)
{
struct radeon_device *rdev;
struct drm_crtc *crtc;
struct drm_connector *connector;
int i, r;
+ bool force_completion = false;
if (dev == NULL || dev->dev_private == NULL) {
return -ENODEV;
}
-
+#ifdef notyet
+ if (state.event == PM_EVENT_PRETHAW) {
+ return 0;
+ }
+#endif
rdev = dev->dev_private;
if (rdev->shutdown)
return 0;
@@ -1630,28 +1224,16 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
drm_kms_helper_poll_disable(dev);
- drm_modeset_lock_all(dev);
/* turn off display hw */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
}
- drm_modeset_unlock_all(dev);
- /* unpin the front buffers and cursors */
+ /* unpin the front buffers */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
struct radeon_bo *robj;
- if (radeon_crtc->cursor_bo) {
- struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
- r = radeon_bo_reserve(robj, false);
- if (r == 0) {
- radeon_bo_unpin(robj);
- radeon_bo_unreserve(robj);
- }
- }
-
if (rfb == NULL || rfb->obj == NULL) {
continue;
}
@@ -1668,17 +1250,23 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
/* evict vram memory */
radeon_bo_evict_vram(rdev);
+ mutex_lock(&rdev->ring_lock);
/* wait for gpu to finish processing current batch */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
- r = radeon_fence_wait_empty(rdev, i);
+ r = radeon_fence_wait_empty_locked(rdev, i);
if (r) {
/* delay GPU reset to resume */
- radeon_fence_driver_force_completion(rdev, i);
+ force_completion = true;
}
}
+ if (force_completion) {
+ radeon_fence_driver_force_completion(rdev);
+ }
+ mutex_unlock(&rdev->ring_lock);
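
The restored suspend path drains every ring under ring_lock and, if any wait fails, force-completes fences once after the loop so suspend can still proceed and the GPU reset is deferred to resume. The collect-then-fallback pattern, sketched (stand-in names, not driver calls):

/* Drain all rings, remember failures, apply the fallback once. */
#include <stdbool.h>

#define NUM_RINGS 5
int wait_ring_empty(int ring);		/* hypothetical stand-in */
void force_complete_fences(void);	/* hypothetical stand-in */

void drain_rings(void)
{
	bool force = false;
	int i;

	for (i = 0; i < NUM_RINGS; i++)
		if (wait_ring_empty(i))
			force = true;	/* defer GPU reset to resume */
	if (force)
		force_complete_fences();
}
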
radeon_save_bios_scratch_regs(rdev);
+ radeon_pm_suspend(rdev);
radeon_suspend(rdev);
radeon_hpd_fini(rdev);
/* evict remaining vram memory */
@@ -1686,18 +1274,17 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
radeon_agp_suspend(rdev);
+#ifdef notyet
pci_save_state(dev->pdev);
- if (suspend) {
+ if (state.event == PM_EVENT_SUSPEND) {
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
}
-
- if (fbcon) {
- console_lock();
- radeon_fbdev_set_suspend(rdev, 1);
- console_unlock();
- }
+#endif
+ console_lock();
+ radeon_fbdev_set_suspend(rdev, 1);
+ console_unlock();
return 0;
}
@@ -1710,11 +1297,10 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
* Returns 0 for success or an error on failure.
* Called at driver resume.
*/
-int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
+int radeon_resume_kms(struct drm_device *dev)
{
struct drm_connector *connector;
struct radeon_device *rdev = dev->dev_private;
- struct drm_crtc *crtc;
int r;
#ifdef notyet
@@ -1722,18 +1308,15 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
return 0;
#endif
- if (fbcon) {
- console_lock();
- }
- if (resume) {
- pci_set_power_state(dev->pdev, PCI_D0);
- pci_restore_state(dev->pdev);
- if (pci_enable_device(dev->pdev)) {
- if (fbcon)
- console_unlock();
- return -1;
- }
+ console_lock();
+#ifdef notyet
+ pci_set_power_state(dev->pdev, PCI_D0);
+ pci_restore_state(dev->pdev);
+ if (pci_enable_device(dev->pdev)) {
+ console_unlock();
+ return -1;
}
+#endif
/* resume AGP if in use */
radeon_agp_resume(rdev);
radeon_resume(rdev);
@@ -1742,40 +1325,11 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
- if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
- /* do dpm late init */
- r = radeon_pm_late_init(rdev);
- if (r) {
- rdev->pm.dpm_enabled = false;
- DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
- }
- } else {
- /* resume old pm late */
- radeon_pm_resume(rdev);
- }
-
+ radeon_pm_resume(rdev);
radeon_restore_bios_scratch_regs(rdev);
- /* pin cursors */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-
- if (radeon_crtc->cursor_bo) {
- struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
- r = radeon_bo_reserve(robj, false);
- if (r == 0) {
- /* Only 27 bit offset for legacy cursor */
- r = radeon_bo_pin_restricted(robj,
- RADEON_GEM_DOMAIN_VRAM,
- ASIC_IS_AVIVO(rdev) ?
- 0 : 1 << 27,
- &radeon_crtc->cursor_addr);
- if (r != 0)
- DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
- radeon_bo_unreserve(robj);
- }
- }
- }
+ radeon_fbdev_set_suspend(rdev, 0);
+ console_unlock();
/* init dig PHYs, disp eng pll */
if (rdev->is_atom_bios) {
@@ -1792,27 +1346,13 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
/* reset hpd state */
radeon_hpd_init(rdev);
/* blat the mode back in */
- if (fbcon) {
- drm_helper_resume_force_mode(dev);
- /* turn on display hw */
- drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- }
- drm_modeset_unlock_all(dev);
+ drm_helper_resume_force_mode(dev);
+ /* turn on display hw */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
drm_kms_helper_poll_enable(dev);
-
- /* set the power state here in case we are a PX system or headless */
- if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
- radeon_pm_compute_clocks(rdev);
-
- if (fbcon) {
- radeon_fbdev_set_suspend(rdev, 0);
- console_unlock();
- }
-
return 0;
}
@@ -1835,19 +1375,10 @@ int radeon_gpu_reset(struct radeon_device *rdev)
int resched;
down_write(&rdev->exclusive_lock);
-
- if (!rdev->needs_reset) {
- up_write(&rdev->exclusive_lock);
- return 0;
- }
-
- atomic_inc(&rdev->gpu_reset_counter);
-
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
radeon_suspend(rdev);
- radeon_hpd_fini(rdev);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
@@ -1859,6 +1390,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
}
}
+retry:
r = radeon_asic_reset(rdev);
if (!r) {
dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
@@ -1867,75 +1399,44 @@ int radeon_gpu_reset(struct radeon_device *rdev)
radeon_restore_bios_scratch_regs(rdev);
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (!r && ring_data[i]) {
+ if (!r) {
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
radeon_ring_restore(rdev, &rdev->ring[i],
ring_sizes[i], ring_data[i]);
- } else {
- radeon_fence_driver_force_completion(rdev, i);
- kfree(ring_data[i]);
+ ring_sizes[i] = 0;
+ ring_data[i] = NULL;
}
- }
- if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
- /* do dpm late init */
- r = radeon_pm_late_init(rdev);
+ r = radeon_ib_ring_tests(rdev);
if (r) {
- rdev->pm.dpm_enabled = false;
- DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+ dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
+ if (saved) {
+ saved = false;
+ radeon_suspend(rdev);
+ goto retry;
+ }
}
} else {
- /* resume old pm late */
- radeon_pm_resume(rdev);
- }
-
- /* init dig PHYs, disp eng pll */
- if (rdev->is_atom_bios) {
- radeon_atom_encoder_init(rdev);
- radeon_atom_disp_eng_pll_init(rdev);
- /* turn on the BL */
- if (rdev->mode_info.bl_encoder) {
- u8 bl_level = radeon_get_backlight_level(rdev,
- rdev->mode_info.bl_encoder);
- radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
- bl_level);
+ radeon_fence_driver_force_completion(rdev);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ kfree(ring_data[i]);
}
}
- /* reset hpd state */
- radeon_hpd_init(rdev);
-
- ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
-
- rdev->in_reset = true;
- rdev->needs_reset = false;
-
-#ifdef notyet
- downgrade_write(&rdev->exclusive_lock);
-#endif
drm_helper_resume_force_mode(rdev->ddev);
- /* set the power state here in case we are a PX system or headless */
- if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
- radeon_pm_compute_clocks(rdev);
-
- if (!r) {
- r = radeon_ib_ring_tests(rdev);
- if (r && saved)
- r = -EAGAIN;
- } else {
+ ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
+ if (r) {
/* bad news, how to tell it to userspace ? */
dev_info(rdev->dev, "GPU reset failed\n");
}
- rdev->needs_reset = r == -EAGAIN;
- rdev->in_reset = false;
-
- up_read(&rdev->exclusive_lock);
+ up_write(&rdev->exclusive_lock);
return r;
}
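
The restored reset path retries once: if the IB ring tests fail after restoring the saved ring contents, it drops the saved state, suspends the hardware, and jumps back for a second ASIC reset without restoration. Its control flow, schematically (helper names hypothetical):

/* One-retry GPU reset: the first attempt restores backed-up rings; a
 * failed IB test triggers a second reset with the backup discarded. */
#include <stdbool.h>

int asic_reset(void);
void restore_rings(void);	/* no-op once the backup is dropped */
int ib_ring_tests(void);
void suspend_hw(void);

int gpu_reset_sketch(void)
{
	bool saved = true;	/* ring contents were backed up */
	int r;

retry:
	r = asic_reset();
	if (!r) {
		restore_rings();
		r = ib_ring_tests();
		if (r && saved) {
			saved = false;	/* drop the saved ring state */
			suspend_hw();
			goto retry;	/* one more reset attempt */
		}
	}
	return r;
}
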
+#ifdef __linux__
/*
* Debugfs
*/
@@ -1999,3 +1500,4 @@ void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif
+#endif // __linux__
diff --git a/sys/dev/pci/drm/radeon/radeon_devlist.h b/sys/dev/pci/drm/radeon/radeon_devlist.h
index e333d35624e..67edd716f53 100644
--- a/sys/dev/pci/drm/radeon/radeon_devlist.h
+++ b/sys/dev/pci/drm/radeon/radeon_devlist.h
@@ -1,27 +1,5 @@
/* THIS FILE AUTOMATICALLY GENERATED. DO NOT EDIT. */
static const struct pci_matchid radeon_devices[] = {
- { 0x1002, 0x1304 },
- { 0x1002, 0x1305 },
- { 0x1002, 0x1306 },
- { 0x1002, 0x1307 },
- { 0x1002, 0x1309 },
- { 0x1002, 0x130A },
- { 0x1002, 0x130B },
- { 0x1002, 0x130C },
- { 0x1002, 0x130D },
- { 0x1002, 0x130E },
- { 0x1002, 0x130F },
- { 0x1002, 0x1310 },
- { 0x1002, 0x1311 },
- { 0x1002, 0x1312 },
- { 0x1002, 0x1313 },
- { 0x1002, 0x1315 },
- { 0x1002, 0x1316 },
- { 0x1002, 0x1317 },
- { 0x1002, 0x1318 },
- { 0x1002, 0x131B },
- { 0x1002, 0x131C },
- { 0x1002, 0x131D },
{ 0x1002, 0x3150 },
{ 0x1002, 0x3151 },
{ 0x1002, 0x3152 },
@@ -75,6 +53,7 @@ static const struct pci_matchid radeon_devices[] = {
{ 0x1002, 0x4C64 },
{ 0x1002, 0x4C66 },
{ 0x1002, 0x4C67 },
+ { 0x1002, 0x4C6E },
{ 0x1002, 0x4E44 },
{ 0x1002, 0x4E45 },
{ 0x1002, 0x4E46 },
@@ -161,40 +140,6 @@ static const struct pci_matchid radeon_devices[] = {
{ 0x1002, 0x5e4c },
{ 0x1002, 0x5e4d },
{ 0x1002, 0x5e4f },
- { 0x1002, 0x6600 },
- { 0x1002, 0x6601 },
- { 0x1002, 0x6602 },
- { 0x1002, 0x6603 },
- { 0x1002, 0x6604 },
- { 0x1002, 0x6605 },
- { 0x1002, 0x6606 },
- { 0x1002, 0x6607 },
- { 0x1002, 0x6608 },
- { 0x1002, 0x6610 },
- { 0x1002, 0x6611 },
- { 0x1002, 0x6613 },
- { 0x1002, 0x6617 },
- { 0x1002, 0x6620 },
- { 0x1002, 0x6621 },
- { 0x1002, 0x6623 },
- { 0x1002, 0x6631 },
- { 0x1002, 0x6640 },
- { 0x1002, 0x6641 },
- { 0x1002, 0x6646 },
- { 0x1002, 0x6647 },
- { 0x1002, 0x6649 },
- { 0x1002, 0x6650 },
- { 0x1002, 0x6651 },
- { 0x1002, 0x6658 },
- { 0x1002, 0x665c },
- { 0x1002, 0x665d },
- { 0x1002, 0x665f },
- { 0x1002, 0x6660 },
- { 0x1002, 0x6663 },
- { 0x1002, 0x6664 },
- { 0x1002, 0x6665 },
- { 0x1002, 0x6667 },
- { 0x1002, 0x666F },
{ 0x1002, 0x6700 },
{ 0x1002, 0x6701 },
{ 0x1002, 0x6702 },
@@ -269,18 +214,6 @@ static const struct pci_matchid radeon_devices[] = {
{ 0x1002, 0x679B },
{ 0x1002, 0x679E },
{ 0x1002, 0x679F },
- { 0x1002, 0x67A0 },
- { 0x1002, 0x67A1 },
- { 0x1002, 0x67A2 },
- { 0x1002, 0x67A8 },
- { 0x1002, 0x67A9 },
- { 0x1002, 0x67AA },
- { 0x1002, 0x67B0 },
- { 0x1002, 0x67B1 },
- { 0x1002, 0x67B8 },
- { 0x1002, 0x67B9 },
- { 0x1002, 0x67BA },
- { 0x1002, 0x67BE },
{ 0x1002, 0x6800 },
{ 0x1002, 0x6801 },
{ 0x1002, 0x6802 },
@@ -305,7 +238,6 @@ static const struct pci_matchid radeon_devices[] = {
{ 0x1002, 0x6829 },
{ 0x1002, 0x682A },
{ 0x1002, 0x682B },
- { 0x1002, 0x682C },
{ 0x1002, 0x682D },
{ 0x1002, 0x682F },
{ 0x1002, 0x6830 },
@@ -630,38 +562,6 @@ static const struct pci_matchid radeon_devices[] = {
{ 0x1002, 0x9808 },
{ 0x1002, 0x9809 },
{ 0x1002, 0x980A },
- { 0x1002, 0x9830 },
- { 0x1002, 0x9831 },
- { 0x1002, 0x9832 },
- { 0x1002, 0x9833 },
- { 0x1002, 0x9834 },
- { 0x1002, 0x9835 },
- { 0x1002, 0x9836 },
- { 0x1002, 0x9837 },
- { 0x1002, 0x9838 },
- { 0x1002, 0x9839 },
- { 0x1002, 0x983a },
- { 0x1002, 0x983b },
- { 0x1002, 0x983c },
- { 0x1002, 0x983d },
- { 0x1002, 0x983e },
- { 0x1002, 0x983f },
- { 0x1002, 0x9850 },
- { 0x1002, 0x9851 },
- { 0x1002, 0x9852 },
- { 0x1002, 0x9853 },
- { 0x1002, 0x9854 },
- { 0x1002, 0x9855 },
- { 0x1002, 0x9856 },
- { 0x1002, 0x9857 },
- { 0x1002, 0x9858 },
- { 0x1002, 0x9859 },
- { 0x1002, 0x985A },
- { 0x1002, 0x985B },
- { 0x1002, 0x985C },
- { 0x1002, 0x985D },
- { 0x1002, 0x985E },
- { 0x1002, 0x985F },
{ 0x1002, 0x9900 },
{ 0x1002, 0x9901 },
{ 0x1002, 0x9903 },
diff --git a/sys/dev/pci/drm/radeon/radeon_display.c b/sys/dev/pci/drm/radeon/radeon_display.c
index 821c42c85b1..bd47050d20f 100644
--- a/sys/dev/pci/drm/radeon/radeon_display.c
+++ b/sys/dev/pci/drm/radeon/radeon_display.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_display.c,v 1.15 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -33,7 +34,6 @@
#include <dev/pci/drm/drm_plane_helper.h>
#include <dev/pci/drm/drm_edid.h>
-
static void avivo_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -64,8 +64,7 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
(radeon_crtc->lut_b[i] << 0));
}
- /* Only change bit 0 of LUT_SEL, other bits are set elsewhere */
- WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id, ~1);
+ WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
}
static void dce4_crtc_load_lut(struct drm_crtc *crtc)
@@ -151,17 +150,11 @@ static void dce5_crtc_load_lut(struct drm_crtc *crtc)
(NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
- (NI_OUTPUT_CSC_GRPH_MODE(radeon_crtc->output_csc) |
+ (NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) |
NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
/* XXX match this to the depth of the crtc fmt block, move to modeset? */
WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
- if (ASIC_IS_DCE8(rdev)) {
- /* XXX this only needs to be programmed once per crtc at startup,
- * not sure where the best place for it is
- */
- WREG32(CIK_ALPHA_CONTROL + radeon_crtc->crtc_offset,
- CIK_CURSOR_ALPHA_BLND_ENA);
- }
+
}
static void legacy_crtc_load_lut(struct drm_crtc *crtc)
@@ -248,21 +241,15 @@ static void radeon_crtc_destroy(struct drm_crtc *crtc)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
drm_crtc_cleanup(crtc);
- destroy_workqueue(radeon_crtc->flip_queue);
kfree(radeon_crtc);
}
-/**
- * radeon_unpin_work_func - unpin old buffer object
- *
- * @__work - kernel work item
- *
- * Unpin the old frame buffer object outside of the interrupt handler
+/*
+ * Handle unpin events outside the interrupt handler proper.
*/
-static void radeon_unpin_work_func(struct work_struct *__work)
+static void radeon_unpin_work_func(void *arg1)
{
- struct radeon_flip_work *work =
- container_of(__work, struct radeon_flip_work, unpin_work);
+ struct radeon_unpin_work *work = arg1;
int r;
/* unpin of the old buffer */
@@ -280,50 +267,41 @@ static void radeon_unpin_work_func(struct work_struct *__work)
kfree(work);
}
-void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
+void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+ struct radeon_unpin_work *work;
unsigned long flags;
u32 update_pending;
int vpos, hpos;
- /* can happen during initialization */
- if (radeon_crtc == NULL)
- return;
-
- /* Skip the pageflip completion check below (based on polling) on
- * asics which reliably support hw pageflip completion irqs. pflip
- * irqs are a reliable and race-free method of handling pageflip
- * completion detection. A use_pflipirq module parameter < 2 allows
- * to override this in case of asics with faulty pflip irqs.
- * A module parameter of 0 would only use this polling based path,
- * a parameter of 1 would use pflip irq only as a backup to this
- * path, as in Linux 3.16.
- */
- if ((radeon_use_pflipirq == 2) && ASIC_IS_DCE4(rdev))
- return;
-
spin_lock_irqsave(&rdev->ddev->event_lock, flags);
- if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
- DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
- "RADEON_FLIP_SUBMITTED(%d)\n",
- radeon_crtc->flip_status,
- RADEON_FLIP_SUBMITTED);
+ work = radeon_crtc->unpin_work;
+ if (work == NULL ||
+ (work->fence && !radeon_fence_signaled(work->fence))) {
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
return;
}
-
- update_pending = radeon_page_flip_pending(rdev, crtc_id);
+ /* New pageflip, or just completion of a previous one? */
+ if (!radeon_crtc->deferred_flip_completion) {
+ /* do the flip (mmio) */
+ update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base);
+ } else {
+ /* This is just a completion of a flip queued in crtc
+ * at last invocation. Make sure we go directly to
+ * completion routine.
+ */
+ update_pending = 0;
+ radeon_crtc->deferred_flip_completion = 0;
+ }
/* Has the pageflip already completed in crtc, or is it certain
* to complete in this vblank?
*/
if (update_pending &&
(DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev,
- crtc_id,
- USE_REAL_VBLANKSTART,
- &vpos, &hpos, NULL, NULL,
- &rdev->mode_info.crtcs[crtc_id]->base.hwmode)) &&
+ crtc_id, 0,
+ &vpos, &hpos, NULL, NULL, &rdev->mode_info.crtcs[crtc_id]->base.hwmode)) &&
((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) ||
(vpos < 0 && !ASIC_IS_AVIVO(rdev)))) {
/* crtc didn't flip in this target vblank interval,
@@ -334,43 +312,19 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
*/
update_pending = 0;
}
- spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
- if (!update_pending)
- radeon_crtc_handle_flip(rdev, crtc_id);
-}
-
-/**
- * radeon_crtc_handle_flip - page flip completed
- *
- * @rdev: radeon device pointer
- * @crtc_id: crtc number this event is for
- *
- * Called when we are sure that a page flip for this crtc is completed.
- */
-void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
-{
- struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
- struct radeon_flip_work *work;
- unsigned long flags;
-
- /* this can happen at init */
- if (radeon_crtc == NULL)
- return;
-
- spin_lock_irqsave(&rdev->ddev->event_lock, flags);
- work = radeon_crtc->flip_work;
- if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
- DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
- "RADEON_FLIP_SUBMITTED(%d)\n",
- radeon_crtc->flip_status,
- RADEON_FLIP_SUBMITTED);
+ if (update_pending) {
+ /* crtc didn't flip in this target vblank interval,
+ * but flip is pending in crtc. It will complete it
+ * in next vblank interval, so complete the flip at
+ * next vblank irq.
+ */
+ radeon_crtc->deferred_flip_completion = 1;
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
return;
}
- /* Pageflip completed. Clean up. */
- radeon_crtc->flip_status = RADEON_FLIP_NONE;
- radeon_crtc->flip_work = NULL;
+ /* Pageflip (will be) certainly completed in this vblank. Clean up. */
+ radeon_crtc->unpin_work = NULL;
/* wakeup userspace */
if (work->event)
@@ -379,119 +333,9 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
- radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
- queue_work(radeon_crtc->flip_queue, &work->unpin_work);
-}
-
-/**
- * radeon_flip_work_func - page flip framebuffer
- *
- * @work - kernel work item
- *
- * Wait for the buffer object to become idle and do the actual page flip
- */
-static void radeon_flip_work_func(struct work_struct *__work)
-{
- struct radeon_flip_work *work =
- container_of(__work, struct radeon_flip_work, flip_work);
- struct radeon_device *rdev = work->rdev;
- struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
-
- struct drm_crtc *crtc = &radeon_crtc->base;
- unsigned long flags;
- int r;
- int vpos, hpos, stat, min_udelay = 0;
- unsigned repcnt = 4;
- struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
-
- down_read(&rdev->exclusive_lock);
- if (work->fence) {
- struct radeon_fence *fence;
-
- fence = to_radeon_fence(work->fence);
- if (fence && fence->rdev == rdev) {
- r = radeon_fence_wait(fence, false);
- if (r == -EDEADLK) {
- up_read(&rdev->exclusive_lock);
- do {
- r = radeon_gpu_reset(rdev);
- } while (r == -EAGAIN);
- down_read(&rdev->exclusive_lock);
- }
- } else
- r = fence_wait(work->fence, false);
-
- if (r)
- DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
-
- /* We continue with the page flip even if we failed to wait on
- * the fence, otherwise the DRM core and userspace will be
- * confused about which BO the CRTC is scanning out
- */
-
- fence_put(work->fence);
- work->fence = NULL;
- }
-
- /* We borrow the event spin lock for protecting flip_status */
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
-
- /* set the proper interrupt */
- radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
-
- /* If this happens to execute within the "virtually extended" vblank
- * interval before the start of the real vblank interval then it needs
- * to delay programming the mmio flip until the real vblank is entered.
- * This prevents completing a flip too early due to the way we fudge
- * our vblank counter and vblank timestamps in order to work around the
- * problem that the hw fires vblank interrupts before actual start of
- * vblank (when line buffer refilling is done for a frame). It
- * complements the fudging logic in radeon_get_crtc_scanoutpos() for
- * timestamping and radeon_get_vblank_counter_kms() for vblank counts.
- *
- * In practice this won't execute very often unless on very fast
- * machines because the time window for this to happen is very small.
- */
- while (radeon_crtc->enabled && --repcnt) {
- /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
- * start in hpos, and to the "fudged earlier" vblank start in
- * vpos.
- */
- stat = radeon_get_crtc_scanoutpos(rdev->ddev, work->crtc_id,
- GET_DISTANCE_TO_VBLANKSTART,
- &vpos, &hpos, NULL, NULL,
- &crtc->hwmode);
-
- if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
- (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
- !(vpos >= 0 && hpos <= 0))
- break;
-
- /* Sleep at least until estimated real start of hw vblank */
- min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
- if (min_udelay > vblank->framedur_ns / 2000) {
- /* Don't wait ridiculously long - something is wrong */
- repcnt = 0;
- break;
- }
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- usleep_range(min_udelay, 2 * min_udelay);
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- };
-
- if (!repcnt)
- DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
- "framedur %d, linedur %d, stat %d, vpos %d, "
- "hpos %d\n", work->crtc_id, min_udelay,
- vblank->framedur_ns / 1000,
- vblank->linedur_ns / 1000, stat, vpos, hpos);
-
- /* do the flip (mmio) */
- radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
-
- radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- up_read(&rdev->exclusive_lock);
+ radeon_fence_unref(&work->fence);
+ radeon_post_page_flip(work->rdev, work->crtc_id);
+ task_add(systq, &work->task);
}
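
The restored handler decides inside the vblank interrupt whether a pending flip has certainly completed: either the mmio flip already latched (update_pending is 0), or the scanout position is past 99% of the active lines, or negative (already in vblank) on chips where that position is reliable; otherwise completion is deferred to the next vblank. The test in isolation (illustrative):

/* Flip-completion heuristic from the restored vblank handler. */
#include <stdbool.h>

static bool flip_completes_this_vblank(bool update_pending, int vpos,
    int vdisplay, bool vblank_pos_reliable)
{
	if (!update_pending)
		return true;			/* already latched */
	if (vpos >= (99 * vdisplay) / 100)
		return true;			/* about to latch */
	if (vpos < 0 && vblank_pos_reliable)
		return true;			/* already in vblank */
	return false;				/* defer to next vblank */
}
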
static int radeon_crtc_page_flip(struct drm_crtc *crtc,
@@ -505,57 +349,69 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
struct radeon_framebuffer *old_radeon_fb;
struct radeon_framebuffer *new_radeon_fb;
struct drm_gem_object *obj;
- struct radeon_flip_work *work;
- struct radeon_bo *new_rbo;
- uint32_t tiling_flags, pitch_pixels;
- uint64_t base;
+ struct radeon_bo *rbo;
+ struct radeon_unpin_work *work;
unsigned long flags;
+ u32 tiling_flags, pitch_pixels;
+ u64 base;
int r;
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
- INIT_WORK(&work->flip_work, radeon_flip_work_func);
- INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
-
+ work->event = event;
work->rdev = rdev;
work->crtc_id = radeon_crtc->crtc_id;
- work->event = event;
-
- /* schedule unpin of the old buffer */
old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
+ new_radeon_fb = to_radeon_framebuffer(fb);
+ /* schedule unpin of the old buffer */
obj = old_radeon_fb->obj;
-
/* take a reference to the old object */
drm_gem_object_reference(obj);
- work->old_rbo = gem_to_radeon_bo(obj);
-
- new_radeon_fb = to_radeon_framebuffer(fb);
+ rbo = gem_to_radeon_bo(obj);
+ work->old_rbo = rbo;
obj = new_radeon_fb->obj;
- new_rbo = gem_to_radeon_bo(obj);
+ rbo = gem_to_radeon_bo(obj);
+
+ spin_lock(&rbo->tbo.bdev->fence_lock);
+ if (rbo->tbo.sync_obj)
+ work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
+ spin_unlock(&rbo->tbo.bdev->fence_lock);
+
+ task_set(&work->task, radeon_unpin_work_func, work);
+
+ /* We borrow the event spin lock for protecting unpin_work */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (radeon_crtc->unpin_work) {
+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+ r = -EBUSY;
+ goto unlock_free;
+ }
+ radeon_crtc->unpin_work = work;
+ radeon_crtc->deferred_flip_completion = 0;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
/* pin the new buffer */
- DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
- work->old_rbo, new_rbo);
+ DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
+ work->old_rbo, rbo);
- r = radeon_bo_reserve(new_rbo, false);
+ r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0)) {
DRM_ERROR("failed to reserve new rbo buffer before flip\n");
- goto cleanup;
+ goto pflip_cleanup;
}
/* Only 27 bit offset for legacy CRTC */
- r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM,
ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
if (unlikely(r != 0)) {
- radeon_bo_unreserve(new_rbo);
+ radeon_bo_unreserve(rbo);
r = -EINVAL;
DRM_ERROR("failed to pin new rbo buffer before flip\n");
- goto cleanup;
+ goto pflip_cleanup;
}
- work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
- radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
- radeon_bo_unreserve(new_rbo);
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(rbo);
if (!ASIC_IS_AVIVO(rdev)) {
/* crtc offset is from display base addr not FB location */
@@ -592,103 +448,52 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
}
base &= ~7;
}
- work->base = base;
-
- r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
- if (r) {
- DRM_ERROR("failed to get vblank before flip\n");
- goto pflip_cleanup;
- }
-
- /* We borrow the event spin lock for protecting flip_work */
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
- DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- r = -EBUSY;
- goto vblank_cleanup;
- }
- radeon_crtc->flip_status = RADEON_FLIP_PENDING;
- radeon_crtc->flip_work = work;
+ spin_lock_irqsave(&dev->event_lock, flags);
+ work->new_crtc_base = base;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
/* update crtc fb */
crtc->primary->fb = fb;
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ r = drm_vblank_get(dev, radeon_crtc->crtc_id);
+ if (r) {
+ DRM_ERROR("failed to get vblank before flip\n");
+ goto pflip_cleanup1;
+ }
- queue_work(radeon_crtc->flip_queue, &work->flip_work);
- return 0;
+ /* set the proper interrupt */
+ radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
-vblank_cleanup:
- drm_vblank_put(crtc->dev, radeon_crtc->crtc_id);
+ return 0;
-pflip_cleanup:
- if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
+pflip_cleanup1:
+ if (unlikely(radeon_bo_reserve(rbo, false) != 0)) {
DRM_ERROR("failed to reserve new rbo in error path\n");
- goto cleanup;
+ goto pflip_cleanup;
}
- if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
+ if (unlikely(radeon_bo_unpin(rbo) != 0)) {
DRM_ERROR("failed to unpin new rbo in error path\n");
}
- radeon_bo_unreserve(new_rbo);
+ radeon_bo_unreserve(rbo);
-cleanup:
- drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
- fence_put(work->fence);
+pflip_cleanup:
+ spin_lock_irqsave(&dev->event_lock, flags);
+ radeon_crtc->unpin_work = NULL;
+unlock_free:
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
+ radeon_fence_unref(&work->fence);
kfree(work);
+
return r;
}
-static int
-radeon_crtc_set_config(struct drm_mode_set *set)
-{
- struct drm_device *dev;
- struct radeon_device *rdev;
- struct drm_crtc *crtc;
- bool active = false;
- int ret;
-
- if (!set || !set->crtc)
- return -EINVAL;
-
- dev = set->crtc->dev;
-
- ret = pm_runtime_get_sync(dev->dev);
- if (ret < 0)
- return ret;
-
- ret = drm_crtc_helper_set_config(set);
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
- if (crtc->enabled)
- active = true;
-
- pm_runtime_mark_last_busy(dev->dev);
-
- rdev = dev->dev_private;
- /* if we have active crtcs and we don't have a power ref,
- take the current one */
- if (active && !rdev->have_disp_power_ref) {
- rdev->have_disp_power_ref = true;
- return ret;
- }
- /* if we have no active crtcs, then drop the power ref
- we got before */
- if (!active && rdev->have_disp_power_ref) {
- pm_runtime_put_autosuspend(dev->dev);
- rdev->have_disp_power_ref = false;
- }
-
- /* drop the power reference we got coming in here */
- pm_runtime_put_autosuspend(dev->dev);
- return ret;
-}
static const struct drm_crtc_funcs radeon_crtc_funcs = {
- .cursor_set2 = radeon_crtc_cursor_set2,
+ .cursor_set = radeon_crtc_cursor_set,
.cursor_move = radeon_crtc_cursor_move,
.gamma_set = radeon_crtc_gamma_set,
- .set_config = radeon_crtc_set_config,
+ .set_config = drm_crtc_helper_set_config,
.destroy = radeon_crtc_destroy,
.page_flip = radeon_crtc_page_flip,
};
@@ -707,19 +512,8 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
radeon_crtc->crtc_id = index;
- radeon_crtc->flip_queue = create_singlethread_workqueue("radeon-crtc");
rdev->mode_info.crtcs[index] = radeon_crtc;
- if (rdev->family >= CHIP_BONAIRE) {
- radeon_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
- radeon_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
- } else {
- radeon_crtc->max_cursor_width = CURSOR_WIDTH;
- radeon_crtc->max_cursor_height = CURSOR_HEIGHT;
- }
- dev->mode_config.cursor_width = radeon_crtc->max_cursor_width;
- dev->mode_config.cursor_height = radeon_crtc->max_cursor_height;
-
#if 0
radeon_crtc->mode_set.crtc = &radeon_crtc->base;
radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1);
@@ -739,7 +533,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
}
#ifdef DRMDEBUG
-static const char *encoder_names[38] = {
+static const char *encoder_names[37] = {
"NONE",
"INTERNAL_LVDS",
"INTERNAL_TMDS1",
@@ -776,8 +570,7 @@ static const char *encoder_names[38] = {
"INTERNAL_UNIPHY2",
"NUTMEG",
"TRAVIS",
- "INTERNAL_VCE",
- "INTERNAL_UNIPHY3",
+ "INTERNAL_VCE"
};
static const char *hpd_names[6] = {
@@ -788,10 +581,11 @@ static const char *hpd_names[6] = {
"HPD5",
"HPD6",
};
-#endif
+#endif /* DRMDEBUG */
static void radeon_print_display_setup(struct drm_device *dev)
{
+#ifdef DRMDEBUG
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct drm_encoder *encoder;
@@ -864,6 +658,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
}
i++;
}
+#endif /* DRMDEBUG */
}
static bool radeon_setup_enc_conn(struct drm_device *dev)
@@ -893,230 +688,176 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
return ret;
}
-/* avivo */
-
-/**
- * avivo_reduce_ratio - fractional number reduction
- *
- * @nom: nominator
- * @den: denominator
- * @nom_min: minimum value for nominator
- * @den_min: minimum value for denominator
- *
- * Find the greatest common divisor and apply it on both nominator and
- * denominator, but make nominator and denominator are at least as large
- * as their minimum values.
- */
-static void avivo_reduce_ratio(unsigned *nom, unsigned *den,
- unsigned nom_min, unsigned den_min)
+int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
{
- unsigned tmp;
-
- /* reduce the numbers to a simpler ratio */
- tmp = gcd(*nom, *den);
- *nom /= tmp;
- *den /= tmp;
-
- /* make sure nominator is large enough */
- if (*nom < nom_min) {
- tmp = DIV_ROUND_UP(nom_min, *nom);
- *nom *= tmp;
- *den *= tmp;
+ struct drm_device *dev = radeon_connector->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int ret = 0;
+
+ /* on hw with routers, select right port */
+ if (radeon_connector->router.ddc_valid)
+ radeon_router_select_ddc_port(radeon_connector);
+
+ if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
+ ENCODER_OBJECT_ID_NONE) {
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+ if (dig->dp_i2c_bus)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &dig->dp_i2c_bus->adapter);
+ } else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+ (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+ if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
+ dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &dig->dp_i2c_bus->adapter);
+ else if (radeon_connector->ddc_bus && !radeon_connector->edid)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->adapter);
+ } else {
+ if (radeon_connector->ddc_bus && !radeon_connector->edid)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->adapter);
}
- /* make sure the denominator is large enough */
- if (*den < den_min) {
- tmp = DIV_ROUND_UP(den_min, *den);
- *nom *= tmp;
- *den *= tmp;
+ if (!radeon_connector->edid) {
+ if (rdev->is_atom_bios) {
+ /* some laptops provide a hardcoded edid in rom for LCDs */
+ if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) ||
+ (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)))
+ radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+ } else
+ /* some servers provide a hardcoded edid in rom for KVMs */
+ radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+ }
+ if (radeon_connector->edid) {
+ drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
+ ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
+ drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
+ return ret;
}
+ drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
+ return 0;
}
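
radeon_ddc_get_modes() above probes EDID sources in a fixed order: a DP-bridge i2c bus if present, the DP/eDP aux i2c for DP sinks, the plain DDC bus, and finally a BIOS-hardcoded EDID (laptop LCD/eDP panels on ATOM, KVM consoles on combios). The fallback chain, condensed with hypothetical helper names:

/* Condensed EDID probe order; each edid_from_*() stands in for one
 * of the drm_get_edid()/radeon_bios_get_hardcoded_edid() paths. */
#include <stddef.h>

struct edid;
struct edid *edid_from_dp_bridge(void);
struct edid *edid_from_dp_aux_i2c(void);
struct edid *edid_from_ddc_bus(void);
struct edid *edid_from_vbios(void);

static struct edid *probe_edid(int has_dp_bridge, int is_dp_or_edp)
{
	struct edid *e = NULL;

	if (has_dp_bridge)
		e = edid_from_dp_bridge();
	else if (is_dp_or_edp)
		e = edid_from_dp_aux_i2c();
	if (e == NULL)
		e = edid_from_ddc_bus();
	if (e == NULL)
		e = edid_from_vbios();	/* hardcoded EDID fallback */
	return e;
}
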
-/**
- * avivo_get_fb_ref_div - feedback and ref divider calculation
- *
- * @nom: numerator
- * @den: denominator
- * @post_div: post divider
- * @fb_div_max: feedback divider maximum
- * @ref_div_max: reference divider maximum
- * @fb_div: resulting feedback divider
- * @ref_div: resulting reference divider
- *
- * Calculate feedback and reference divider for a given post divider. Makes
- * sure we stay within the limits.
- */
-static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
- unsigned fb_div_max, unsigned ref_div_max,
- unsigned *fb_div, unsigned *ref_div)
+/* avivo */
+static void avivo_get_fb_div(struct radeon_pll *pll,
+ u32 target_clock,
+ u32 post_div,
+ u32 ref_div,
+ u32 *fb_div,
+ u32 *frac_fb_div)
{
- /* limit reference * post divider to a maximum */
- ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
+ u32 tmp = post_div * ref_div;
- /* get matching reference and feedback divider */
- *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
- *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
+ tmp *= target_clock;
+ *fb_div = tmp / pll->reference_freq;
+ *frac_fb_div = tmp % pll->reference_freq;
- /* limit fb divider to its maximum */
- if (*fb_div > fb_div_max) {
- *ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
- *fb_div = fb_div_max;
- }
+ if (*fb_div > pll->max_feedback_div)
+ *fb_div = pll->max_feedback_div;
+ else if (*fb_div < pll->min_feedback_div)
+ *fb_div = pll->min_feedback_div;
}
-/**
- * radeon_compute_pll_avivo - compute PLL parameters
- *
- * @pll: information about the PLL
- * @dot_clock_p: resulting pixel clock
- * @fb_div_p: resulting feedback divider
- * @frac_fb_div_p: fractional part of the feedback divider
- * @ref_div_p: resulting reference divider
- * @post_div_p: resulting post divider
- *
- * Try to calculate the PLL parameters to generate the given frequency:
- * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
- */
-void radeon_compute_pll_avivo(struct radeon_pll *pll,
- u32 freq,
- u32 *dot_clock_p,
- u32 *fb_div_p,
- u32 *frac_fb_div_p,
- u32 *ref_div_p,
- u32 *post_div_p)
+static u32 avivo_get_post_div(struct radeon_pll *pll,
+ u32 target_clock)
{
- unsigned target_clock = pll->flags & RADEON_PLL_USE_FRAC_FB_DIV ?
- freq : freq / 10;
-
- unsigned fb_div_min, fb_div_max, fb_div;
- unsigned post_div_min, post_div_max, post_div;
- unsigned ref_div_min, ref_div_max, ref_div;
- unsigned post_div_best, diff_best;
- unsigned nom, den;
+ u32 vco, post_div, tmp;
- /* determine allowed feedback divider range */
- fb_div_min = pll->min_feedback_div;
- fb_div_max = pll->max_feedback_div;
+ if (pll->flags & RADEON_PLL_USE_POST_DIV)
+ return pll->post_div;
- if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
- fb_div_min *= 10;
- fb_div_max *= 10;
+ if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
+ if (pll->flags & RADEON_PLL_IS_LCD)
+ vco = pll->lcd_pll_out_min;
+ else
+ vco = pll->pll_out_min;
+ } else {
+ if (pll->flags & RADEON_PLL_IS_LCD)
+ vco = pll->lcd_pll_out_max;
+ else
+ vco = pll->pll_out_max;
}
- /* determine allowed ref divider range */
- if (pll->flags & RADEON_PLL_USE_REF_DIV)
- ref_div_min = pll->reference_div;
- else
- ref_div_min = pll->min_ref_div;
-
- if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV &&
- pll->flags & RADEON_PLL_USE_REF_DIV)
- ref_div_max = pll->reference_div;
- else if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
- /* fix for problems on RS880 */
- ref_div_max = min(pll->max_ref_div, 7u);
- else
- ref_div_max = pll->max_ref_div;
+ post_div = vco / target_clock;
+ tmp = vco % target_clock;
- /* determine allowed post divider range */
- if (pll->flags & RADEON_PLL_USE_POST_DIV) {
- post_div_min = pll->post_div;
- post_div_max = pll->post_div;
+ if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
+ if (tmp)
+ post_div++;
} else {
- unsigned vco_min, vco_max;
-
- if (pll->flags & RADEON_PLL_IS_LCD) {
- vco_min = pll->lcd_pll_out_min;
- vco_max = pll->lcd_pll_out_max;
- } else {
- vco_min = pll->pll_out_min;
- vco_max = pll->pll_out_max;
- }
-
- if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
- vco_min *= 10;
- vco_max *= 10;
- }
-
- post_div_min = vco_min / target_clock;
- if ((target_clock * post_div_min) < vco_min)
- ++post_div_min;
- if (post_div_min < pll->min_post_div)
- post_div_min = pll->min_post_div;
-
- post_div_max = vco_max / target_clock;
- if ((target_clock * post_div_max) > vco_max)
- --post_div_max;
- if (post_div_max > pll->max_post_div)
- post_div_max = pll->max_post_div;
+ if (!tmp)
+ post_div--;
}
- /* represent the searched ratio as fractional number */
- nom = target_clock;
- den = pll->reference_freq;
+ if (post_div > pll->max_post_div)
+ post_div = pll->max_post_div;
+ else if (post_div < pll->min_post_div)
+ post_div = pll->min_post_div;
- /* reduce the numbers to a simpler ratio */
- avivo_reduce_ratio(&nom, &den, fb_div_min, post_div_min);
+ return post_div;
+}
- /* now search for a post divider */
- if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
- post_div_best = post_div_min;
- else
- post_div_best = post_div_max;
- diff_best = ~0;
+#define MAX_TOLERANCE 10
- for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
- unsigned diff;
- avivo_get_fb_ref_div(nom, den, post_div, fb_div_max,
- ref_div_max, &fb_div, &ref_div);
- diff = abs(target_clock - (pll->reference_freq * fb_div) /
- (ref_div * post_div));
+void radeon_compute_pll_avivo(struct radeon_pll *pll,
+ u32 freq,
+ u32 *dot_clock_p,
+ u32 *fb_div_p,
+ u32 *frac_fb_div_p,
+ u32 *ref_div_p,
+ u32 *post_div_p)
+{
+ u32 target_clock = freq / 10;
+ u32 post_div = avivo_get_post_div(pll, target_clock);
+ u32 ref_div = pll->min_ref_div;
+ u32 fb_div = 0, frac_fb_div = 0, tmp;
- if (diff < diff_best || (diff == diff_best &&
- !(pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP))) {
+ if (pll->flags & RADEON_PLL_USE_REF_DIV)
+ ref_div = pll->reference_div;
- post_div_best = post_div;
- diff_best = diff;
+ if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+ avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div);
+ frac_fb_div = (100 * frac_fb_div) / pll->reference_freq;
+ if (frac_fb_div >= 5) {
+ frac_fb_div -= 5;
+ frac_fb_div = frac_fb_div / 10;
+ frac_fb_div++;
}
- }
- post_div = post_div_best;
-
- /* get the feedback and reference divider for the optimal value */
- avivo_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
- &fb_div, &ref_div);
-
- /* reduce the numbers to a simpler ratio once more */
- /* this also makes sure that the reference divider is large enough */
- avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);
-
- /* avoid high jitter with small fractional dividers */
- if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
- fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50);
- if (fb_div < fb_div_min) {
- unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
- fb_div *= tmp;
- ref_div *= tmp;
+ if (frac_fb_div >= 10) {
+ fb_div++;
+ frac_fb_div = 0;
}
- }
-
- /* and finally save the result */
- if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
- *fb_div_p = fb_div / 10;
- *frac_fb_div_p = fb_div % 10;
} else {
- *fb_div_p = fb_div;
- *frac_fb_div_p = 0;
+ while (ref_div <= pll->max_ref_div) {
+ avivo_get_fb_div(pll, target_clock, post_div, ref_div,
+ &fb_div, &frac_fb_div);
+ if (frac_fb_div >= (pll->reference_freq / 2))
+ fb_div++;
+ frac_fb_div = 0;
+ tmp = (pll->reference_freq * fb_div) / (post_div * ref_div);
+ tmp = (tmp * 10000) / target_clock;
+
+ if (tmp > (10000 + MAX_TOLERANCE))
+ ref_div++;
+ else if (tmp >= (10000 - MAX_TOLERANCE))
+ break;
+ else
+ ref_div++;
+ }
}
- *dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
- (pll->reference_freq * *frac_fb_div_p)) /
- (ref_div * post_div * 10);
+ *dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) /
+ (ref_div * post_div * 10);
+ *fb_div_p = fb_div;
+ *frac_fb_div_p = frac_fb_div;
*ref_div_p = ref_div;
*post_div_p = post_div;
-
- DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
- freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
- ref_div, post_div);
+ DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n",
+ *dot_clock_p, fb_div, frac_fb_div, ref_div, post_div);
}
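
The avivo path above picks a post divider from the VCO limits, then walks ref_div upward until the achieved clock, dot_clock = (reference_freq * fb_div) / (ref_div * post_div), lands within MAX_TOLERANCE (0.1%, expressed in units of 1/10000) of the target. Below is a standalone sketch of that search, using invented limits (27 MHz reference, 108 MHz target, fixed post divider of 2) rather than values from any real PLL:

#include <stdio.h>
#include <stdint.h>

#define MAX_TOLERANCE	10	/* +/- 0.1%, in units of 1/10000 */

int
main(void)
{
	uint32_t ref_freq = 2700;	/* 27.00 MHz reference, in 10 kHz units */
	uint32_t target = 10800;	/* 108.00 MHz requested dot clock */
	uint32_t post_div = 2, max_ref_div = 1023;
	uint32_t ref_div, fb_div, tmp;

	for (ref_div = 1; ref_div <= max_ref_div; ref_div++) {
		/* fb_div = round(target * post_div * ref_div / ref_freq) */
		tmp = target * post_div * ref_div;
		fb_div = tmp / ref_freq;
		if (tmp % ref_freq >= ref_freq / 2)
			fb_div++;

		/* achieved/requested ratio in 1/10000 units, as above */
		tmp = (ref_freq * fb_div) / (post_div * ref_div);
		tmp = (tmp * 10000) / target;
		if (tmp >= 10000 - MAX_TOLERANCE &&
		    tmp <= 10000 + MAX_TOLERANCE) {
			printf("fb %u ref %u post %u\n", fb_div, ref_div,
			    post_div);
			return 0;
		}
	}
	return 1;
}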
/* pre-avivo */
@@ -1126,7 +867,7 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d)
n += d / 2;
- mod = do_div(n, d);
+ mod = n % d; n /= d; /* open-coded do_div(): remainder, then quotient */
return n;
}
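
radeon_div() rounds to the nearest integer rather than truncating: adding d/2 to the numerator before dividing pushes exact halves upward. A minimal userland illustration of the idiom:

#include <assert.h>
#include <stdint.h>

static inline uint32_t
div_round(uint64_t n, uint32_t d)
{
	n += d / 2;		/* bias so truncation rounds to nearest */
	return n / d;
}

int
main(void)
{
	assert(div_round(7, 2) == 4);	/* 3.5 rounds up */
	assert(div_round(6, 4) == 2);	/* 1.5 rounds up */
	assert(div_round(5, 4) == 1);	/* 1.25 rounds down */
	return 0;
}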
@@ -1348,12 +1089,12 @@ radeon_framebuffer_init(struct drm_device *dev,
{
int ret;
rfb->obj = obj;
- drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
if (ret) {
rfb->obj = NULL;
return ret;
}
+ drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
return 0;
}
@@ -1373,14 +1114,6 @@ radeon_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOENT);
}
-#ifdef notyet
- /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
- if (obj->import_attach) {
- DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
- return ERR_PTR(-EINVAL);
- }
-#endif
-
radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
if (radeon_fb == NULL) {
drm_gem_object_unreference_unlocked(obj);
@@ -1430,25 +1163,6 @@ static struct drm_prop_enum_list radeon_underscan_enum_list[] =
{ UNDERSCAN_AUTO, "auto" },
};
-static struct drm_prop_enum_list radeon_audio_enum_list[] =
-{ { RADEON_AUDIO_DISABLE, "off" },
- { RADEON_AUDIO_ENABLE, "on" },
- { RADEON_AUDIO_AUTO, "auto" },
-};
-
-/* XXX support different dither options? spatial, temporal, both, etc. */
-static struct drm_prop_enum_list radeon_dither_enum_list[] =
-{ { RADEON_FMT_DITHER_DISABLE, "off" },
- { RADEON_FMT_DITHER_ENABLE, "on" },
-};
-
-static struct drm_prop_enum_list radeon_output_csc_enum_list[] =
-{ { RADEON_OUTPUT_CSC_BYPASS, "bypass" },
- { RADEON_OUTPUT_CSC_TVRGB, "tvrgb" },
- { RADEON_OUTPUT_CSC_YCBCR601, "ycbcr601" },
- { RADEON_OUTPUT_CSC_YCBCR709, "ycbcr709" },
-};
-
static int radeon_modeset_create_props(struct radeon_device *rdev)
{
int sz;
@@ -1499,24 +1213,6 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
if (!rdev->mode_info.underscan_vborder_property)
return -ENOMEM;
- sz = ARRAY_SIZE(radeon_audio_enum_list);
- rdev->mode_info.audio_property =
- drm_property_create_enum(rdev->ddev, 0,
- "audio",
- radeon_audio_enum_list, sz);
-
- sz = ARRAY_SIZE(radeon_dither_enum_list);
- rdev->mode_info.dither_property =
- drm_property_create_enum(rdev->ddev, 0,
- "dither",
- radeon_dither_enum_list, sz);
-
- sz = ARRAY_SIZE(radeon_output_csc_enum_list);
- rdev->mode_info.output_csc_property =
- drm_property_create_enum(rdev->ddev, 0,
- "output_csc",
- radeon_output_csc_enum_list, sz);
-
return 0;
}
@@ -1550,41 +1246,41 @@ static void radeon_afmt_init(struct radeon_device *rdev)
for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
rdev->mode_info.afmt[i] = NULL;
- if (ASIC_IS_NODCE(rdev)) {
- /* nothing to do */
+ if (ASIC_IS_DCE6(rdev)) {
+ /* todo */
} else if (ASIC_IS_DCE4(rdev)) {
- static uint32_t eg_offsets[] = {
- EVERGREEN_CRTC0_REGISTER_OFFSET,
- EVERGREEN_CRTC1_REGISTER_OFFSET,
- EVERGREEN_CRTC2_REGISTER_OFFSET,
- EVERGREEN_CRTC3_REGISTER_OFFSET,
- EVERGREEN_CRTC4_REGISTER_OFFSET,
- EVERGREEN_CRTC5_REGISTER_OFFSET,
- 0x13830 - 0x7030,
- };
- int num_afmt;
-
- /* DCE8 has 7 audio blocks tied to DIG encoders */
- /* DCE6 has 6 audio blocks tied to DIG encoders */
/* DCE4/5 has 6 audio blocks tied to DIG encoders */
/* DCE4.1 has 2 audio blocks tied to DIG encoders */
- if (ASIC_IS_DCE8(rdev))
- num_afmt = 7;
- else if (ASIC_IS_DCE6(rdev))
- num_afmt = 6;
- else if (ASIC_IS_DCE5(rdev))
- num_afmt = 6;
- else if (ASIC_IS_DCE41(rdev))
- num_afmt = 2;
- else /* DCE4 */
- num_afmt = 6;
-
- BUG_ON(num_afmt > ARRAY_SIZE(eg_offsets));
- for (i = 0; i < num_afmt; i++) {
- rdev->mode_info.afmt[i] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
- if (rdev->mode_info.afmt[i]) {
- rdev->mode_info.afmt[i]->offset = eg_offsets[i];
- rdev->mode_info.afmt[i]->id = i;
+ rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[0]) {
+ rdev->mode_info.afmt[0]->offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
+ rdev->mode_info.afmt[0]->id = 0;
+ }
+ rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[1]) {
+ rdev->mode_info.afmt[1]->offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
+ rdev->mode_info.afmt[1]->id = 1;
+ }
+ if (!ASIC_IS_DCE41(rdev)) {
+ rdev->mode_info.afmt[2] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[2]) {
+ rdev->mode_info.afmt[2]->offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
+ rdev->mode_info.afmt[2]->id = 2;
+ }
+ rdev->mode_info.afmt[3] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[3]) {
+ rdev->mode_info.afmt[3]->offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
+ rdev->mode_info.afmt[3]->id = 3;
+ }
+ rdev->mode_info.afmt[4] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[4]) {
+ rdev->mode_info.afmt[4]->offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
+ rdev->mode_info.afmt[4]->id = 4;
+ }
+ rdev->mode_info.afmt[5] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[5]) {
+ rdev->mode_info.afmt[5]->offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
+ rdev->mode_info.afmt[5]->id = 5;
}
}
} else if (ASIC_IS_DCE3(rdev)) {
@@ -1690,12 +1386,12 @@ int radeon_modeset_init(struct radeon_device *rdev)
/* setup afmt */
radeon_afmt_init(rdev);
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+
radeon_fbdev_init(rdev);
drm_kms_helper_poll_init(rdev->ddev);
- /* do pm late init */
- ret = radeon_pm_late_init(rdev);
-
return 0;
}
@@ -1703,6 +1399,7 @@ void radeon_modeset_fini(struct radeon_device *rdev)
{
radeon_fbdev_fini(rdev);
kfree(rdev->mode_info.bios_hardcoded_edid);
+ radeon_pm_fini(rdev);
if (rdev->mode_info.mode_config_initialized) {
radeon_afmt_fini(rdev);
@@ -1775,7 +1472,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
(!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
- drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
+ drm_detect_hdmi_monitor(radeon_connector->edid) &&
is_hdtv_mode(mode)))) {
if (radeon_encoder->underscan_hborder != 0)
radeon_crtc->h_border = radeon_encoder->underscan_hborder;
@@ -1821,27 +1518,12 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
}
/*
- * Retrieve current video scanout position of crtc on a given gpu, and
- * an optional accurate timestamp of when query happened.
+ * Retrieve current video scanout position of crtc on a given gpu.
*
* \param dev Device to query.
* \param crtc Crtc to query.
- * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
- * For driver internal use only also supports these flags:
- *
- * USE_REAL_VBLANKSTART to use the real start of vblank instead
- * of a fudged earlier start of vblank.
- *
- * GET_DISTANCE_TO_VBLANKSTART to return distance to the
- * fudged earlier start of vblank in *vpos and the distance
- * to true start of vblank in *hpos.
- *
* \param *vpos Location where vertical scanout position should be stored.
* \param *hpos Location where horizontal scanout position should go.
- * \param *stime Target location for timestamp taken immediately before
- * scanout position query. Can be NULL to skip timestamp.
- * \param *etime Target location for timestamp taken immediately after
- * scanout position query. Can be NULL to skip timestamp.
*
* Returns vpos as a positive number while in active scanout area.
* Returns vpos as a negative number inside vblank, counting the number
@@ -1857,10 +1539,8 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
* unknown small number of scanlines wrt. real scanout position.
*
*/
-int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
- unsigned int flags, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
+int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int crtc, unsigned int flags,
+ int *vpos, int *hpos, ktime_t *stime, ktime_t *etime, const struct drm_display_mode *mode)
{
u32 stat_crtc = 0, vbl = 0, position = 0;
int vbl_start, vbl_end, vtotal, ret = 0;
@@ -1875,42 +1555,42 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
*stime = ktime_get();
if (ASIC_IS_DCE4(rdev)) {
- if (pipe == 0) {
+ if (crtc == 0) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC0_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC0_REGISTER_OFFSET);
ret |= DRM_SCANOUTPOS_VALID;
}
- if (pipe == 1) {
+ if (crtc == 1) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC1_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC1_REGISTER_OFFSET);
ret |= DRM_SCANOUTPOS_VALID;
}
- if (pipe == 2) {
+ if (crtc == 2) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC2_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC2_REGISTER_OFFSET);
ret |= DRM_SCANOUTPOS_VALID;
}
- if (pipe == 3) {
+ if (crtc == 3) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC3_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC3_REGISTER_OFFSET);
ret |= DRM_SCANOUTPOS_VALID;
}
- if (pipe == 4) {
+ if (crtc == 4) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC4_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
EVERGREEN_CRTC4_REGISTER_OFFSET);
ret |= DRM_SCANOUTPOS_VALID;
}
- if (pipe == 5) {
+ if (crtc == 5) {
vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
EVERGREEN_CRTC5_REGISTER_OFFSET);
position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
@@ -1918,19 +1598,19 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
ret |= DRM_SCANOUTPOS_VALID;
}
} else if (ASIC_IS_AVIVO(rdev)) {
- if (pipe == 0) {
+ if (crtc == 0) {
vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
ret |= DRM_SCANOUTPOS_VALID;
}
- if (pipe == 1) {
+ if (crtc == 1) {
vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
ret |= DRM_SCANOUTPOS_VALID;
}
} else {
/* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
- if (pipe == 0) {
+ if (crtc == 0) {
/* Assume vbl_end == 0, get vbl_start from
* upper 16 bits.
*/
@@ -1944,7 +1624,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
ret |= DRM_SCANOUTPOS_VALID;
}
- if (pipe == 1) {
+ if (crtc == 1) {
vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
RADEON_CRTC_V_DISP) >> RADEON_CRTC_V_DISP_SHIFT;
position = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
@@ -1975,44 +1655,14 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
}
else {
/* No: Fake something reasonable which gives at least ok results. */
- vbl_start = mode->crtc_vdisplay;
+ vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
vbl_end = 0;
}
- /* Called from driver internal vblank counter query code? */
- if (flags & GET_DISTANCE_TO_VBLANKSTART) {
- /* Caller wants distance from real vbl_start in *hpos */
- *hpos = *vpos - vbl_start;
- }
-
- /* Fudge vblank to start a few scanlines earlier to handle the
- * problem that vblank irqs fire a few scanlines before start
- * of vblank. Some driver internal callers need the true vblank
- * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
- *
- * The cause of the "early" vblank irq is that the irq is triggered
- * by the line buffer logic when the line buffer read position enters
- * the vblank, whereas our crtc scanout position naturally lags the
- * line buffer read position.
- */
- if (!(flags & USE_REAL_VBLANKSTART))
- vbl_start -= rdev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;
-
/* Test scanout position against vblank region. */
if ((*vpos < vbl_start) && (*vpos >= vbl_end))
in_vbl = false;
- /* In vblank? */
- if (in_vbl)
- ret |= DRM_SCANOUTPOS_IN_VBLANK;
-
- /* Called from driver internal vblank counter query code? */
- if (flags & GET_DISTANCE_TO_VBLANKSTART) {
- /* Caller wants distance from fudged earlier vbl_start */
- *vpos -= vbl_start;
- return ret;
- }
-
/* Check if inside vblank area and apply corrective offsets:
* vpos will then be >=0 in video scanout area, but negative
* within vblank area, counting down the number of lines until
@@ -2021,12 +1671,16 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
/* Inside "upper part" of vblank area? Apply corrective offset if so: */
if (in_vbl && (*vpos >= vbl_start)) {
- vtotal = mode->crtc_vtotal;
+ vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
*vpos = *vpos - vtotal;
}
/* Correct for shifted end of vbl at vbl_end. */
*vpos = *vpos - vbl_end;
+ /* In vblank? */
+ if (in_vbl)
+ ret |= DRM_SCANOUTPOS_IN_VBLANK;
+
return ret;
}
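
As the comment block above describes, vpos is returned as a non-negative line number while in the active scanout area and goes negative inside vblank, counting the lines left until active scanout resumes. A standalone walk over one frame with invented 1080p-like timings (vdisplay 1080, vtotal 1125) shows the wrap-around correction:

#include <stdio.h>

int
main(void)
{
	int vtotal = 1125, vbl_start = 1080, vbl_end = 0;
	int raw;

	for (raw = 0; raw < vtotal; raw += 100) {
		int vpos = raw;
		int in_vbl = !(vpos < vbl_start && vpos >= vbl_end);

		/* inside "upper part" of vblank: wrap below zero */
		if (in_vbl && vpos >= vbl_start)
			vpos -= vtotal;
		/* correct for a shifted end of vblank */
		vpos -= vbl_end;

		printf("raw %4d -> vpos %4d (%s)\n", raw, vpos,
		    in_vbl ? "vblank" : "active");
	}
	return 0;
}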
diff --git a/sys/dev/pci/drm/radeon/radeon_encoders.c b/sys/dev/pci/drm/radeon/radeon_encoders.c
index b1fb98e1e93..c3f9b6e0c0d 100644
--- a/sys/dev/pci/drm/radeon/radeon_encoders.c
+++ b/sys/dev/pci/drm/radeon/radeon_encoders.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_encoders.c,v 1.4 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -158,48 +159,10 @@ radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8
return ret;
}
-static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
- struct drm_connector *connector)
-{
- struct drm_device *dev = radeon_encoder->base.dev;
- struct radeon_device *rdev = dev->dev_private;
- bool use_bl = false;
-
- if (!(radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)))
- return;
-
- if (radeon_backlight == 0) {
- return;
- } else if (radeon_backlight == 1) {
- use_bl = true;
- } else if (radeon_backlight == -1) {
- /* Quirks */
- /* Amilo Xi 2550 only works with acpi bl */
- if ((rdev->pdev->device == 0x9583) &&
- (rdev->pdev->subsystem_vendor == 0x1734) &&
- (rdev->pdev->subsystem_device == 0x1107))
- use_bl = false;
-/* Older PPC macs use on-GPU backlight controller */
-#ifndef CONFIG_PPC_PMAC
- /* disable native backlight control on older asics */
- else if (rdev->family < CHIP_R600)
- use_bl = false;
-#endif
- else
- use_bl = true;
- }
-
- if (use_bl) {
- if (rdev->is_atom_bios)
- radeon_atom_backlight_init(radeon_encoder, connector);
- else
- radeon_legacy_backlight_init(radeon_encoder, connector);
- }
-}
-
void
radeon_link_encoder_connector(struct drm_device *dev)
{
+ struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct drm_encoder *encoder;
@@ -212,8 +175,13 @@ radeon_link_encoder_connector(struct drm_device *dev)
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->devices & radeon_connector->devices) {
drm_mode_connector_attach_encoder(connector, encoder);
- if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- radeon_encoder_add_backlight(radeon_encoder, connector);
+ if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ if (rdev->is_atom_bios)
+ radeon_atom_backlight_init(radeon_encoder, connector);
+ else
+ radeon_legacy_backlight_init(radeon_encoder, connector);
+ rdev->mode_info.bl_encoder = radeon_encoder;
+ }
}
}
}
@@ -246,16 +214,7 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
- if (radeon_encoder->is_mst_encoder) {
- struct radeon_encoder_mst *mst_enc;
-
- if (!radeon_connector->is_mst_connector)
- continue;
-
- mst_enc = radeon_encoder->enc_priv;
- if (mst_enc->connector == radeon_connector->mst_port)
- return connector;
- } else if (radeon_encoder->active_device & radeon_connector->devices)
+ if (radeon_encoder->active_device & radeon_connector->devices)
return connector;
}
return NULL;
@@ -385,7 +344,7 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
case DRM_MODE_CONNECTOR_HDMIB:
if (radeon_connector->use_digital) {
/* HDMI 1.3 supports up to 340 Mhz over single link */
- if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
+ if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (pixel_clock > 340000)
return true;
else
@@ -401,16 +360,13 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_DisplayPort:
- if (radeon_connector->is_mst_connector)
- return false;
-
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return false;
else {
/* HDMI 1.3 supports up to 340 Mhz over single link */
- if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
+ if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (pixel_clock > 340000)
return true;
else
@@ -427,24 +383,3 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
}
}
-bool radeon_encoder_is_digital(struct drm_encoder *encoder)
-{
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_LVDS:
- case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- case ENCODER_OBJECT_ID_INTERNAL_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_DDI:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
- return true;
- default:
- return false;
- }
-}
diff --git a/sys/dev/pci/drm/radeon/radeon_family.h b/sys/dev/pci/drm/radeon/radeon_family.h
index 4b7b87f71a6..a56e9759ed4 100644
--- a/sys/dev/pci/drm/radeon/radeon_family.h
+++ b/sys/dev/pci/drm/radeon/radeon_family.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_family.h,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -91,13 +92,6 @@ enum radeon_family {
CHIP_TAHITI,
CHIP_PITCAIRN,
CHIP_VERDE,
- CHIP_OLAND,
- CHIP_HAINAN,
- CHIP_BONAIRE,
- CHIP_KAVERI,
- CHIP_KABINI,
- CHIP_HAWAII,
- CHIP_MULLINS,
CHIP_LAST,
};
@@ -116,7 +110,6 @@ enum radeon_chip_flags {
RADEON_NEW_MEMMAP = 0x00400000UL,
RADEON_IS_PCI = 0x00800000UL,
RADEON_IS_IGPGART = 0x01000000UL,
- RADEON_IS_PX = 0x02000000UL,
};
#endif
diff --git a/sys/dev/pci/drm/radeon/radeon_fb.c b/sys/dev/pci/drm/radeon/radeon_fb.c
index 2d4f221c0fc..e94dddbd651 100644
--- a/sys/dev/pci/drm/radeon/radeon_fb.c
+++ b/sys/dev/pci/drm/radeon/radeon_fb.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_fb.c,v 1.12 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright © 2007 David Airlie
*
@@ -32,7 +33,6 @@
#include <dev/pci/drm/drm_fb_helper.h>
-
/* object hierarchy -
this contains a helper + a radeon fb
the helper contains a pointer to radeon framebuffer baseclass.
@@ -49,9 +49,9 @@ static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = drm_fb_helper_cfb_fillrect,
- .fb_copyarea = drm_fb_helper_cfb_copyarea,
- .fb_imageblit = drm_fb_helper_cfb_imageblit,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
@@ -60,6 +60,7 @@ static struct fb_ops radeonfb_ops = {
};
#endif
+void radeondrm_burner_cb(void *);
int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
{
@@ -122,10 +123,11 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
if (rdev->family >= CHIP_R600)
height = roundup2(mode_cmd->height, 8);
size = mode_cmd->pitches[0] * height;
- aligned_size = roundup2(size, PAGE_SIZE);
+ aligned_size = PAGE_ALIGN(size);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
- 0, true, &gobj);
+ false, true,
+ &gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
aligned_size);
@@ -184,11 +186,9 @@ out_unref:
return ret;
}
-static int radeonfb_create(struct drm_fb_helper *helper,
+static int radeonfb_create(struct radeon_fbdev *rfbdev,
struct drm_fb_helper_surface_size *sizes)
{
- struct radeon_fbdev *rfbdev =
- container_of(helper, struct radeon_fbdev, helper);
struct radeon_device *rdev = rfbdev->rdev;
struct fb_info *info;
struct rasops_info *ri = &rdev->ro;
@@ -196,8 +196,13 @@ static int radeonfb_create(struct drm_fb_helper *helper,
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
+#if 0
+ struct device *device = &rdev->pdev->dev;
+#endif
int ret;
+#if 0
unsigned long tmp;
+#endif
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
@@ -218,9 +223,9 @@ static int radeonfb_create(struct drm_fb_helper *helper,
rbo = gem_to_radeon_bo(gobj);
/* okay we have an object now allocate the framebuffer */
- info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
+ info = framebuffer_alloc(0, device);
+ if (info == NULL) {
+ ret = -ENOMEM;
goto out_unref;
}
@@ -228,28 +233,27 @@ static int radeonfb_create(struct drm_fb_helper *helper,
ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
if (ret) {
- DRM_ERROR("failed to initialize framebuffer %d\n", ret);
- goto out_destroy_fbi;
+ DRM_ERROR("failed to initalise framebuffer %d\n", ret);
+ goto out_unref;
}
fb = &rfbdev->rfb.base;
/* setup helper */
rfbdev->helper.fb = fb;
+ rfbdev->helper.fbdev = info;
+#ifdef notyet
memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
-#ifdef __linux__
strcpy(info->fix.id, "radeondrmfb");
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &radeonfb_ops;
-#endif
tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
-#ifdef __linux__
info->fix.smem_start = rdev->mc.aper_base + tmp;
info->fix.smem_len = radeon_bo_size(rbo);
info->screen_base = rbo->kptr;
@@ -258,24 +262,37 @@ static int radeonfb_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
/* setup aperture base/size for vesafb takeover */
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_unref;
+ }
info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
info->apertures->ranges[0].size = rdev->mc.aper_size;
-#endif
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
-#ifdef __linux__
if (info->screen_base == NULL) {
ret = -ENOSPC;
- goto out_destroy_fbi;
+ goto out_unref;
+ }
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_unref;
}
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
#endif
+#ifdef DRMDEBUG
DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
- DRM_INFO("fb depth is %d\n", fb->depth);
+ DRM_INFO("fb depth is %d bpp is %d\n", fb->depth, fb->bits_per_pixel);
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
+#endif
+
+ memset(rbo->kptr, 0x0, radeon_bo_size(rbo));
ri->ri_bits = rbo->kptr;
ri->ri_depth = fb->bits_per_pixel;
@@ -302,57 +319,76 @@ static int radeonfb_create(struct drm_fb_helper *helper,
break;
}
-#ifdef __linux__
+#if 0
vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
#endif
return 0;
-out_destroy_fbi:
-#ifdef __linux__
- drm_fb_helper_release_fbi(helper);
-#endif
out_unref:
if (rbo) {
}
if (fb && ret) {
drm_gem_object_unreference(gobj);
- drm_framebuffer_unregister_private(fb);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
return ret;
}
+static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = radeonfb_create(rfbdev, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
void radeon_fb_output_poll_changed(struct radeon_device *rdev)
{
drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
}
-#ifdef notyet
static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
{
+ printf("%s stub\n", __func__);
+ return -ENOSYS;
+#ifdef notyet
+ struct fb_info *info;
struct radeon_framebuffer *rfb = &rfbdev->rfb;
- drm_fb_helper_unregister_fbi(&rfbdev->helper);
- drm_fb_helper_release_fbi(&rfbdev->helper);
+ if (rfbdev->helper.fbdev) {
+ info = rfbdev->helper.fbdev;
+
+ unregister_framebuffer(info);
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
if (rfb->obj) {
radeonfb_destroy_pinned_object(rfb->obj);
rfb->obj = NULL;
}
drm_fb_helper_fini(&rfbdev->helper);
- drm_framebuffer_unregister_private(&rfb->base);
drm_framebuffer_cleanup(&rfb->base);
return 0;
-}
#endif
+}
-static const struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
+static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
.gamma_set = radeon_crtc_fb_gamma_set,
.gamma_get = radeon_crtc_fb_gamma_get,
- .fb_probe = radeonfb_create,
+ .fb_probe = radeon_fb_find_or_create_single,
};
int radeon_fbdev_init(struct radeon_device *rdev)
@@ -378,27 +414,38 @@ int radeon_fbdev_init(struct radeon_device *rdev)
ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
rdev->num_crtc,
RADEONFB_CONN_LIMIT);
- if (ret)
- goto free;
-
- ret = drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
- if (ret)
- goto fini;
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(rdev->ddev);
+ if (ret) {
+ kfree(rfbdev);
+ return ret;
+ }
- ret = drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
- if (ret)
- goto fini;
+ task_set(&rdev->burner_task, radeondrm_burner_cb, rdev);
+ drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
+#ifdef __sparc64__
+{
+ struct drm_fb_helper *fb_helper = &rfbdev->helper;
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int i;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ struct drm_cmdline_mode *mode;
+ struct drm_connector *connector;
+
+ fb_helper_conn = fb_helper->connector_info[i];
+ connector = fb_helper_conn->connector;
+ mode = &connector->cmdline_mode;
+
+ mode->specified = true;
+ mode->xres = rdev->sf.sf_width;
+ mode->yres = rdev->sf.sf_height;
+ mode->bpp_specified = true;
+ mode->bpp = rdev->sf.sf_depth;
+ }
+}
+#endif
+ drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
return 0;
-
-fini:
- drm_fb_helper_fini(&rfbdev->helper);
-free:
- kfree(rfbdev);
- return ret;
}
void radeon_fbdev_fini(struct radeon_device *rdev)
@@ -406,9 +453,9 @@ void radeon_fbdev_fini(struct radeon_device *rdev)
if (!rdev->mode_info.rfbdev)
return;
-#ifdef notyet
+ task_del(systq, &rdev->burner_task);
+
radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev);
-#endif
kfree(rdev->mode_info.rfbdev);
rdev->mode_info.rfbdev = NULL;
}
@@ -420,37 +467,21 @@ void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
#endif
}
-bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
+int radeon_fbdev_total_size(struct radeon_device *rdev)
{
- if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
- return true;
- return false;
-}
+ struct radeon_bo *robj;
+ int size = 0;
-void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
-{
- drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+ robj = gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj);
+ size += radeon_bo_size(robj);
+ return size;
}
-void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
-{
- drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
-}
-
-void radeon_fbdev_restore_mode(struct radeon_device *rdev)
+bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
{
- struct radeon_fbdev *rfbdev = rdev->mode_info.rfbdev;
- struct drm_fb_helper *fb_helper;
- int ret;
-
- if (!rfbdev)
- return;
-
- fb_helper = &rfbdev->helper;
-
- ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
- if (ret)
- DRM_DEBUG("failed to restore crtc mode\n");
+ if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
+ return true;
+ return false;
}
void
diff --git a/sys/dev/pci/drm/radeon/radeon_fence.c b/sys/dev/pci/drm/radeon/radeon_fence.c
index 6fd9cf59975..5c2cae52499 100644
--- a/sys/dev/pci/drm/radeon/radeon_fence.c
+++ b/sys/dev/pci/drm/radeon/radeon_fence.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_fence.c,v 1.12 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2009 Jerome Glisse.
* All Rights Reserved.
@@ -28,7 +29,6 @@
* Jerome Glisse <glisse@freedesktop.org>
* Dave Airlie
*/
-#include <dev/pci/drm/drm_linux.h>
#include <dev/pci/drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
@@ -58,9 +58,7 @@ static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
- if (drv->cpu_addr) {
- *drv->cpu_addr = cpu_to_le32(seq);
- }
+ *drv->cpu_addr = cpu_to_le32(seq);
} else {
WREG32(drv->scratch_reg, seq);
}
@@ -81,11 +79,7 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
u32 seq = 0;
if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
- if (drv->cpu_addr) {
- seq = le32_to_cpu(*drv->cpu_addr);
- } else {
- seq = lower_32_bits(atomic64_read(&drv->last_seq));
- }
+ seq = le32_to_cpu(*drv->cpu_addr);
} else {
seq = RREG32(drv->scratch_reg);
}
@@ -93,25 +87,6 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
}
/**
- * radeon_fence_schedule_check - schedule lockup check
- *
- * @rdev: radeon_device pointer
- * @ring: ring index we should work with
- *
- * Queues a delayed work item to check for lockups.
- */
-static void radeon_fence_schedule_check(struct radeon_device *rdev, int ring)
-{
- /*
- * Do not reset the timer here with mod_delayed_work,
- * this can livelock in an interaction with TTM delayed destroy.
- */
- queue_delayed_work(system_power_efficient_wq,
- &rdev->fence_drv[ring].lockup_work,
- RADEON_FENCE_JIFFIES_TIMEOUT);
-}
-
-/**
* radeon_fence_emit - emit a fence on the requested ring
*
* @rdev: radeon_device pointer
@@ -125,71 +100,30 @@ int radeon_fence_emit(struct radeon_device *rdev,
struct radeon_fence **fence,
int ring)
{
- u64 seq = ++rdev->fence_drv[ring].sync_seq[ring];
-
/* we are protected by the ring emission mutex */
*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
if ((*fence) == NULL) {
return -ENOMEM;
}
+ kref_init(&((*fence)->kref));
(*fence)->rdev = rdev;
- (*fence)->seq = seq;
+ (*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
(*fence)->ring = ring;
- (*fence)->is_vm_update = false;
- fence_init(&(*fence)->base, &radeon_fence_ops,
- &rdev->fence_queue.lock, rdev->fence_context + ring, seq);
radeon_fence_ring_emit(rdev, ring, *fence);
- trace_radeon_fence_emit(rdev->ddev, ring, (*fence)->seq);
- radeon_fence_schedule_check(rdev, ring);
- return 0;
-}
-
-/**
- * radeon_fence_check_signaled - callback from fence_queue
- *
- * this function is called with fence_queue lock held, which is also used
- * for the fence locking itself, so unlocked variants are used for
- * fence_signal, and remove_wait_queue.
- */
-static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
-{
- struct radeon_fence *fence;
- u64 seq;
-
- fence = container_of(wait, struct radeon_fence, fence_wake);
-
- /*
- * We cannot use radeon_fence_process here because we're already
- * in the waitqueue, in a call from wake_up_all.
- */
- seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
- if (seq >= fence->seq) {
- int ret = fence_signal_locked(&fence->base);
-
- if (!ret)
- FENCE_TRACE(&fence->base, "signaled from irq context\n");
- else
- FENCE_TRACE(&fence->base, "was already signaled\n");
-
- radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
- __remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
- fence_put(&fence->base);
- } else
- FENCE_TRACE(&fence->base, "pending\n");
+ trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
return 0;
}
/**
- * radeon_fence_activity - check for fence activity
+ * radeon_fence_process - process a fence
*
* @rdev: radeon_device pointer
* @ring: ring index the fence is associated with
*
- * Checks the current fence value and calculates the last
- * signalled fence value. Returns true if activity occured
- * on the ring, and the fence_queue should be waken up.
+ * Checks the current fence value and wakes the fence queue
+ * if the sequence number has increased (all asics).
*/
-static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
+void radeon_fence_process(struct radeon_device *rdev, int ring)
{
uint64_t seq, last_seq, last_emitted;
unsigned count_loop = 0;
@@ -245,87 +179,35 @@ static bool radeon_fence_activity(struct radeon_device *rdev, int ring)
}
} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
- if (seq < last_emitted)
- radeon_fence_schedule_check(rdev, ring);
-
- return wake;
-}
-
-/**
- * radeon_fence_check_lockup - check for hardware lockup
- *
- * @work: delayed work item
- *
- * Checks for fence activity and if there is none probe
- * the hardware if a lockup occured.
- */
-static void radeon_fence_check_lockup(struct work_struct *work)
-{
- struct radeon_fence_driver *fence_drv;
- struct radeon_device *rdev;
- int ring;
-
- fence_drv = container_of(work, struct radeon_fence_driver,
- lockup_work.work);
- rdev = fence_drv->rdev;
- ring = fence_drv - &rdev->fence_drv[0];
-
- if (!down_read_trylock(&rdev->exclusive_lock)) {
- /* just reschedule the check if a reset is going on */
- radeon_fence_schedule_check(rdev, ring);
- return;
- }
-
- if (fence_drv->delayed_irq && rdev->ddev->irq_enabled) {
- unsigned long irqflags;
-
- fence_drv->delayed_irq = false;
- spin_lock_irqsave(&rdev->irq.lock, irqflags);
- radeon_irq_set(rdev);
- spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
- }
-
- if (radeon_fence_activity(rdev, ring))
- wake_up_all(&rdev->fence_queue);
-
- else if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
-
- /* good news we believe it's a lockup */
- dev_warn(rdev->dev, "GPU lockup (current fence id "
- "0x%016llx last fence id 0x%016llx on ring %d)\n",
- (uint64_t)atomic64_read(&fence_drv->last_seq),
- fence_drv->sync_seq[ring], ring);
-
- /* remember that we need an reset */
- rdev->needs_reset = true;
+ if (wake) {
+ rdev->fence_drv[ring].last_activity = jiffies;
wake_up_all(&rdev->fence_queue);
}
- up_read(&rdev->exclusive_lock);
}
/**
- * radeon_fence_process - process a fence
+ * radeon_fence_destroy - destroy a fence
*
- * @rdev: radeon_device pointer
- * @ring: ring index the fence is associated with
+ * @kref: fence kref
*
- * Checks the current fence value and wakes the fence queue
- * if the sequence number has increased (all asics).
+ * Frees the fence object (all asics).
*/
-void radeon_fence_process(struct radeon_device *rdev, int ring)
+static void radeon_fence_destroy(struct kref *kref)
{
- if (radeon_fence_activity(rdev, ring))
- wake_up_all(&rdev->fence_queue);
+ struct radeon_fence *fence;
+
+ fence = container_of(kref, struct radeon_fence, kref);
+ kfree(fence);
}
/**
- * radeon_fence_seq_signaled - check if a fence sequence number has signaled
+ * radeon_fence_seq_signaled - check if a fence sequence number has signaled
*
* @rdev: radeon device pointer
* @seq: sequence number
* @ring: ring index the fence is associated with
*
- * Check if the last signaled fence sequnce number is >= the requested
+ * Check if the last signaled fence sequence number is >= the requested
* sequence number (all asics).
* Returns true if the fence has signaled (current fence value
* is >= requested value) or false if it has not (current fence
@@ -346,215 +228,311 @@ static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
return false;
}
-static bool radeon_fence_is_signaled(struct fence *f)
+/**
+ * radeon_fence_signaled - check if a fence has signaled
+ *
+ * @fence: radeon fence object
+ *
+ * Check if the requested fence has signaled (all asics).
+ * Returns true if the fence has signaled or false if it has not.
+ */
+bool radeon_fence_signaled(struct radeon_fence *fence)
{
- struct radeon_fence *fence = to_radeon_fence(f);
- struct radeon_device *rdev = fence->rdev;
- unsigned ring = fence->ring;
- u64 seq = fence->seq;
-
- if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
+ if (!fence) {
return true;
}
-
- if (down_read_trylock(&rdev->exclusive_lock)) {
- radeon_fence_process(rdev, ring);
- up_read(&rdev->exclusive_lock);
-
- if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
- return true;
- }
+ if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
+ return true;
+ }
+ if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
+ fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+ return true;
}
return false;
}
/**
- * radeon_fence_enable_signaling - enable signalling on fence
- * @fence: fence
+ * radeon_fence_wait_seq - wait for a specific sequence number
+ *
+ * @rdev: radeon device pointer
+ * @target_seq: sequence number we want to wait for
+ * @ring: ring index the fence is associated with
+ * @intr: use interruptible sleep
+ * @lock_ring: whether the ring should be locked or not
*
- * This function is called with fence_queue lock held, and adds a callback
- * to fence_queue that checks if this fence is signaled, and if so it
- * signals the fence and removes itself.
+ * Wait for the requested sequence number to be written (all asics).
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the sequence number. Helper function
+ * for radeon_fence_wait(), et al.
+ * Returns 0 if the sequence number has passed, error for all other cases.
+ * -EDEADLK is returned when a GPU lockup has been detected and the ring is
+ * marked as not ready so no further jobs get scheduled until a successful
+ * reset.
*/
-static bool radeon_fence_enable_signaling(struct fence *f)
+static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
+ unsigned ring, bool intr, bool lock_ring)
{
- struct radeon_fence *fence = to_radeon_fence(f);
- struct radeon_device *rdev = fence->rdev;
-
- if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq)
- return false;
-
- if (down_read_trylock(&rdev->exclusive_lock)) {
- radeon_irq_kms_sw_irq_get(rdev, fence->ring);
-
- if (radeon_fence_activity(rdev, fence->ring))
- wake_up_all_locked(&rdev->fence_queue);
+ unsigned long timeout, last_activity;
+ uint64_t seq;
+ unsigned i;
+ bool signaled;
+ int r, error;
- /* did fence get signaled after we enabled the sw irq? */
- if (atomic64_read(&rdev->fence_drv[fence->ring].last_seq) >= fence->seq) {
- radeon_irq_kms_sw_irq_put(rdev, fence->ring);
- up_read(&rdev->exclusive_lock);
- return false;
+ while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+ if (!rdev->ring[ring].ready) {
+ return -EBUSY;
}
- up_read(&rdev->exclusive_lock);
- } else {
- /* we're probably in a lockup, lets not fiddle too much */
- if (radeon_irq_kms_sw_irq_get_delayed(rdev, fence->ring))
- rdev->fence_drv[fence->ring].delayed_irq = true;
- radeon_fence_schedule_check(rdev, fence->ring);
+ timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
+ if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
+ /* the normal case, timeout is somewhere before last_activity */
+ timeout = rdev->fence_drv[ring].last_activity - timeout;
+ } else {
+ /* either jiffies wrapped around, or no fence was signaled in the
+ * last 500ms; either way, wait the minimum amount and then check for a lockup
+ */
+ timeout = 1;
+ }
+ seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
+ /* Save current last activity value, used to check for GPU lockups */
+ last_activity = rdev->fence_drv[ring].last_activity;
+
+ trace_radeon_fence_wait_begin(rdev->ddev, seq);
+ radeon_irq_kms_sw_irq_get(rdev, ring);
+ r = timeout;
+ while (r > 0) {
+ signaled = radeon_fence_seq_signaled(rdev, target_seq, ring);
+ if (signaled)
+ break;
+ error = tsleep(&rdev->fence_queue,
+ PZERO | (intr ? PCATCH : 0), "rfnwt", timeout);
+ if (error == ERESTART)
+ error = EINTR; /* XXX */
+ if (error == EWOULDBLOCK)
+ error = 0;
+ r = -error;
+ }
+ radeon_irq_kms_sw_irq_put(rdev, ring);
+ if (unlikely(r < 0)) {
+ return r;
+ }
+ trace_radeon_fence_wait_end(rdev->ddev, seq);
+
+ if (unlikely(!signaled)) {
+ /* we were interrupted for some reason and fence
+ * isn't signaled yet, resume waiting */
+ if (r) {
+ continue;
+ }
+
+ /* check if sequence value has changed since last_activity */
+ if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
+ continue;
+ }
+
+ if (lock_ring) {
+ mutex_lock(&rdev->ring_lock);
+ }
+
+ /* test if somebody else has already decided that this is a lockup */
+ if (last_activity != rdev->fence_drv[ring].last_activity) {
+ if (lock_ring) {
+ mutex_unlock(&rdev->ring_lock);
+ }
+ continue;
+ }
+
+ if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+ /* good news we believe it's a lockup */
+ dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
+ target_seq, seq);
+
+ /* change last activity so nobody else thinks there is a lockup */
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ rdev->fence_drv[i].last_activity = jiffies;
+ }
+
+ /* mark the ring as not ready any more */
+ rdev->ring[ring].ready = false;
+ if (lock_ring) {
+ mutex_unlock(&rdev->ring_lock);
+ }
+ return -EDEADLK;
+ }
+
+ if (lock_ring) {
+ mutex_unlock(&rdev->ring_lock);
+ }
+ }
}
-
- fence->fence_wake.flags = 0;
- fence->fence_wake.private = NULL;
- fence->fence_wake.func = radeon_fence_check_signaled;
- __add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
- fence_get(f);
-
- FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
- return true;
+ return 0;
}
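
The wait loop above sizes each sleep from last_activity so that a stalled fence is re-examined for a GPU lockup roughly once per RADEON_FENCE_JIFFIES_TIMEOUT window (500 ms, per the comments above). A standalone sketch of that slice computation with invented tick values; time_after() here is the usual wrap-safe jiffies comparison:

#include <stdio.h>
#include <stdint.h>

/* wrap-safe "a is later than b" comparison, as in the Linux jiffies idiom */
#define time_after(a, b)	((int32_t)((b) - (a)) < 0)

int
main(void)
{
	uint32_t hz = 100;			/* ticks per second */
	uint32_t fence_timeout = hz / 2;	/* the driver's 500 ms window */
	uint32_t jiffies = 100000;		/* "now" */
	uint32_t last_activity = 99980;		/* last fence signal, 200 ms ago */
	uint32_t timeout = jiffies - fence_timeout;

	if (time_after(last_activity, timeout))
		timeout = last_activity - timeout;	/* sleep the remainder */
	else
		timeout = 1;		/* wrapped/stale: recheck almost at once */

	printf("sleep %u ticks\n", timeout);	/* prints "sleep 30" (300 ms) */
	return 0;
}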
/**
- * radeon_fence_signaled - check if a fence has signaled
+ * radeon_fence_wait - wait for a fence to signal
*
* @fence: radeon fence object
+ * @intr: use interruptible sleep
*
- * Check if the requested fence has signaled (all asics).
- * Returns true if the fence has signaled or false if it has not.
+ * Wait for the requested fence to signal (all asics).
+ * @intr selects whether to use interruptible (true) or non-interruptible
+ * (false) sleep when waiting for the fence.
+ * Returns 0 if the fence has passed, error for all other cases.
*/
-bool radeon_fence_signaled(struct radeon_fence *fence)
+int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
- if (!fence)
- return true;
+ int r;
- if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
- int ret;
+ if (fence == NULL) {
+ WARN(1, "Querying an invalid fence : %p !\n", fence);
+ return -EINVAL;
+ }
- ret = fence_signal(&fence->base);
- if (!ret)
- FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
- return true;
+ r = radeon_fence_wait_seq(fence->rdev, fence->seq,
+ fence->ring, intr, true);
+ if (r) {
+ return r;
}
- return false;
+ fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+ return 0;
}
-/**
- * radeon_fence_any_seq_signaled - check if any sequence number is signaled
- *
- * @rdev: radeon device pointer
- * @seq: sequence numbers
- *
- * Check if the last signaled fence sequnce number is >= the requested
- * sequence number (all asics).
- * Returns true if any has signaled (current value is >= requested value)
- * or false if it has not. Helper function for radeon_fence_wait_seq.
- */
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
unsigned i;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
+ if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
return true;
+ }
}
return false;
}
/**
- * radeon_fence_wait_seq_timeout - wait for a specific sequence numbers
+ * radeon_fence_wait_any_seq - wait for a sequence number on any ring
*
* @rdev: radeon device pointer
* @target_seq: sequence number(s) we want to wait for
* @intr: use interruptible sleep
- * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
*
* Wait for the requested sequence number(s) to be written by any ring
* (all asics). Sequence number array is indexed by ring id.
* @intr selects whether to use interruptible (true) or non-interruptible
* (false) sleep when waiting for the sequence number. Helper function
- * for radeon_fence_wait_*().
- * Returns remaining time if the sequence number has passed, 0 when
- * the wait timeout, or an error for all other cases.
- * -EDEADLK is returned when a GPU lockup has been detected.
+ * for radeon_fence_wait_any(), et al.
+ * Returns 0 if the sequence number has passed, error for all other cases.
*/
-static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
- u64 *target_seq, bool intr,
- long timeout)
+static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
+ u64 *target_seq, bool intr)
{
- long r;
- int i;
-
- if (radeon_fence_any_seq_signaled(rdev, target_seq))
- return timeout;
+ unsigned long timeout, last_activity, tmp;
+ unsigned i, ring = RADEON_NUM_RINGS;
+ bool signaled;
+ int r, error;
- /* enable IRQs and tracing */
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (!target_seq[i])
+ for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!target_seq[i]) {
continue;
+ }
- trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]);
- radeon_irq_kms_sw_irq_get(rdev, i);
- }
+ /* use the most recent one as indicator */
+ if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
+ last_activity = rdev->fence_drv[i].last_activity;
+ }
- if (intr) {
- r = wait_event_interruptible_timeout(rdev->fence_queue, (
- radeon_fence_any_seq_signaled(rdev, target_seq)
- || rdev->needs_reset), timeout);
- } else {
- r = wait_event_timeout(rdev->fence_queue, (
- radeon_fence_any_seq_signaled(rdev, target_seq)
- || rdev->needs_reset), timeout);
+ /* For lockup detection just pick the lowest ring we are
+ * actively waiting for
+ */
+ if (i < ring) {
+ ring = i;
+ }
}
- if (rdev->needs_reset)
- r = -EDEADLK;
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (!target_seq[i])
- continue;
-
- radeon_irq_kms_sw_irq_put(rdev, i);
- trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
+ /* nothing to wait for ? */
+ if (ring == RADEON_NUM_RINGS) {
+ return -ENOENT;
}
- return r;
-}
-
-/**
- * radeon_fence_wait - wait for a fence to signal
- *
- * @fence: radeon fence object
- * @intr: use interruptible sleep
- *
- * Wait for the requested fence to signal (all asics).
- * @intr selects whether to use interruptable (true) or non-interruptable
- * (false) sleep when waiting for the fence.
- * Returns 0 if the fence has passed, error for all other cases.
- */
-int radeon_fence_wait(struct radeon_fence *fence, bool intr)
-{
- uint64_t seq[RADEON_NUM_RINGS] = {};
- long r;
-
- /*
- * This function should not be called on !radeon fences.
- * If this is the case, it would mean this function can
- * also be called on radeon fences belonging to another card.
- * exclusive_lock is not held in that case.
- */
- if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
- return fence_wait(&fence->base, intr);
+ while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
+ timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
+ if (time_after(last_activity, timeout)) {
+ /* the normal case, timeout is somewhere before last_activity */
+ timeout = last_activity - timeout;
+ } else {
+ /* either jiffies wrapped around, or no fence was signaled in the
+ * last 500ms; either way, wait the minimum amount and then check for a lockup
+ */
+ timeout = 1;
+ }
- seq[fence->ring] = fence->seq;
- r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
- if (r < 0) {
- return r;
+ trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (target_seq[i]) {
+ radeon_irq_kms_sw_irq_get(rdev, i);
+ }
+ }
+ r = timeout;
+ while (r > 0) {
+ signaled = radeon_fence_any_seq_signaled(rdev, target_seq);
+ if (signaled)
+ break;
+ error = tsleep(&rdev->fence_queue,
+ PZERO | (intr ? PCATCH : 0), "rfwa", timeout);
+ if (error == ERESTART)
+ error = EINTR; /* XXX */
+ if (error == EWOULDBLOCK)
+ error = 0;
+ r = -error;
+ }
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (target_seq[i]) {
+ radeon_irq_kms_sw_irq_put(rdev, i);
+ }
+ }
+ if (unlikely(r < 0)) {
+ return r;
+ }
+ trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);
+
+ if (unlikely(!signaled)) {
+ /* we were interrupted for some reason and fence
+ * isn't signaled yet, resume waiting */
+ if (r) {
+ continue;
+ }
+
+ mutex_lock(&rdev->ring_lock);
+ for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
+ tmp = rdev->fence_drv[i].last_activity;
+ }
+ }
+ /* test if somebody else has already decided that this is a lockup */
+ if (last_activity != tmp) {
+ last_activity = tmp;
+ mutex_unlock(&rdev->ring_lock);
+ continue;
+ }
+
+ if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
+ /* good news we believe it's a lockup */
+ dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
+ target_seq[ring]);
+
+ /* change last activity so nobody else thinks there is a lockup */
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ rdev->fence_drv[i].last_activity = jiffies;
+ }
+
+ /* mark the ring as not ready any more */
+ rdev->ring[ring].ready = false;
+ mutex_unlock(&rdev->ring_lock);
+ return -EDEADLK;
+ }
+ mutex_unlock(&rdev->ring_lock);
+ }
}
-
- r = fence_signal(&fence->base);
- if (!r)
- FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
return 0;
}
@@ -576,8 +554,8 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
bool intr)
{
uint64_t seq[RADEON_NUM_RINGS];
- unsigned i, num_rings = 0;
- long r;
+ unsigned i;
+ int r;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
seq[i] = 0;
@@ -586,23 +564,23 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
continue;
}
+ if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
+ /* something was already signaled */
+ return 0;
+ }
+
seq[i] = fences[i]->seq;
- ++num_rings;
}
- /* nothing to wait for ? */
- if (num_rings == 0)
- return -ENOENT;
-
- r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
- if (r < 0) {
+ r = radeon_fence_wait_any_seq(rdev, seq, intr);
+ if (r) {
return r;
}
return 0;
}
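
radeon_fence_wait_any() collapses the fence array into per-ring target sequence numbers, with zero meaning "not waiting on this ring", and radeon_fence_any_seq_signaled() scans that array. A userland reduction of the check, with invented ring count and sequence values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_RINGS	5

/* last sequence number each ring has signaled (invented values) */
static const uint64_t last_seq[NUM_RINGS] = { 10, 7, 0, 3, 0 };

static bool
any_seq_signaled(const uint64_t target[NUM_RINGS])
{
	unsigned i;

	for (i = 0; i < NUM_RINGS; i++) {
		/* a zero target means "not waiting on this ring" */
		if (target[i] && last_seq[i] >= target[i])
			return true;
	}
	return false;
}

int
main(void)
{
	uint64_t target[NUM_RINGS] = { 12, 7, 0, 0, 0 };

	/* ring 1 already reached sequence 7, so this reports "signaled" */
	printf("%s\n", any_seq_signaled(target) ? "signaled" : "pending");
	return 0;
}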
/**
- * radeon_fence_wait_next - wait for the next fence to signal
+ * radeon_fence_wait_next_locked - wait for the next fence to signal
*
* @rdev: radeon device pointer
* @ring: ring index the fence is associated with
@@ -611,25 +589,21 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
* Returns 0 if the next fence has passed, error for all other cases.
* Caller must hold ring lock.
*/
-int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
- uint64_t seq[RADEON_NUM_RINGS] = {};
- long r;
+ uint64_t seq;
- seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
- if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
+ seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
+ if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
/* nothing to wait for, last_seq is
already the last emitted fence */
return -ENOENT;
}
- r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
- if (r < 0)
- return r;
- return 0;
+ return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}
/**
- * radeon_fence_wait_empty - wait for all fences to signal
+ * radeon_fence_wait_empty_locked - wait for all fences to signal
*
* @rdev: radeon device pointer
* @ring: ring index the fence is associated with
@@ -638,21 +612,17 @@ int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
* Returns 0 if the fences have passed, error for all other cases.
* Caller must hold ring lock.
*/
-int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
+int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
- uint64_t seq[RADEON_NUM_RINGS] = {};
- long r;
-
- seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
- if (!seq[ring])
- return 0;
+ uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+ int r;
- r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
- if (r < 0) {
- if (r == -EDEADLK)
+ r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+ if (r) {
+ if (r == -EDEADLK) {
return -EDEADLK;
-
- dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
+ }
+ dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
ring, r);
}
return 0;
@@ -668,7 +638,7 @@ int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
*/
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
- fence_get(&fence->base);
+ kref_get(&fence->kref);
return fence;
}
@@ -685,7 +655,7 @@ void radeon_fence_unref(struct radeon_fence **fence)
*fence = NULL;
if (tmp) {
- fence_put(&tmp->base);
+ kref_put(&tmp->kref, radeon_fence_destroy);
}
}
@@ -801,19 +771,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
rdev->fence_drv[ring].scratch_reg = 0;
- if (ring != R600_RING_TYPE_UVD_INDEX) {
- index = R600_WB_EVENT_OFFSET + ring * 4;
- rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
- rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
- index;
-
- } else {
- /* put fence directly behind firmware */
- index = roundup2(rdev->uvd_fw->size, 8);
- rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
- rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
- }
-
+ index = R600_WB_EVENT_OFFSET + ring * 4;
} else {
r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
if (r) {
@@ -823,13 +781,15 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
index = RADEON_WB_SCRATCH_OFFSET +
rdev->fence_drv[ring].scratch_reg -
rdev->scratch.reg_base;
- rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
- rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
}
+ rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
+ rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
rdev->fence_drv[ring].initialized = true;
+#ifdef DRMDEBUG
dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
+#endif
return 0;
}
@@ -853,10 +813,8 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
for (i = 0; i < RADEON_NUM_RINGS; ++i)
rdev->fence_drv[ring].sync_seq[i] = 0;
atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
+ rdev->fence_drv[ring].last_activity = jiffies;
rdev->fence_drv[ring].initialized = false;
- INIT_DELAYED_WORK(&rdev->fence_drv[ring].lockup_work,
- radeon_fence_check_lockup);
- rdev->fence_drv[ring].rdev = rdev;
}
/**
@@ -901,12 +859,11 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
if (!rdev->fence_drv[ring].initialized)
continue;
- r = radeon_fence_wait_empty(rdev, ring);
+ r = radeon_fence_wait_empty_locked(rdev, ring);
if (r) {
/* no need to trigger GPU reset as we are unloading */
- radeon_fence_driver_force_completion(rdev, ring);
+ radeon_fence_driver_force_completion(rdev);
}
- cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
wake_up_all(&rdev->fence_queue);
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
rdev->fence_drv[ring].initialized = false;
@@ -918,16 +875,18 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
 * radeon_fence_driver_force_completion - force all fence waiters to complete
*
* @rdev: radeon device pointer
- * @ring: the ring to complete
*
 * In case of GPU reset failure make sure no process keeps waiting on a
 * fence that will never complete.
*/
-void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
+void radeon_fence_driver_force_completion(struct radeon_device *rdev)
{
- if (rdev->fence_drv[ring].initialized) {
+ int ring;
+
+ for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+ if (!rdev->fence_drv[ring].initialized)
+ continue;
radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
- cancel_delayed_work_sync(&rdev->fence_drv[ring].lockup_work);
}
}
@@ -947,8 +906,6 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
if (!rdev->fence_drv[i].initialized)
continue;
- radeon_fence_process(rdev, i);
-
seq_printf(m, "--- ring %d ---\n", i);
seq_printf(m, "Last signaled fence 0x%016llx\n",
(unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
@@ -964,128 +921,16 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
return 0;
}
-/**
- * radeon_debugfs_gpu_reset - manually trigger a gpu reset
- *
- * Manually trigger a gpu reset at the next fence wait.
- */
-static int radeon_debugfs_gpu_reset(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- down_read(&rdev->exclusive_lock);
- seq_printf(m, "%d\n", rdev->needs_reset);
- rdev->needs_reset = true;
- wake_up_all(&rdev->fence_queue);
- up_read(&rdev->exclusive_lock);
-
- return 0;
-}
-
static struct drm_info_list radeon_debugfs_fence_list[] = {
{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
- {"radeon_gpu_reset", &radeon_debugfs_gpu_reset, 0, NULL}
};
#endif
int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 2);
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
return 0;
#endif
}
-
-static const char *radeon_fence_get_driver_name(struct fence *fence)
-{
- return "radeon";
-}
-
-static const char *radeon_fence_get_timeline_name(struct fence *f)
-{
- struct radeon_fence *fence = to_radeon_fence(f);
- switch (fence->ring) {
- case RADEON_RING_TYPE_GFX_INDEX: return "radeon.gfx";
- case CAYMAN_RING_TYPE_CP1_INDEX: return "radeon.cp1";
- case CAYMAN_RING_TYPE_CP2_INDEX: return "radeon.cp2";
- case R600_RING_TYPE_DMA_INDEX: return "radeon.dma";
- case CAYMAN_RING_TYPE_DMA1_INDEX: return "radeon.dma1";
- case R600_RING_TYPE_UVD_INDEX: return "radeon.uvd";
- case TN_RING_TYPE_VCE1_INDEX: return "radeon.vce1";
- case TN_RING_TYPE_VCE2_INDEX: return "radeon.vce2";
- default: WARN_ON_ONCE(1); return "radeon.unk";
- }
-}
-
-static inline bool radeon_test_signaled(struct radeon_fence *fence)
-{
- return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
-}
-
-struct radeon_wait_cb {
- struct fence_cb base;
- void *task;
-};
-
-static void
-radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
-{
- struct radeon_wait_cb *wait =
- container_of(cb, struct radeon_wait_cb, base);
- wake_up_process(wait->task);
-}
-
-static signed long radeon_fence_default_wait(struct fence *f, bool intr,
- signed long t)
-{
- struct radeon_fence *fence = to_radeon_fence(f);
- struct radeon_device *rdev = fence->rdev;
- struct radeon_wait_cb cb;
-
- cb.task = curproc;
-
- if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
- return t;
-
- while (t > 0) {
- if (intr)
- set_current_state(TASK_INTERRUPTIBLE);
- else
- set_current_state(TASK_UNINTERRUPTIBLE);
-
- /*
- * radeon_test_signaled must be called after
- * set_current_state to prevent a race with wake_up_process
- */
- if (radeon_test_signaled(fence))
- break;
-
- if (rdev->needs_reset) {
- t = -EDEADLK;
- break;
- }
-
- KASSERT(sch_ident != NULL);
- t = schedule_timeout(t);
-
- if (t > 0 && intr && signal_pending(current))
- t = -ERESTARTSYS;
- }
-
- __set_current_state(TASK_RUNNING);
- fence_remove_callback(f, &cb.base);
-
- return t;
-}
-
-const struct fence_ops radeon_fence_ops = {
- .get_driver_name = radeon_fence_get_driver_name,
- .get_timeline_name = radeon_fence_get_timeline_name,
- .enable_signaling = radeon_fence_enable_signaling,
- .signaled = radeon_fence_is_signaled,
- .wait = radeon_fence_default_wait,
- .release = NULL,
-};
diff --git a/sys/dev/pci/drm/radeon/radeon_gart.c b/sys/dev/pci/drm/radeon/radeon_gart.c
index 4ab155bd0c9..f77ac75f490 100644
--- a/sys/dev/pci/drm/radeon/radeon_gart.c
+++ b/sys/dev/pci/drm/radeon/radeon_gart.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_gart.c,v 1.10 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -28,6 +29,7 @@
#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/radeon_drm.h>
#include "radeon.h"
+#include "radeon_reg.h"
/*
* GART
@@ -78,6 +80,7 @@ int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
if (dmah == NULL) {
return -ENOMEM;
}
+
rdev->gart.dmah = dmah;
rdev->gart.table_addr = dmah->map->dm_segs[0].ds_addr;
rdev->gart.ptr = dmah->kva;
@@ -99,13 +102,6 @@ void radeon_gart_table_ram_free(struct radeon_device *rdev)
if (rdev->gart.ptr == NULL) {
return;
}
-#ifdef CONFIG_X86
- if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
- rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
- set_memory_wb((unsigned long)rdev->gart.ptr,
- rdev->gart.table_size >> PAGE_SHIFT);
- }
-#endif
drm_dmamem_free(rdev->dmat, rdev->gart.dmah);
rdev->gart.ptr = NULL;
rdev->gart.table_addr = 0;
@@ -128,7 +124,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
if (rdev->gart.robj == NULL) {
r = radeon_bo_create(rdev, rdev->gart.table_size,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- 0, NULL, NULL, &rdev->gart.robj);
+ NULL, &rdev->gart.robj);
if (r) {
return r;
}
@@ -165,19 +161,6 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
radeon_bo_unpin(rdev->gart.robj);
radeon_bo_unreserve(rdev->gart.robj);
rdev->gart.table_addr = gpu_addr;
-
- if (!r) {
- int i;
-
- /* We might have dropped some GART table updates while it wasn't
- * mapped, restore all entries
- */
- for (i = 0; i < rdev->gart.num_gpu_pages; i++)
- radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
- mb();
- radeon_gart_tlb_flush(rdev);
- }
-
return r;
}
@@ -219,6 +202,7 @@ void radeon_gart_table_vram_free(struct radeon_device *rdev)
if (rdev->gart.robj == NULL) {
return;
}
+ radeon_gart_table_vram_unpin(rdev);
radeon_bo_unref(&rdev->gart.robj);
}
@@ -241,6 +225,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
unsigned t;
unsigned p;
int i, j;
+ u64 page_base;
if (!rdev->gart.ready) {
WARN(1, "trying to unbind memory from uninitialized GART !\n");
@@ -251,19 +236,18 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
for (i = 0; i < pages; i++, p++) {
if (rdev->gart.pages[p]) {
rdev->gart.pages[p] = NULL;
+ rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
+ page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
- rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
if (rdev->gart.ptr) {
- radeon_gart_set_page(rdev, t,
- rdev->dummy_page.entry);
+ radeon_gart_set_page(rdev, t, page_base);
}
+ page_base += RADEON_GPU_PAGE_SIZE;
}
}
}
- if (rdev->gart.ptr) {
- mb();
- radeon_gart_tlb_flush(rdev);
- }
+ mb();
+ radeon_gart_tlb_flush(rdev);
}
/**
@@ -274,19 +258,17 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
* @pages: number of pages to bind
* @pagelist: pages to bind
* @dma_addr: DMA addresses of pages
- * @flags: RADEON_GART_PAGE_* flags
*
* Binds the requested pages to the gart page table
* (all asics).
* Returns 0 for success, -EINVAL for failure.
*/
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
- int pages, struct vm_page **pagelist, dma_addr_t *dma_addr,
- uint32_t flags)
+ int pages, struct vm_page **pagelist, bus_addr_t *dma_addr)
{
unsigned t;
unsigned p;
- uint64_t page_base, page_entry;
+ uint64_t page_base;
int i, j;
if (!rdev->gart.ready) {
@@ -297,25 +279,49 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
+ rdev->gart.pages_addr[p] = dma_addr[i];
rdev->gart.pages[p] = pagelist[i];
- page_base = dma_addr[i];
- for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
- page_entry = radeon_gart_get_page_entry(page_base, flags);
- rdev->gart.pages_entry[t] = page_entry;
- if (rdev->gart.ptr) {
- radeon_gart_set_page(rdev, t, page_entry);
+ if (rdev->gart.ptr) {
+ page_base = rdev->gart.pages_addr[p];
+ for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ radeon_gart_set_page(rdev, t, page_base);
+ page_base += RADEON_GPU_PAGE_SIZE;
}
- page_base += RADEON_GPU_PAGE_SIZE;
}
}
- if (rdev->gart.ptr) {
- mb();
- radeon_gart_tlb_flush(rdev);
- }
+ mb();
+ radeon_gart_tlb_flush(rdev);
return 0;
}
/**
+ * radeon_gart_restore - bind all pages in the gart page table
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Binds all pages in the gart page table (all asics).
+ * Used to rebuild the gart table on device startup or resume.
+ */
+void radeon_gart_restore(struct radeon_device *rdev)
+{
+ int i, j, t;
+ u64 page_base;
+
+ if (!rdev->gart.ptr) {
+ return;
+ }
+ for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
+ page_base = rdev->gart.pages_addr[i];
+ for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ radeon_gart_set_page(rdev, t, page_base);
+ page_base += RADEON_GPU_PAGE_SIZE;
+ }
+ }
+ mb();
+ radeon_gart_tlb_flush(rdev);
+}
+
+/**
* radeon_gart_init - init the driver info for managing the gart
*
* @rdev: radeon_device pointer
@@ -341,23 +347,26 @@ int radeon_gart_init(struct radeon_device *rdev)
/* Compute table size */
rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
+#ifdef DRMDEBUG
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
+#endif
/* Allocate pages table */
rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
if (rdev->gart.pages == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
}
- rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
- rdev->gart.num_gpu_pages);
- if (rdev->gart.pages_entry == NULL) {
+ rdev->gart.pages_addr = vzalloc(sizeof(bus_addr_t) *
+ rdev->gart.num_cpu_pages);
+ if (rdev->gart.pages_addr == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
}
/* set GART entry to point to the dummy page by default */
- for (i = 0; i < rdev->gart.num_gpu_pages; i++)
- rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
+ for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
+ rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
+ }
return 0;
}
@@ -370,15 +379,918 @@ int radeon_gart_init(struct radeon_device *rdev)
*/
void radeon_gart_fini(struct radeon_device *rdev)
{
- if (rdev->gart.ready) {
+ if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
/* unbind pages */
radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
}
rdev->gart.ready = false;
vfree(rdev->gart.pages);
- vfree(rdev->gart.pages_entry);
+ vfree(rdev->gart.pages_addr);
rdev->gart.pages = NULL;
- rdev->gart.pages_entry = NULL;
+ rdev->gart.pages_addr = NULL;
radeon_dummy_page_fini(rdev);
}
+
+/*
+ * GPUVM
+ * GPUVM is similar to the legacy gart on older asics; however,
+ * rather than there being a single global gart table
+ * for the entire GPU, there are multiple VM page tables active
+ * at any given time. The VM page tables can contain a mix of
+ * vram pages and system memory pages, and system memory pages
+ * can be mapped as snooped (cached system pages) or unsnooped
+ * (uncached system pages).
+ * Each VM has an ID associated with it and there is a page table
+ * associated with each VMID. When executing a command buffer,
+ * the kernel tells the ring what VMID to use for that command
+ * buffer. VMIDs are allocated dynamically as commands are submitted.
+ * The userspace drivers maintain their own address space and the kernel
+ * sets up their page tables accordingly when they submit their
+ * command buffers and a VMID is assigned.
+ * Cayman/Trinity support up to 8 active VMs at any given time;
+ * SI supports 16.
+ */
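To make the layout concrete: the routines below split a GPU virtual address into a page-directory slot and a slot within one page table. A small sketch of that decomposition, under the assumption that RADEON_VM_BLOCK_SIZE is the log2 of the entries per page table (the helper and its parameter names are illustrative, not driver API):

	#include <sys/types.h>

	/* Sketch: decompose a GPU VA the way radeon_vm_update_pdes() and
	 * radeon_vm_update_ptes() do below; block_size stands in for
	 * RADEON_VM_BLOCK_SIZE and page_size for RADEON_GPU_PAGE_SIZE. */
	void
	split_va(uint64_t vaddr, uint64_t page_size, unsigned block_size,
	    uint64_t *pt_idx, uint64_t *pte_idx)
	{
		uint64_t pfn = vaddr / page_size;	/* GPU page number */
		*pt_idx = pfn >> block_size;		/* page directory slot */
		*pte_idx = pfn & ((1ULL << block_size) - 1); /* slot in the PT */
	}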
+
+/*
+ * vm helpers
+ *
+ * TODO bind a default page at vm initialization for default address
+ */
+
+/**
+ * radeon_vm_num_pdes - return the number of page directory entries
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the number of page directory entries (cayman+).
+ */
+static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
+{
+ return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
+}
+
+/**
+ * radeon_vm_directory_size - returns the size of the page directory in bytes
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the size of the page directory in bytes (cayman+).
+ */
+static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
+{
+ return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
+}
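As a worked example with illustrative numbers: if max_pfn were 1 << 20 GPU pages and RADEON_VM_BLOCK_SIZE were 9, radeon_vm_num_pdes() would return 1 << 11 = 2048 entries, so the directory needs 2048 * 8 = 16384 bytes, which is already a multiple of the GPU page size and therefore unchanged by RADEON_GPU_PAGE_ALIGN.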
+
+/**
+ * radeon_vm_manager_init - init the vm manager
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Init the vm manager (cayman+).
+ * Returns 0 for success, error for failure.
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev)
+{
+ struct radeon_vm *vm;
+ struct radeon_bo_va *bo_va;
+ int r;
+ unsigned size;
+
+ if (!rdev->vm_manager.enabled) {
+ /* allocate enough for 2 full VM pts */
+ size = radeon_vm_directory_size(rdev);
+ size += rdev->vm_manager.max_pfn * 8;
+ size *= 2;
+ r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
+ RADEON_GPU_PAGE_ALIGN(size),
+ RADEON_GPU_PAGE_SIZE,
+ RADEON_GEM_DOMAIN_VRAM);
+ if (r) {
+ dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
+ (rdev->vm_manager.max_pfn * 8) >> 10);
+ return r;
+ }
+
+ r = radeon_asic_vm_init(rdev);
+ if (r)
+ return r;
+
+ rdev->vm_manager.enabled = true;
+
+ r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
+ if (r)
+ return r;
+ }
+
+ /* restore page table */
+ list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
+ if (vm->page_directory == NULL)
+ continue;
+
+ list_for_each_entry(bo_va, &vm->va, vm_list) {
+ bo_va->valid = false;
+ }
+ }
+ return 0;
+}
+
+/**
+ * radeon_vm_free_pt - free the page table for a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to unbind
+ *
+ * Free the page table of a specific vm (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+static void radeon_vm_free_pt(struct radeon_device *rdev,
+ struct radeon_vm *vm)
+{
+ struct radeon_bo_va *bo_va;
+ int i;
+
+ if (!vm->page_directory)
+ return;
+
+ list_del_init(&vm->list);
+ radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
+
+ list_for_each_entry(bo_va, &vm->va, vm_list) {
+ bo_va->valid = false;
+ }
+
+ if (vm->page_tables == NULL)
+ return;
+
+ for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
+ radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);
+
+ kfree(vm->page_tables);
+}
+
+/**
+ * radeon_vm_manager_fini - tear down the vm manager
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the VM manager (cayman+).
+ */
+void radeon_vm_manager_fini(struct radeon_device *rdev)
+{
+ struct radeon_vm *vm, *tmp;
+ int i;
+
+ if (!rdev->vm_manager.enabled)
+ return;
+
+ mutex_lock(&rdev->vm_manager.lock);
+ /* free all allocated page tables */
+ list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
+ mutex_lock(&vm->mutex);
+ radeon_vm_free_pt(rdev, vm);
+ mutex_unlock(&vm->mutex);
+ }
+ for (i = 0; i < RADEON_NUM_VM; ++i) {
+ radeon_fence_unref(&rdev->vm_manager.active[i]);
+ }
+ radeon_asic_vm_fini(rdev);
+ mutex_unlock(&rdev->vm_manager.lock);
+
+ radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
+ radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
+ rdev->vm_manager.enabled = false;
+}
+
+/**
+ * radeon_vm_evict - evict page table to make room for new one
+ *
+ * @rdev: radeon_device pointer
+ * @vm: VM we want to allocate something for
+ *
+ * Evict a VM from the lru, making sure that it isn't @vm. (cayman+).
+ * Returns 0 for success, -ENOMEM for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ struct radeon_vm *vm_evict;
+
+ if (list_empty(&rdev->vm_manager.lru_vm))
+ return -ENOMEM;
+
+ vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
+ struct radeon_vm, list);
+ if (vm_evict == vm)
+ return -ENOMEM;
+
+ mutex_lock(&vm_evict->mutex);
+ radeon_vm_free_pt(rdev, vm_evict);
+ mutex_unlock(&vm_evict->mutex);
+ return 0;
+}
+
+/**
+ * radeon_vm_alloc_pt - allocates a page table for a VM
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to bind
+ *
+ * Allocate a page table for the requested vm (cayman+).
+ * Returns 0 for success, error for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ unsigned pd_size, pts_size;
+ u64 *pd_addr;
+ int r;
+
+ if (vm == NULL) {
+ return -EINVAL;
+ }
+
+ if (vm->page_directory != NULL) {
+ return 0;
+ }
+
+retry:
+ pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
+ r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
+ &vm->page_directory, pd_size,
+ RADEON_GPU_PAGE_SIZE, false);
+ if (r == -ENOMEM) {
+ r = radeon_vm_evict(rdev, vm);
+ if (r)
+ return r;
+ goto retry;
+
+ } else if (r) {
+ return r;
+ }
+
+ vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
+
+ /* Initially clear the page directory */
+ pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
+ memset(pd_addr, 0, pd_size);
+
+ pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
+ vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
+
+ if (vm->page_tables == NULL) {
+ DRM_ERROR("Cannot allocate memory for page table array\n");
+ radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
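The retry: label above implements an allocate-or-evict loop: an -ENOMEM from the suballocator evicts the least recently used VM's page tables and tries again, while any other error is returned as-is. The same pattern recurs in radeon_vm_update_pdes() further down. A stripped-down sketch of the pattern (the callback names are hypothetical):

	#include <sys/errno.h>

	/* Sketch of the allocate-or-evict loop; try_alloc/evict_one stand in
	 * for radeon_sa_bo_new() and radeon_vm_evict(). */
	int
	alloc_with_eviction(int (*try_alloc)(void *), int (*evict_one)(void *),
	    void *ctx)
	{
		int r;

		for (;;) {
			r = try_alloc(ctx);
			if (r != -ENOMEM)
				return r;	/* 0 on success, hard error otherwise */
			r = evict_one(ctx);
			if (r)
				return r;	/* nothing left to evict */
		}
	}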
+
+/**
+ * radeon_vm_add_to_lru - add VMs page table to LRU list
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to add to LRU
+ *
+ * Add the allocated page table to the LRU list (cayman+).
+ *
+ * Global mutex must be locked!
+ */
+void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ list_del_init(&vm->list);
+ list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
+}
+
+/**
+ * radeon_vm_grab_id - allocate the next free VMID
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ *
+ * Allocate an id for the vm (cayman+).
+ * Returns the fence we need to sync to (if any).
+ *
+ * Global and local mutex must be locked!
+ */
+struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
+ struct radeon_vm *vm, int ring)
+{
+ struct radeon_fence *best[RADEON_NUM_RINGS] = {};
+ unsigned choices[2] = {};
+ unsigned i;
+
+ /* check if the id is still valid */
+ if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id])
+ return NULL;
+
+	/* we definitely need to flush */
+ radeon_fence_unref(&vm->last_flush);
+
+ /* skip over VMID 0, since it is the system VM */
+ for (i = 1; i < rdev->vm_manager.nvm; ++i) {
+ struct radeon_fence *fence = rdev->vm_manager.active[i];
+
+ if (fence == NULL) {
+ /* found a free one */
+ vm->id = i;
+ return NULL;
+ }
+
+ if (radeon_fence_is_earlier(fence, best[fence->ring])) {
+ best[fence->ring] = fence;
+ choices[fence->ring == ring ? 0 : 1] = i;
+ }
+ }
+
+ for (i = 0; i < 2; ++i) {
+ if (choices[i]) {
+ vm->id = choices[i];
+ return rdev->vm_manager.active[choices[i]];
+ }
+ }
+
+ /* should never happen */
+ BUG();
+ return NULL;
+}
+
+/**
+ * radeon_vm_fence - remember fence for vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm we want to fence
+ * @fence: fence to remember
+ *
+ * Fence the vm (cayman+).
+ * Set the fence used to protect page table and id.
+ *
+ * Global and local mutex must be locked!
+ */
+void radeon_vm_fence(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_fence *fence)
+{
+ radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
+ rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
+
+ radeon_fence_unref(&vm->fence);
+ vm->fence = radeon_fence_ref(fence);
+}
+
+/**
+ * radeon_vm_bo_find - find the bo_va for a specific vm & bo
+ *
+ * @vm: requested vm
+ * @bo: requested buffer object
+ *
+ * Find @bo inside the requested vm (cayman+).
+ * Search inside the @bos vm list for the requested vm
+ * Returns the found bo_va or NULL if none is found
+ *
+ * Object has to be reserved!
+ */
+struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
+ struct radeon_bo *bo)
+{
+ struct radeon_bo_va *bo_va;
+
+ list_for_each_entry(bo_va, &bo->va, bo_list) {
+ if (bo_va->vm == vm) {
+ return bo_va;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * radeon_vm_bo_add - add a bo to a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @bo: radeon buffer object
+ *
+ * Add @bo into the requested vm (cayman+).
+ * Add @bo to the list of bos associated with the vm
+ * Returns newly added bo_va or NULL for failure
+ *
+ * Object has to be reserved!
+ */
+struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo)
+{
+ struct radeon_bo_va *bo_va;
+
+ bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+ if (bo_va == NULL) {
+ return NULL;
+ }
+ bo_va->vm = vm;
+ bo_va->bo = bo;
+ bo_va->soffset = 0;
+ bo_va->eoffset = 0;
+ bo_va->flags = 0;
+ bo_va->valid = false;
+ bo_va->ref_count = 1;
+ INIT_LIST_HEAD(&bo_va->bo_list);
+ INIT_LIST_HEAD(&bo_va->vm_list);
+
+ mutex_lock(&vm->mutex);
+ list_add(&bo_va->vm_list, &vm->va);
+ list_add_tail(&bo_va->bo_list, &bo->va);
+ mutex_unlock(&vm->mutex);
+
+ return bo_va;
+}
+
+/**
+ * radeon_vm_bo_set_addr - set bos virtual address inside a vm
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: bo_va to store the address
+ * @soffset: requested offset of the buffer in the VM address space
+ * @flags: attributes of pages (read/write/valid/etc.)
+ *
+ * Set offset of @bo_va (cayman+).
+ * Validate and set the offset requested within the vm address space.
+ * Returns 0 for success, error for failure.
+ *
+ * Object has to be reserved!
+ */
+int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ struct radeon_bo_va *bo_va,
+ uint64_t soffset,
+ uint32_t flags)
+{
+ uint64_t size = radeon_bo_size(bo_va->bo);
+ uint64_t eoffset, last_offset = 0;
+ struct radeon_vm *vm = bo_va->vm;
+ struct radeon_bo_va *tmp;
+ struct list_head *head;
+ unsigned last_pfn;
+
+ if (soffset) {
+		/* make sure object fits at this offset */
+ eoffset = soffset + size;
+ if (soffset >= eoffset) {
+ return -EINVAL;
+ }
+
+ last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
+ if (last_pfn > rdev->vm_manager.max_pfn) {
+ dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
+ last_pfn, rdev->vm_manager.max_pfn);
+ return -EINVAL;
+ }
+
+ } else {
+ eoffset = last_pfn = 0;
+ }
+
+ mutex_lock(&vm->mutex);
+ head = &vm->va;
+ last_offset = 0;
+ list_for_each_entry(tmp, &vm->va, vm_list) {
+ if (bo_va == tmp) {
+ /* skip over currently modified bo */
+ continue;
+ }
+
+ if (soffset >= last_offset && eoffset <= tmp->soffset) {
+ /* bo can be added before this one */
+ break;
+ }
+ if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
+ /* bo and tmp overlap, invalid offset */
+ dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
+ bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
+ (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
+ mutex_unlock(&vm->mutex);
+ return -EINVAL;
+ }
+ last_offset = tmp->eoffset;
+ head = &tmp->vm_list;
+ }
+
+ bo_va->soffset = soffset;
+ bo_va->eoffset = eoffset;
+ bo_va->flags = flags;
+ bo_va->valid = false;
+ list_move(&bo_va->vm_list, head);
+
+ mutex_unlock(&vm->mutex);
+ return 0;
+}
+
+/**
+ * radeon_vm_map_gart - get the physical address of a gart page
+ *
+ * @rdev: radeon_device pointer
+ * @addr: the unmapped addr
+ *
+ * Look up the physical address of the page that the pte resolves
+ * to (cayman+).
+ * Returns the physical address of the page.
+ */
+uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
+{
+ uint64_t result;
+
+ /* page table offset */
+ result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
+
+	/* in case cpu page size != gpu page size */
+ result |= addr & (PAGE_MASK);
+
+ return result;
+}
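On OpenBSD, PAGE_MASK is PAGE_SIZE - 1 (the low bits, unlike Linux where it is the complement), so the OR above re-attaches the byte offset within the CPU page to the physical page base. Worked example with made-up numbers: with 4 KB pages, addr = 0x5abc selects pages_addr[5]; if that entry held the physical base 0x80000000, the function would return 0x80000000 | (0x5abc & 0xfff) = 0x80000abc.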
+
+/**
+ * radeon_vm_update_pdes - make sure that page directory is valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory (cayman+).
+ * Returns 0 for success, error for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+static int radeon_vm_update_pdes(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ uint64_t start, uint64_t end)
+{
+ static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
+
+ uint64_t last_pde = ~0, last_pt = ~0;
+ unsigned count = 0;
+ uint64_t pt_idx;
+ int r;
+
+ start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+ end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+
+ /* walk over the address space and update the page directory */
+ for (pt_idx = start; pt_idx <= end; ++pt_idx) {
+ uint64_t pde, pt;
+
+ if (vm->page_tables[pt_idx])
+ continue;
+
+retry:
+ r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
+ &vm->page_tables[pt_idx],
+ RADEON_VM_PTE_COUNT * 8,
+ RADEON_GPU_PAGE_SIZE, false);
+
+ if (r == -ENOMEM) {
+ r = radeon_vm_evict(rdev, vm);
+ if (r)
+ return r;
+ goto retry;
+ } else if (r) {
+ return r;
+ }
+
+ pde = vm->pd_gpu_addr + pt_idx * 8;
+
+ pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
+
+ if (((last_pde + 8 * count) != pde) ||
+ ((last_pt + incr * count) != pt)) {
+
+ if (count) {
+ radeon_asic_vm_set_page(rdev, last_pde,
+ last_pt, count, incr,
+ RADEON_VM_PAGE_VALID);
+ }
+
+ count = 1;
+ last_pde = pde;
+ last_pt = pt;
+ } else {
+ ++count;
+ }
+ }
+
+ if (count) {
+ radeon_asic_vm_set_page(rdev, last_pde, last_pt, count,
+ incr, RADEON_VM_PAGE_VALID);
+
+ }
+
+ return 0;
+}
+
+/**
+ * radeon_vm_update_ptes - make sure that page tables are valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @dst: destination address to map to
+ * @flags: mapping flags
+ *
+ * Update the page tables in the range @start - @end (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+static void radeon_vm_update_ptes(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ uint64_t start, uint64_t end,
+ uint64_t dst, uint32_t flags)
+{
+ static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
+
+ uint64_t last_pte = ~0, last_dst = ~0;
+ unsigned count = 0;
+ uint64_t addr;
+
+ start = start / RADEON_GPU_PAGE_SIZE;
+ end = end / RADEON_GPU_PAGE_SIZE;
+
+ /* walk over the address space and update the page tables */
+ for (addr = start; addr < end; ) {
+ uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
+ unsigned nptes;
+ uint64_t pte;
+
+ if ((addr & ~mask) == (end & ~mask))
+ nptes = end - addr;
+ else
+ nptes = RADEON_VM_PTE_COUNT - (addr & mask);
+
+ pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
+ pte += (addr & mask) * 8;
+
+ if ((last_pte + 8 * count) != pte) {
+
+ if (count) {
+ radeon_asic_vm_set_page(rdev, last_pte,
+ last_dst, count,
+ RADEON_GPU_PAGE_SIZE,
+ flags);
+ }
+
+ count = nptes;
+ last_pte = pte;
+ last_dst = dst;
+ } else {
+ count += nptes;
+ }
+
+ addr += nptes;
+ dst += nptes * RADEON_GPU_PAGE_SIZE;
+ }
+
+ if (count) {
+ radeon_asic_vm_set_page(rdev, last_pte, last_dst, count,
+ RADEON_GPU_PAGE_SIZE, flags);
+ }
+}
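Both update routines batch contiguous entries so radeon_asic_vm_set_page() is called once per run instead of once per entry; a run is flushed as soon as the next PTE address is not exactly 8 bytes (or one increment) past the end of the batch accumulated so far. A generic, driver-free sketch of that run-length pattern (flush_runs() and its callback are hypothetical):

	#include <sys/types.h>

	/* Generic run-length batching: flush a run whenever the next value
	 * is not contiguous (one stride apart) with the run so far. */
	void
	flush_runs(const uint64_t *vals, unsigned n, uint64_t stride,
	    void (*emit)(uint64_t first, unsigned count))
	{
		uint64_t first = ~0ULL;
		unsigned count = 0, i;

		for (i = 0; i < n; i++) {
			if (count && vals[i] == first + stride * count) {
				count++;
				continue;
			}
			if (count)
				emit(first, count);
			first = vals[i];
			count = 1;
		}
		if (count)
			emit(first, count);
	}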
+
+/**
+ * radeon_vm_bo_update_pte - map a bo into the vm page table
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @bo: radeon buffer object
+ * @mem: ttm mem
+ *
+ * Fill in the page table entries for @bo (cayman+).
+ * Returns 0 for success, -EINVAL for failure.
+ *
+ * Object has to be reserved and the global and local mutex must be locked!
+ */
+int radeon_vm_bo_update_pte(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo,
+ struct ttm_mem_reg *mem)
+{
+ unsigned ridx = rdev->asic->vm.pt_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ridx];
+ struct radeon_semaphore *sem = NULL;
+ struct radeon_bo_va *bo_va;
+ unsigned nptes, npdes, ndw;
+ uint64_t addr;
+ int r;
+
+ /* nothing to do if vm isn't bound */
+ if (vm->page_directory == NULL)
+ return 0;
+
+ bo_va = radeon_vm_bo_find(vm, bo);
+ if (bo_va == NULL) {
+ dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+ return -EINVAL;
+ }
+
+ if (!bo_va->soffset) {
+		dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
+ bo, vm);
+ return -EINVAL;
+ }
+
+ if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
+ return 0;
+
+ bo_va->flags &= ~RADEON_VM_PAGE_VALID;
+ bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
+ if (mem) {
+ addr = mem->start << PAGE_SHIFT;
+ if (mem->mem_type != TTM_PL_SYSTEM) {
+ bo_va->flags |= RADEON_VM_PAGE_VALID;
+ bo_va->valid = true;
+ }
+ if (mem->mem_type == TTM_PL_TT) {
+ bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
+ } else {
+ addr += rdev->vm_manager.vram_base_offset;
+ }
+ } else {
+ addr = 0;
+ bo_va->valid = false;
+ }
+
+ if (vm->fence && radeon_fence_signaled(vm->fence)) {
+ radeon_fence_unref(&vm->fence);
+ }
+
+ if (vm->fence && vm->fence->ring != ridx) {
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ return r;
+ }
+ }
+
+ nptes = radeon_bo_ngpu_pages(bo);
+
+ /* assume two extra pdes in case the mapping overlaps the borders */
+ npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
+
+ /* estimate number of dw needed */
+ /* semaphore, fence and padding */
+ ndw = 32;
+
+ if (RADEON_VM_BLOCK_SIZE > 11)
+ /* reserve space for one header for every 2k dwords */
+ ndw += (nptes >> 11) * 4;
+ else
+ /* reserve space for one header for
+ every (1 << BLOCK_SIZE) entries */
+ ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
+
+ /* reserve space for pte addresses */
+ ndw += nptes * 2;
+
+ /* reserve space for one header for every 2k dwords */
+ ndw += (npdes >> 11) * 4;
+
+ /* reserve space for pde addresses */
+ ndw += npdes * 2;
+
+ r = radeon_ring_lock(rdev, ring, ndw);
+ if (r) {
+ return r;
+ }
+
+ if (sem && radeon_fence_need_sync(vm->fence, ridx)) {
+ radeon_semaphore_sync_rings(rdev, sem, vm->fence->ring, ridx);
+ radeon_fence_note_sync(vm->fence, ridx);
+ }
+
+ r = radeon_vm_update_pdes(rdev, vm, bo_va->soffset, bo_va->eoffset);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_vm_update_ptes(rdev, vm, bo_va->soffset, bo_va->eoffset,
+ addr, bo_va->flags);
+
+ radeon_fence_unref(&vm->fence);
+ r = radeon_fence_emit(rdev, &vm->fence, ridx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, vm->fence);
+ radeon_fence_unref(&vm->last_flush);
+
+ return 0;
+}
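To make the dword estimate above concrete with illustrative numbers: assume RADEON_VM_BLOCK_SIZE is 9 and the bo spans 256 GPU pages (1 MB at 4 KB pages). Then nptes = 256, npdes = (256 >> 9) + 2 = 2, and ndw = 32 (semaphore/fence/padding) + (256 >> 9) * 4 = 0 header dwords + 256 * 2 = 512 pte dwords + (2 >> 11) * 4 = 0 + 2 * 2 = 4 pde dwords, i.e. 548 dwords reserved on the ring.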
+
+/**
+ * radeon_vm_bo_rmv - remove a bo from a specific vm
+ *
+ * @rdev: radeon_device pointer
+ * @bo_va: requested bo_va
+ *
+ * Remove @bo_va->bo from the requested vm (cayman+).
+ * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
+ * remove the ptes for @bo_va in the page table.
+ * Returns 0 for success.
+ *
+ * Object has to be reserved!
+ */
+int radeon_vm_bo_rmv(struct radeon_device *rdev,
+ struct radeon_bo_va *bo_va)
+{
+ int r = 0;
+
+ mutex_lock(&rdev->vm_manager.lock);
+ mutex_lock(&bo_va->vm->mutex);
+ if (bo_va->soffset) {
+ r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+ }
+ mutex_unlock(&rdev->vm_manager.lock);
+ list_del(&bo_va->vm_list);
+ mutex_unlock(&bo_va->vm->mutex);
+ list_del(&bo_va->bo_list);
+
+ kfree(bo_va);
+ return r;
+}
+
+/**
+ * radeon_vm_bo_invalidate - mark the bo as invalid
+ *
+ * @rdev: radeon_device pointer
+ * @bo: radeon buffer object
+ *
+ * Mark @bo as invalid (cayman+).
+ */
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+ struct radeon_bo *bo)
+{
+ struct radeon_bo_va *bo_va;
+
+ list_for_each_entry(bo_va, &bo->va, bo_list) {
+ bo_va->valid = false;
+ }
+}
+
+/**
+ * radeon_vm_init - initialize a vm instance
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Init @vm fields (cayman+).
+ */
+void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ vm->id = 0;
+ vm->fence = NULL;
+ rw_init(&vm->mutex, "vmlk");
+ INIT_LIST_HEAD(&vm->list);
+ INIT_LIST_HEAD(&vm->va);
+}
+
+/**
+ * radeon_vm_fini - tear down a vm instance
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Tear down @vm (cayman+).
+ * Unbind the VM and remove all bos from the vm bo list
+ */
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ struct radeon_bo_va *bo_va, *tmp;
+ int r;
+
+ mutex_lock(&rdev->vm_manager.lock);
+ mutex_lock(&vm->mutex);
+ radeon_vm_free_pt(rdev, vm);
+ mutex_unlock(&rdev->vm_manager.lock);
+
+ if (!list_empty(&vm->va)) {
+ dev_err(rdev->dev, "still active bo inside vm\n");
+ }
+ list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
+ list_del_init(&bo_va->vm_list);
+ r = radeon_bo_reserve(bo_va->bo, false);
+ if (!r) {
+ list_del_init(&bo_va->bo_list);
+ radeon_bo_unreserve(bo_va->bo);
+ kfree(bo_va);
+ }
+ }
+ radeon_fence_unref(&vm->fence);
+ radeon_fence_unref(&vm->last_flush);
+ mutex_unlock(&vm->mutex);
+}
diff --git a/sys/dev/pci/drm/radeon/radeon_gem.c b/sys/dev/pci/drm/radeon/radeon_gem.c
index c9a4478b254..338fdc2a1ca 100644
--- a/sys/dev/pci/drm/radeon/radeon_gem.c
+++ b/sys/dev/pci/drm/radeon/radeon_gem.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_gem.c,v 1.10 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -38,14 +39,13 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
if (robj->gem_base.import_attach)
drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
#endif
- radeon_mn_unregister(robj);
radeon_bo_unref(&robj);
}
}
-int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
+int radeon_gem_object_create(struct radeon_device *rdev, int size,
int alignment, int initial_domain,
- u32 flags, bool kernel,
+ bool discardable, bool kernel,
struct drm_gem_object **obj)
{
struct radeon_bo *robj;
@@ -58,32 +58,28 @@ int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
alignment = PAGE_SIZE;
}
- /* Maximum bo size is the unpinned gtt size since we use the gtt to
- * handle vram to system pool migrations.
- */
- max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
+	/* maximum bo size is the minimum of visible vram and gtt size */
+ max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
if (size > max_size) {
- DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
- size >> 20, max_size >> 20);
+ printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
+ __func__, __LINE__, size >> 20, max_size >> 20);
return -ENOMEM;
}
retry:
- r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
- flags, NULL, NULL, &robj);
+ r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
if (r) {
if (r != -ERESTARTSYS) {
if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
initial_domain |= RADEON_GEM_DOMAIN_GTT;
goto retry;
}
- DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
+ DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
size, initial_domain, alignment, r);
}
return r;
}
*obj = &robj->gem_base;
- robj->pid = curproc->p_p->ps_pid;
mutex_lock(&rdev->gem.mutex);
list_add_tail(&robj->list, &rdev->gem.objects);
@@ -92,12 +88,12 @@ retry:
return 0;
}
-static int radeon_gem_set_domain(struct drm_gem_object *gobj,
+int radeon_gem_set_domain(struct drm_gem_object *gobj,
uint32_t rdomain, uint32_t wdomain)
{
struct radeon_bo *robj;
uint32_t domain;
- long r;
+ int r;
/* FIXME: reeimplement */
robj = gem_to_radeon_bo(gobj);
@@ -113,12 +109,9 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
}
if (domain == RADEON_GEM_DOMAIN_CPU) {
/* Asking for cpu access wait for object idle */
- r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
- if (!r)
- r = -EBUSY;
-
- if (r < 0 && r != -EINTR) {
- printk(KERN_ERR "Failed to wait for object: %li\n", r);
+ r = radeon_bo_wait(robj, NULL, false);
+ if (r) {
+			printk(KERN_ERR "Failed to wait for object!\n");
return r;
}
}
@@ -149,8 +142,7 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
struct radeon_bo_va *bo_va;
int r;
- if ((rdev->family < CHIP_CAYMAN) ||
- (!rdev->accel_working)) {
+ if (rdev->family < CHIP_CAYMAN) {
return 0;
}
@@ -180,8 +172,7 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
struct radeon_bo_va *bo_va;
int r;
- if ((rdev->family < CHIP_CAYMAN) ||
- (!rdev->accel_working)) {
+ if (rdev->family < CHIP_CAYMAN) {
return;
}
@@ -219,15 +210,18 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_info *args = data;
struct ttm_mem_type_manager *man;
+ unsigned i;
man = &rdev->mman.bdev.man[TTM_PL_VRAM];
args->vram_size = rdev->mc.real_vram_size;
args->vram_visible = (u64)man->size << PAGE_SHIFT;
- args->vram_visible -= rdev->vram_pin_size;
- args->gart_size = rdev->mc.gtt_size;
- args->gart_size -= rdev->gart_pin_size;
-
+ if (rdev->stollen_vga_memory)
+ args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
+ args->vram_visible -= radeon_fbdev_total_size(rdev);
+ args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
+ for(i = 0; i < RADEON_NUM_RINGS; ++i)
+ args->gart_size -= rdev->ring[i].ring_size;
return 0;
}
@@ -260,8 +254,8 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
/* create a gem object to contain this object in */
args->size = roundup(args->size, PAGE_SIZE);
r = radeon_gem_object_create(rdev, args->size, args->alignment,
- args->initial_domain, args->flags,
- false, &gobj);
+ args->initial_domain, false,
+ false, &gobj);
if (r) {
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(rdev, r);
@@ -280,97 +274,6 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
return 0;
}
-int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp)
-{
- return -ENOSYS;
-#ifdef notyet
- struct radeon_device *rdev = dev->dev_private;
- struct drm_radeon_gem_userptr *args = data;
- struct drm_gem_object *gobj;
- struct radeon_bo *bo;
- uint32_t handle;
- int r;
-
- if (offset_in_page(args->addr | args->size))
- return -EINVAL;
-
- /* reject unknown flag values */
- if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
- RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
- RADEON_GEM_USERPTR_REGISTER))
- return -EINVAL;
-
- if (args->flags & RADEON_GEM_USERPTR_READONLY) {
- /* readonly pages not tested on older hardware */
- if (rdev->family < CHIP_R600)
- return -EINVAL;
-
- } else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
- !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {
-
- /* if we want to write to it we must require anonymous
- memory and install a MMU notifier */
- return -EACCES;
- }
-
- down_read(&rdev->exclusive_lock);
-
- /* create a gem object to contain this object in */
- r = radeon_gem_object_create(rdev, args->size, 0,
- RADEON_GEM_DOMAIN_CPU, 0,
- false, &gobj);
- if (r)
- goto handle_lockup;
-
- bo = gem_to_radeon_bo(gobj);
- r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
- if (r)
- goto release_object;
-
- if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
- r = radeon_mn_register(bo, args->addr);
- if (r)
- goto release_object;
- }
-
- if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
- down_read(&current->mm->mmap_sem);
- r = radeon_bo_reserve(bo, true);
- if (r) {
- up_read(&current->mm->mmap_sem);
- goto release_object;
- }
-
- radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- radeon_bo_unreserve(bo);
- up_read(&current->mm->mmap_sem);
- if (r)
- goto release_object;
- }
-
- r = drm_gem_handle_create(filp, gobj, &handle);
- /* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(gobj);
- if (r)
- goto handle_lockup;
-
- args->handle = handle;
- up_read(&rdev->exclusive_lock);
- return 0;
-
-release_object:
- drm_gem_object_unreference_unlocked(gobj);
-
-handle_lockup:
- up_read(&rdev->exclusive_lock);
- r = radeon_gem_handle_lockup(rdev, r);
-
- return r;
-#endif
-}
-
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -414,10 +317,6 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
- if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
- drm_gem_object_unreference_unlocked(gobj);
- return -EPERM;
- }
*offset_p = radeon_bo_mmap_offset(robj);
drm_gem_object_unreference_unlocked(gobj);
return 0;
@@ -434,6 +333,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
+ struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_busy *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -445,16 +345,21 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
-
- r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
- if (r == 0)
- r = -EBUSY;
- else
- r = 0;
-
- cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
- args->domain = radeon_mem_type_to_domain(cur_placement);
+ r = radeon_bo_wait(robj, &cur_placement, true);
+ switch (cur_placement) {
+ case TTM_PL_VRAM:
+ args->domain = RADEON_GEM_DOMAIN_VRAM;
+ break;
+ case TTM_PL_TT:
+ args->domain = RADEON_GEM_DOMAIN_GTT;
+ break;
+ case TTM_PL_SYSTEM:
+ args->domain = RADEON_GEM_DOMAIN_CPU;
+ default:
+ break;
+ }
drm_gem_object_unreference_unlocked(gobj);
+ r = radeon_gem_handle_lockup(rdev, r);
return r;
}
@@ -465,27 +370,17 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_radeon_gem_wait_idle *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
- int r = 0;
- uint32_t cur_placement = 0;
- long ret;
+ int r;
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
-
- ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
- if (ret == 0)
- r = -EBUSY;
- else if (ret < 0)
- r = ret;
-
- /* Flush HDP cache via MMIO if necessary */
- cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
- if (rdev->asic->mmio_hdp_flush &&
- radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
- robj->rdev->asic->mmio_hdp_flush(rdev);
+ r = radeon_bo_wait(robj, NULL, false);
+ /* callback hw specific functions if any */
+ if (rdev->asic->ioctl_wait_idle)
+ robj->rdev->asic->ioctl_wait_idle(rdev, robj);
drm_gem_object_unreference_unlocked(gobj);
r = radeon_gem_handle_lockup(rdev, r);
return r;
@@ -532,68 +427,6 @@ out:
return r;
}
-/**
- * radeon_gem_va_update_vm -update the bo_va in its VM
- *
- * @rdev: radeon_device pointer
- * @bo_va: bo_va to update
- *
- * Update the bo_va directly after setting it's address. Errors are not
- * vital here, so they are not reported back to userspace.
- */
-static void radeon_gem_va_update_vm(struct radeon_device *rdev,
- struct radeon_bo_va *bo_va)
-{
- struct ttm_validate_buffer tv, *entry;
- struct radeon_bo_list *vm_bos;
- struct ww_acquire_ctx ticket;
- struct list_head list;
- unsigned domain;
- int r;
-
- INIT_LIST_HEAD(&list);
-
- tv.bo = &bo_va->bo->tbo;
- tv.shared = true;
- list_add(&tv.head, &list);
-
- vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
- if (!vm_bos)
- return;
-
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
- if (r)
- goto error_free;
-
- list_for_each_entry(entry, &list, head) {
- domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
- /* if anything is swapped out don't swap it in here,
- just abort and wait for the next CS */
- if (domain == RADEON_GEM_DOMAIN_CPU)
- goto error_unreserve;
- }
-
- mutex_lock(&bo_va->vm->mutex);
- r = radeon_vm_clear_freed(rdev, bo_va->vm);
- if (r)
- goto error_unlock;
-
- if (bo_va->it.start)
- r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);
-
-error_unlock:
- mutex_unlock(&bo_va->vm->mutex);
-
-error_unreserve:
- ttm_eu_backoff_reservation(&ticket, &list);
-
-error_free:
- drm_free_large(vm_bos);
-
- if (r && r != -ERESTARTSYS)
- DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
-}
-
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -641,6 +474,11 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
args->operation = RADEON_VA_RESULT_ERROR;
return -EINVAL;
}
+ if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
+		dev_err(&dev->pdev->dev, "only snooped mappings are supported for now\n");
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -EINVAL;
+ }
switch (args->operation) {
case RADEON_VA_MAP:
@@ -674,10 +512,9 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
switch (args->operation) {
case RADEON_VA_MAP:
- if (bo_va->it.start) {
+ if (bo_va->soffset) {
args->operation = RADEON_VA_RESULT_VA_EXIST;
- args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
- radeon_bo_unreserve(rbo);
+ args->offset = bo_va->soffset;
goto out;
}
r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
@@ -688,54 +525,12 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
default:
break;
}
- if (!r)
- radeon_gem_va_update_vm(rdev, bo_va);
args->operation = RADEON_VA_RESULT_OK;
if (r) {
args->operation = RADEON_VA_RESULT_ERROR;
}
out:
- drm_gem_object_unreference_unlocked(gobj);
- return r;
-}
-
-int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp)
-{
- struct drm_radeon_gem_op *args = data;
- struct drm_gem_object *gobj;
- struct radeon_bo *robj;
- int r;
-
- gobj = drm_gem_object_lookup(dev, filp, args->handle);
- if (gobj == NULL) {
- return -ENOENT;
- }
- robj = gem_to_radeon_bo(gobj);
-
- r = -EPERM;
- if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
- goto out;
-
- r = radeon_bo_reserve(robj, false);
- if (unlikely(r))
- goto out;
-
- switch (args->op) {
- case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
- args->value = robj->initial_domain;
- break;
- case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
- robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
- RADEON_GEM_DOMAIN_GTT |
- RADEON_GEM_DOMAIN_CPU);
- break;
- default:
- r = -EINVAL;
- }
-
- radeon_bo_unreserve(robj);
-out:
+ radeon_bo_unreserve(rbo);
drm_gem_object_unreference_unlocked(gobj);
return r;
}
@@ -751,11 +546,12 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
args->size = args->pitch * args->height;
- args->size = roundup2(args->size, PAGE_SIZE);
+ args->size = PAGE_ALIGN(args->size);
r = radeon_gem_object_create(rdev, args->size, 0,
- RADEON_GEM_DOMAIN_VRAM, 0,
- false, &gobj);
+ RADEON_GEM_DOMAIN_VRAM,
+ false, ttm_bo_type_device,
+ &gobj);
if (r)
return -ENOMEM;
@@ -769,51 +565,9 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
return 0;
}
-#if defined(CONFIG_DEBUG_FS)
-static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
+int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_bo *rbo;
- unsigned i = 0;
-
- mutex_lock(&rdev->gem.mutex);
- list_for_each_entry(rbo, &rdev->gem.objects, list) {
- unsigned domain;
- const char *placement;
-
- domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
- switch (domain) {
- case RADEON_GEM_DOMAIN_VRAM:
- placement = "VRAM";
- break;
- case RADEON_GEM_DOMAIN_GTT:
- placement = " GTT";
- break;
- case RADEON_GEM_DOMAIN_CPU:
- default:
- placement = " CPU";
- break;
- }
- seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
- i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
- placement, (unsigned long)rbo->pid);
- i++;
- }
- mutex_unlock(&rdev->gem.mutex);
- return 0;
-}
-
-static struct drm_info_list radeon_debugfs_gem_list[] = {
- {"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
-};
-#endif
-
-int radeon_gem_debugfs_init(struct radeon_device *rdev)
-{
-#if defined(CONFIG_DEBUG_FS)
- return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
-#endif
- return 0;
+ return drm_gem_handle_delete(file_priv, handle);
}
diff --git a/sys/dev/pci/drm/radeon/radeon_i2c.c b/sys/dev/pci/drm/radeon/radeon_i2c.c
index 2794f28b9d0..eba4d3a6553 100644
--- a/sys/dev/pci/drm/radeon/radeon_i2c.c
+++ b/sys/dev/pci/drm/radeon/radeon_i2c.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_i2c.c,v 1.9 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -89,15 +90,13 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux)
/* bit banging i2c */
-static int pre_xfer(struct i2c_adapter *i2c_adap)
+static int pre_xfer(void *cookie)
{
- struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+ struct radeon_i2c_chan *i2c = cookie;
struct radeon_device *rdev = i2c->dev->dev_private;
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp;
- mutex_lock(&i2c->mutex);
-
/* RV410 appears to have a bug where the hw i2c in reset
* holds the i2c port in a bad state - switch hw i2c away before
* doing DDC - do this for all r200s/r300s/r400s for safety sake
@@ -159,9 +158,9 @@ static int pre_xfer(struct i2c_adapter *i2c_adap)
return 0;
}
-static void post_xfer(struct i2c_adapter *i2c_adap)
+static void post_xfer(void *cookie)
{
- struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+ struct radeon_i2c_chan *i2c = cookie;
struct radeon_device *rdev = i2c->dev->dev_private;
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp;
@@ -174,8 +173,6 @@ static void post_xfer(struct i2c_adapter *i2c_adap)
temp = RREG32(rec->mask_data_reg) & ~rec->mask_data_mask;
WREG32(rec->mask_data_reg, temp);
temp = RREG32(rec->mask_data_reg);
-
- mutex_unlock(&i2c->mutex);
}
static int get_clock(void *i2c_priv)
@@ -283,16 +280,14 @@ radeon_bb_read_bits(void *cookie)
int
radeon_acquire_bus(void *cookie, int flags)
{
- struct radeon_i2c_chan *i2c = cookie;
- pre_xfer(&i2c->adapter);
+ pre_xfer(cookie);
return (0);
}
void
radeon_release_bus(void *cookie, int flags)
{
- struct radeon_i2c_chan *i2c = cookie;
- post_xfer(&i2c->adapter);
+ post_xfer(cookie);
}
int
@@ -325,7 +320,6 @@ radeon_write_byte(void *cookie, u_int8_t byte, int flags)
return (i2c_bitbang_write_byte(cookie, byte, flags, &radeon_bbops));
}
-
/* hw i2c */
static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
@@ -912,8 +906,6 @@ static int radeon_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
struct radeon_i2c_bus_rec *rec = &i2c->rec;
int ret = 0;
- mutex_lock(&i2c->mutex);
-
switch (rdev->family) {
case CHIP_R100:
case CHIP_RV100:
@@ -980,8 +972,6 @@ static int radeon_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
break;
}
- mutex_unlock(&i2c->mutex);
-
return ret;
}
@@ -1006,7 +996,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
{
struct radeon_device *rdev = dev->dev_private;
struct radeon_i2c_chan *i2c;
- int ret;
+ int ret = 0;
/* don't add the mm_i2c bus unless hw_i2c is enabled */
if (rec->mm_i2c && (radeon_hw_i2c == 0))
@@ -1017,14 +1007,13 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
return NULL;
i2c->rec = *rec;
-#ifdef __linux__
+#ifdef notyet
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
i2c->adapter.dev.parent = &dev->pdev->dev;
#endif
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
- rw_init(&i2c->mutex, "riic");
if (rec->mm_i2c ||
(rec->hw_capable &&
radeon_hw_i2c &&
@@ -1055,28 +1044,28 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
/* set the radeon bit adapter */
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
"Radeon i2c bit bus %s", name);
- i2c->adapter.algo_data = &i2c->bit;
#ifdef notyet
- i2c->bit.pre_xfer = pre_xfer;
- i2c->bit.post_xfer = post_xfer;
- i2c->bit.setsda = set_data;
- i2c->bit.setscl = set_clock;
- i2c->bit.getsda = get_data;
- i2c->bit.getscl = get_clock;
- i2c->bit.udelay = 10;
- i2c->bit.timeout = usecs_to_jiffies(2200); /* from VESA */
- i2c->bit.data = i2c;
+ i2c->adapter.algo_data = &i2c->algo.bit;
+ i2c->algo.bit.pre_xfer = pre_xfer;
+ i2c->algo.bit.post_xfer = post_xfer;
+ i2c->algo.bit.setsda = set_data;
+ i2c->algo.bit.setscl = set_clock;
+ i2c->algo.bit.getsda = get_data;
+ i2c->algo.bit.getscl = get_clock;
+ i2c->algo.bit.udelay = 10;
+ i2c->algo.bit.timeout = usecs_to_jiffies(2200); /* from VESA */
+ i2c->algo.bit.data = i2c;
+ ret = i2c_bit_add_bus(&i2c->adapter);
#else
- i2c->bit.ic.ic_cookie = i2c;
- i2c->bit.ic.ic_acquire_bus = radeon_acquire_bus;
- i2c->bit.ic.ic_release_bus = radeon_release_bus;
- i2c->bit.ic.ic_send_start = radeon_send_start;
- i2c->bit.ic.ic_send_stop = radeon_send_stop;
- i2c->bit.ic.ic_initiate_xfer = radeon_initiate_xfer;
- i2c->bit.ic.ic_read_byte = radeon_read_byte;
- i2c->bit.ic.ic_write_byte = radeon_write_byte;
+ i2c->adapter.ic.ic_cookie = i2c;
+ i2c->adapter.ic.ic_acquire_bus = radeon_acquire_bus;
+ i2c->adapter.ic.ic_release_bus = radeon_release_bus;
+ i2c->adapter.ic.ic_send_start = radeon_send_start;
+ i2c->adapter.ic.ic_send_stop = radeon_send_stop;
+ i2c->adapter.ic.ic_initiate_xfer = radeon_initiate_xfer;
+ i2c->adapter.ic.ic_read_byte = radeon_read_byte;
+ i2c->adapter.ic.ic_write_byte = radeon_write_byte;
#endif
- ret = i2c_bit_add_bus(&i2c->adapter);
if (ret) {
DRM_ERROR("Failed to register bit i2c %s\n", name);
goto out_free;
@@ -1087,7 +1076,6 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
out_free:
kfree(i2c);
return NULL;
-
}
void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
@@ -1156,6 +1144,11 @@ struct radeon_i2c_chan *radeon_i2c_lookup(struct radeon_device *rdev,
return NULL;
}
+struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
+{
+ return NULL;
+}
+
void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
u8 slave_addr,
u8 addr,
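
The hunks above convert radeon's DDC bit-banging from the Linux
i2c-algo-bit callbacks (pre_xfer/post_xfer hung off struct i2c_adapter)
to OpenBSD's cookie-based i2c_controller interface, with the byte-level
work forwarded to the i2c_bitbang_*() helpers via radeon_bbops. A
minimal sketch of that wiring, assuming only the standard
<dev/i2c/i2cvar.h> interface; the my_* names are illustrative:

    #include <sys/types.h>
    #include <dev/i2c/i2cvar.h>

    struct my_chan {
            struct i2c_controller   ic;     /* ops + cookie live here */
            /* ...register cookies, parent softc... */
    };

    static int
    my_acquire_bus(void *cookie, int flags)
    {
            struct my_chan *ch = cookie;    /* ic_cookie comes back to us */

            /* route the pins to the bit-bang engine, as pre_xfer() did */
            (void)ch;
            return (0);
    }

    static void
    my_release_bus(void *cookie, int flags)
    {
            /* undo my_acquire_bus(), as post_xfer() did */
    }

    void
    my_chan_init(struct my_chan *ch)
    {
            ch->ic.ic_cookie = ch;
            ch->ic.ic_acquire_bus = my_acquire_bus;
            ch->ic.ic_release_bus = my_release_bus;
            /* ic_send_start/ic_send_stop/ic_initiate_xfer and the byte
             * ops are filled in the same way; radeon forwards them to
             * i2c_bitbang_*() with its radeon_bbops table. */
    }
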
diff --git a/sys/dev/pci/drm/radeon/radeon_irq_kms.c b/sys/dev/pci/drm/radeon/radeon_irq_kms.c
index f75204ddb25..81c45bbca4a 100644
--- a/sys/dev/pci/drm/radeon/radeon_irq_kms.c
+++ b/sys/dev/pci/drm/radeon/radeon_irq_kms.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_irq_kms.c,v 1.12 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -32,13 +33,12 @@
#include "radeon.h"
#include "atom.h"
-
#define RADEON_WAIT_IDLE_TIMEOUT 200
/**
* radeon_driver_irq_handler_kms - irq handler for KMS
*
- * @int irq, void *arg: args
+ * @DRM_IRQ_ARGS: args
*
* This is the irq handler for the radeon KMS driver (all asics).
* radeon_irq_process is a macro that points to the per-asic
@@ -46,17 +46,13 @@
*/
irqreturn_t radeon_driver_irq_handler_kms(void *arg)
{
- struct drm_device *dev = (struct drm_device *) arg;
+ struct drm_device *dev = arg;
struct radeon_device *rdev = dev->dev_private;
- irqreturn_t ret;
if (!rdev->irq.installed)
- return 0;
+ return (0);
- ret = radeon_irq_process(rdev);
- if (ret == IRQ_HANDLED)
- pm_runtime_mark_last_busy(dev->dev);
- return ret;
+ return radeon_irq_process(rdev);
}
/*
@@ -73,43 +69,21 @@ irqreturn_t radeon_driver_irq_handler_kms(void *arg)
* and calls the hotplug handler for each one, then sends
* a drm hotplug event to alert userspace.
*/
-static void radeon_hotplug_work_func(struct work_struct *work)
+static void radeon_hotplug_work_func(void *arg1)
{
- struct radeon_device *rdev = container_of(work, struct radeon_device,
- hotplug_work.work);
+ struct radeon_device *rdev = arg1;
struct drm_device *dev = rdev->ddev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
- /* we can race here at startup, some boards seem to trigger
- * hotplug irqs when they shouldn't. */
- if (!rdev->mode_info.mode_config_initialized)
- return;
-
- mutex_lock(&mode_config->mutex);
if (mode_config->num_connector) {
list_for_each_entry(connector, &mode_config->connector_list, head)
radeon_connector_hotplug(connector);
}
- mutex_unlock(&mode_config->mutex);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
}
-static void radeon_dp_work_func(struct work_struct *work)
-{
- struct radeon_device *rdev = container_of(work, struct radeon_device,
- dp_work);
- struct drm_device *dev = rdev->ddev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
-
- /* this should take a mutex */
- if (mode_config->num_connector) {
- list_for_each_entry(connector, &mode_config->connector_list, head)
- radeon_connector_hotplug(connector);
- }
-}
/**
* radeon_driver_irq_preinstall_kms - drm irq preinstall callback
*
@@ -128,7 +102,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
/* Disable *all* interrupts */
for (i = 0; i < RADEON_NUM_RINGS; i++)
atomic_set(&rdev->irq.ring_int[i], 0);
- rdev->irq.dpm_thermal = false;
for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false;
for (i = 0; i < RADEON_MAX_CRTCS; i++) {
@@ -152,13 +125,7 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
*/
int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
{
- struct radeon_device *rdev = dev->dev_private;
-
- if (ASIC_IS_AVIVO(rdev))
- dev->max_vblank_count = 0x00ffffff;
- else
- dev->max_vblank_count = 0x001fffff;
-
+ dev->max_vblank_count = 0x001fffff;
return 0;
}
@@ -182,7 +149,6 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
/* Disable *all* interrupts */
for (i = 0; i < RADEON_NUM_RINGS; i++)
atomic_set(&rdev->irq.ring_int[i], 0);
- rdev->irq.dpm_thermal = false;
for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false;
for (i = 0; i < RADEON_MAX_CRTCS; i++) {
@@ -214,18 +180,6 @@ bool radeon_msi_ok(struct radeon_device *rdev)
if (rdev->flags & RADEON_IS_AGP)
return false;
- /*
- * Older chips have a HW limitation, they can only generate 40 bits
- * of address for "64-bit" MSIs which breaks on some platforms, notably
- * IBM POWER servers, so we limit them
- */
-#ifdef notyet
- if (rdev->family < CHIP_BONAIRE) {
- dev_info(rdev->dev, "radeon: MSI limited to 32-bit\n");
- rdev->pdev->no_64bit_msi = 1;
- }
-#endif
-
/* force MSI on */
if (radeon_msi == 1)
return true;
@@ -290,6 +244,9 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
{
int r = 0;
+ task_set(&rdev->hotplug_task, radeon_hotplug_work_func, rdev);
+ task_set(&rdev->audio_task, r600_audio_update_hdmi, rdev);
+
mtx_init(&rdev->irq.lock, IPL_TTY);
r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
if (r) {
@@ -307,25 +264,18 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
}
}
#endif
-
- INIT_DELAYED_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
- INIT_WORK(&rdev->dp_work, radeon_dp_work_func);
- INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
-
rdev->irq.installed = true;
r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq);
if (r) {
rdev->irq.installed = false;
- flush_delayed_work(&rdev->hotplug_work);
return r;
}
-
DRM_INFO("radeon: irq initialized.\n");
return 0;
}
/**
- * radeon_irq_kms_fini - tear down driver interrupt info
+ * radeon_irq_kms_fini - tear down driver interrupt info
*
* @rdev: radeon device pointer
*
@@ -337,10 +287,14 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
if (rdev->irq.installed) {
drm_irq_uninstall(rdev->ddev);
rdev->irq.installed = false;
+#ifdef notyet
if (rdev->msi_enabled)
pci_disable_msi(rdev->pdev);
- flush_delayed_work(&rdev->hotplug_work);
+#endif
}
+#ifdef notyet
+ flush_work(&rdev->hotplug_work);
+#endif
}
/**
@@ -368,21 +322,6 @@ void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
}
/**
- * radeon_irq_kms_sw_irq_get_delayed - enable software interrupt
- *
- * @rdev: radeon device pointer
- * @ring: ring whose interrupt you want to enable
- *
- * Enables the software interrupt for a specific ring (all asics).
- * The software interrupt is generally used to signal a fence on
- * a particular ring.
- */
-bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring)
-{
- return atomic_inc_return(&rdev->irq.ring_int[ring]) == 1;
-}
-
-/**
* radeon_irq_kms_sw_irq_put - disable software interrupt
*
* @rdev: radeon device pointer
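
Throughout radeon_irq_kms.c the Linux work queue plumbing
(INIT_WORK/INIT_DELAYED_WORK plus container_of()) is traded for the
kernel task API: the handler takes a plain void * argument, task_set()
binds handler and argument once at init time, and the interrupt path
queues it later with task_add(). A minimal sketch of that pattern,
assuming only <sys/task.h>; the my_* names are illustrative:

    #include <sys/types.h>
    #include <sys/task.h>

    struct my_softc {
            struct task     sc_hotplug_task;
            /* ... */
    };

    static void
    my_hotplug(void *arg)
    {
            struct my_softc *sc = arg;      /* cookie from task_set() */

            /* runs from the systq thread, so it may sleep; this is
             * where the connector walk above happens */
            (void)sc;
    }

    void
    my_attach(struct my_softc *sc)
    {
            /* bind handler and argument once, as the patch does with
             * task_set(&rdev->hotplug_task, ...) */
            task_set(&sc->sc_hotplug_task, my_hotplug, sc);
    }

    int
    my_intr(void *arg)
    {
            struct my_softc *sc = arg;

            /* defer sleepable work out of interrupt context; a task
             * that is already queued is not queued twice */
            task_add(systq, &sc->sc_hotplug_task);
            return (1);
    }
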
diff --git a/sys/dev/pci/drm/radeon/radeon_kms.c b/sys/dev/pci/drm/radeon/radeon_kms.c
index ce2ac47fb84..23ae01d75fb 100644
--- a/sys/dev/pci/drm/radeon/radeon_kms.c
+++ b/sys/dev/pci/drm/radeon/radeon_kms.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_kms.c,v 1.54 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -30,14 +31,9 @@
#include "radeon.h"
#include <dev/pci/drm/radeon_drm.h>
#include "radeon_asic.h"
+#include <dev/pci/drm/drm_pciids.h>
-#include "radeon_kfd.h"
-
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool radeon_has_atpx(void);
-#else
-static inline bool radeon_has_atpx(void) { return false; }
-#endif
+/* can't include radeon_drv.h due to duplicated defines in radeon_reg.h */
#include "vga.h"
@@ -47,13 +43,73 @@ extern int vga_console_attached;
#ifdef __amd64__
#include "efifb.h"
-#include <machine/biosvar.h>
#endif
#if NEFIFB > 0
#include <machine/efifbvar.h>
#endif
+#define DRIVER_NAME "radeon"
+#define DRIVER_DESC "ATI Radeon"
+#define DRIVER_DATE "20080613"
+
+#define KMS_DRIVER_MAJOR 2
+#define KMS_DRIVER_MINOR 29
+#define KMS_DRIVER_PATCHLEVEL 0
+
+int radeon_driver_irq_handler_kms(void *);
+void radeon_driver_irq_preinstall_kms(struct drm_device *);
+int radeon_driver_irq_postinstall_kms(struct drm_device *);
+void radeon_driver_irq_uninstall_kms(struct drm_device *d);
+
+void radeon_gem_object_free(struct drm_gem_object *);
+int radeon_gem_object_open(struct drm_gem_object *, struct drm_file *);
+void radeon_gem_object_close(struct drm_gem_object *, struct drm_file *);
+
+int radeon_driver_unload_kms(struct drm_device *);
+int radeon_driver_load_kms(struct drm_device *, unsigned long);
+int radeon_info_ioctl(struct drm_device *, void *, struct drm_file *);
+int radeon_driver_firstopen_kms(struct drm_device *);
+void radeon_driver_lastclose_kms(struct drm_device *);
+int radeon_driver_open_kms(struct drm_device *, struct drm_file *);
+void radeon_driver_postclose_kms(struct drm_device *, struct drm_file *);
+void radeon_driver_preclose_kms(struct drm_device *, struct drm_file *);
+u32 radeon_get_vblank_counter_kms(struct drm_device *, unsigned int);
+int radeon_enable_vblank_kms(struct drm_device *, unsigned int);
+void radeon_disable_vblank_kms(struct drm_device *, unsigned int);
+int radeon_get_vblank_timestamp_kms(struct drm_device *, unsigned int,
+ int *, struct timeval *, unsigned);
+
+int radeon_dma_ioctl_kms(struct drm_device *, struct drm_dma *, struct drm_file *);
+
+int radeon_cp_init_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_cp_start_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_cp_stop_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_cp_reset_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_cp_idle_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_cp_resume_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_engine_reset_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_fullscreen_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_cp_swap_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_cp_clear_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_cp_vertex_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_cp_indices_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_cp_texture_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_cp_stipple_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_cp_indirect_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_cp_vertex2_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_cp_cmdbuf_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_cp_getparam_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_cp_flip_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_mem_alloc_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_mem_free_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_mem_init_heap_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_irq_emit_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_irq_wait_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_cp_setparam_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_surface_alloc_kms(struct drm_device *, void *, struct drm_file *);
+int radeon_surface_free_kms(struct drm_device *, void *, struct drm_file *);
+
int radeondrm_probe(struct device *, void *, void *);
void radeondrm_attach_kms(struct device *, struct device *, void *);
int radeondrm_detach_kms(struct device *, int);
@@ -61,20 +117,9 @@ int radeondrm_activate_kms(struct device *, int);
void radeondrm_attachhook(struct device *);
int radeondrm_forcedetach(struct radeon_device *);
-bool radeon_msi_ok(struct radeon_device *);
-irqreturn_t radeon_driver_irq_handler_kms(void *);
-
-extern const struct drm_pcidev radeondrm_pciidlist[];
-extern struct drm_driver kms_driver;
-const struct drm_ioctl_desc radeon_ioctls_kms[];
+extern struct drm_ioctl_desc radeon_ioctls_kms[];
extern int radeon_max_kms_ioctl;
-/*
- * set if the mountroot hook has a fatal error
- * such as not being able to find the firmware on newer cards
- */
-int radeon_fatal_error;
-
struct cfattach radeondrm_ca = {
sizeof (struct radeon_device), radeondrm_probe, radeondrm_attach_kms,
radeondrm_detach_kms, radeondrm_activate_kms
@@ -84,6 +129,142 @@ struct cfdriver radeondrm_cd = {
NULL, "radeondrm", DV_DULL
};
+int radeon_no_wb;
+int radeon_modeset = 1;
+int radeon_dynclks = -1;
+int radeon_r4xx_atom = 0;
+int radeon_agpmode = 0;
+int radeon_vram_limit = 0;
+int radeon_gart_size = 512; /* default gart size */
+int radeon_benchmarking = 0;
+int radeon_testing = 0;
+int radeon_connector_table = 0;
+int radeon_tv = 1;
+int radeon_audio = 0;
+int radeon_disp_priority = 0;
+int radeon_hw_i2c = 0;
+int radeon_pcie_gen2 = -1;
+int radeon_msi = -1;
+int radeon_lockup_timeout = 10000;
+int radeon_auxch = -1;
+
+MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
+module_param_named(no_wb, radeon_no_wb, int, 0444);
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, radeon_modeset, int, 0400);
+
+MODULE_PARM_DESC(dynclks, "Disable/Enable dynamic clocks");
+module_param_named(dynclks, radeon_dynclks, int, 0444);
+
+MODULE_PARM_DESC(r4xx_atom, "Enable ATOMBIOS modesetting for R4xx");
+module_param_named(r4xx_atom, radeon_r4xx_atom, int, 0444);
+
+MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing");
+module_param_named(vramlimit, radeon_vram_limit, int, 0600);
+
+MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)");
+module_param_named(agpmode, radeon_agpmode, int, 0444);
+
+MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc)");
+module_param_named(gartsize, radeon_gart_size, int, 0600);
+
+MODULE_PARM_DESC(benchmark, "Run benchmark");
+module_param_named(benchmark, radeon_benchmarking, int, 0444);
+
+MODULE_PARM_DESC(test, "Run tests");
+module_param_named(test, radeon_testing, int, 0444);
+
+MODULE_PARM_DESC(connector_table, "Force connector table");
+module_param_named(connector_table, radeon_connector_table, int, 0444);
+
+MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
+module_param_named(tv, radeon_tv, int, 0444);
+
+MODULE_PARM_DESC(audio, "Audio enable (1 = enable)");
+module_param_named(audio, radeon_audio, int, 0444);
+
+MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
+module_param_named(disp_priority, radeon_disp_priority, int, 0444);
+
+MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
+module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
+
+MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)");
+module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
+
+MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(msi, radeon_msi, int, 0444);
+
+MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)");
+module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
+
+/*
+ * set if the mountroot hook has a fatal error
+ * such as not being able to find the firmware on newer cards
+ */
+int radeon_fatal_error = 0;
+
+const struct drm_pcidev radeondrm_pciidlist[] = {
+ radeon_PCI_IDS
+};
+
+static struct drm_driver kms_driver = {
+ .driver_features =
+ DRIVER_USE_AGP |
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
+ DRIVER_MODESET,
+ .buf_priv_size = 0,
+ .firstopen = radeon_driver_firstopen_kms,
+ .open = radeon_driver_open_kms,
+ .mmap = radeon_mmap,
+#ifdef notyet
+ .preclose = radeon_driver_preclose_kms,
+ .postclose = radeon_driver_postclose_kms,
+#endif
+ .lastclose = radeon_driver_lastclose_kms,
+#ifdef notyet
+ .suspend = radeon_suspend_kms,
+ .resume = radeon_resume_kms,
+#endif
+ .get_vblank_counter = radeon_get_vblank_counter_kms,
+ .enable_vblank = radeon_enable_vblank_kms,
+ .disable_vblank = radeon_disable_vblank_kms,
+ .get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
+ .get_scanout_position = radeon_get_crtc_scanoutpos,
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = radeon_debugfs_init,
+ .debugfs_cleanup = radeon_debugfs_cleanup,
+#endif
+ .irq_preinstall = radeon_driver_irq_preinstall_kms,
+ .irq_postinstall = radeon_driver_irq_postinstall_kms,
+ .irq_uninstall = radeon_driver_irq_uninstall_kms,
+ .ioctls = radeon_ioctls_kms,
+ .gem_free_object = radeon_gem_object_free,
+ .gem_open_object = radeon_gem_object_open,
+ .gem_close_object = radeon_gem_object_close,
+ .gem_size = sizeof(struct radeon_bo),
+ .dma_ioctl = radeon_dma_ioctl_kms,
+ .dumb_create = radeon_mode_dumb_create,
+ .dumb_map_offset = radeon_mode_dumb_mmap,
+ .dumb_destroy = radeon_mode_dumb_destroy,
+#ifdef notyet
+ .fops = &radeon_driver_kms_fops,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = radeon_gem_prime_export,
+ .gem_prime_import = radeon_gem_prime_import,
+#endif
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = KMS_DRIVER_MAJOR,
+ .minor = KMS_DRIVER_MINOR,
+ .patchlevel = KMS_DRIVER_PATCHLEVEL,
+};
+
int
radeondrm_probe(struct device *parent, void *match, void *aux)
{
@@ -105,32 +286,6 @@ radeondrm_probe(struct device *parent, void *match, void *aux)
* the rest of the device (CP, writeback, etc.).
* Returns 0 on success.
*/
-#ifdef __linux__
-int radeon_driver_unload_kms(struct drm_device *dev)
-{
- struct radeon_device *rdev = dev->dev_private;
-
- if (rdev == NULL)
- return 0;
-
- if (rdev->rmmio == NULL)
- goto done_free;
-
- pm_runtime_get_sync(dev->dev);
-
- radeon_kfd_device_fini(rdev);
-
- radeon_acpi_fini(rdev);
-
- radeon_modeset_fini(rdev);
- radeon_device_fini(rdev);
-
-done_free:
- kfree(rdev);
- dev->dev_private = NULL;
- return 0;
-}
-#else
int
radeondrm_detach_kms(struct device *self, int flags)
{
@@ -139,16 +294,7 @@ radeondrm_detach_kms(struct device *self, int flags)
if (rdev == NULL)
return 0;
- pci_intr_disestablish(rdev->pc, rdev->irqh);
-
-#ifdef notyet
- pm_runtime_get_sync(dev->dev);
-
- radeon_kfd_device_fini(rdev);
-#endif
-
radeon_acpi_fini(rdev);
-
radeon_modeset_fini(rdev);
radeon_device_fini(rdev);
@@ -157,11 +303,14 @@ radeondrm_detach_kms(struct device *self, int flags)
rdev->ddev = NULL;
}
+ pci_intr_disestablish(rdev->pc, rdev->irqh);
+
+ if (rdev->rmmio_size > 0)
+ bus_space_unmap(rdev->memt, rdev->rmmio, rdev->rmmio_size);
+
return 0;
}
-#endif
-void radeondrm_burner(void *, u_int, u_int);
int radeondrm_wsioctl(void *, u_long, caddr_t, int, struct proc *);
paddr_t radeondrm_wsmmap(void *, off_t, int);
int radeondrm_alloc_screen(void *, const struct wsscreen_descr *,
@@ -331,7 +480,6 @@ radeondrm_setcolor(void *v, u_int index, u_int8_t r, u_int8_t g, u_int8_t b)
}
#endif
-#ifdef __linux__
/**
* radeon_driver_load_kms - Main load function for KMS.
*
@@ -345,84 +493,6 @@ radeondrm_setcolor(void *v, u_int index, u_int8_t r, u_int8_t g, u_int8_t b)
* (crtcs, encoders, hotplug detect, etc.).
* Returns 0 on success, error on failure.
*/
-int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
-{
- struct radeon_device *rdev;
- int r, acpi_status;
-
- rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
- if (rdev == NULL) {
- return -ENOMEM;
- }
- dev->dev_private = (void *)rdev;
-
- /* update BUS flag */
- if (drm_pci_device_is_agp(dev)) {
- flags |= RADEON_IS_AGP;
- } else if (pci_is_pcie(dev->pdev)) {
- flags |= RADEON_IS_PCIE;
- } else {
- flags |= RADEON_IS_PCI;
- }
-
- if ((radeon_runtime_pm != 0) &&
- radeon_has_atpx() &&
- ((flags & RADEON_IS_IGP) == 0))
- flags |= RADEON_IS_PX;
-
- /* radeon_device_init should report only fatal error
- * like memory allocation failure or iomapping failure,
- * or memory manager initialization failure, it must
- * properly initialize the GPU MC controller and permit
- * VRAM allocation
- */
- r = radeon_device_init(rdev, dev, dev->pdev, flags);
- if (r) {
- dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
- goto out;
- }
-
- /* Again modeset_init should fail only on fatal error
- * otherwise it should provide enough functionalities
- * for shadowfb to run
- */
- r = radeon_modeset_init(rdev);
- if (r)
- dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
-
- /* Call ACPI methods: require modeset init
- * but failure is not fatal
- */
- if (!r) {
- acpi_status = radeon_acpi_init(rdev);
- if (acpi_status)
- dev_dbg(&dev->pdev->dev,
- "Error during ACPI methods call\n");
- }
-
-#ifdef notyet
- radeon_kfd_device_probe(rdev);
- radeon_kfd_device_init(rdev);
-#endif
-
- if (radeon_is_px(dev)) {
- pm_runtime_use_autosuspend(dev->dev);
- pm_runtime_set_autosuspend_delay(dev->dev, 5000);
- pm_runtime_set_active(dev->dev);
- pm_runtime_allow(dev->dev);
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
- }
-
-out:
- if (r)
- radeon_driver_unload_kms(dev);
-
-
- return r;
-}
-#endif
-
void
radeondrm_attach_kms(struct device *parent, struct device *self, void *aux)
{
@@ -432,8 +502,7 @@ radeondrm_attach_kms(struct device *parent, struct device *self, void *aux)
const struct drm_pcidev *id_entry;
int is_agp;
pcireg_t type;
- int i;
- uint8_t rmmio_bar;
+ uint8_t iobar;
#if !defined(__sparc64__)
pcireg_t addr, mask;
int s;
@@ -446,7 +515,6 @@ radeondrm_attach_kms(struct device *parent, struct device *self, void *aux)
id_entry = drm_find_description(PCI_VENDOR(pa->pa_id),
PCI_PRODUCT(pa->pa_id), radeondrm_pciidlist);
rdev->flags = id_entry->driver_data;
- rdev->family = rdev->flags & RADEON_FAMILY_MASK;
rdev->pc = pa->pa_pc;
rdev->pa_tag = pa->pa_tag;
rdev->iot = pa->pa_iot;
@@ -476,6 +544,9 @@ radeondrm_attach_kms(struct device *parent, struct device *self, void *aux)
#endif
#define RADEON_PCI_MEM 0x10
+#define RADEON_PCI_IO 0x14
+#define RADEON_PCI_MMIO 0x18
+#define RADEON_PCI_IO2 0x20
type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, RADEON_PCI_MEM);
if (PCI_MAPREG_TYPE(type) != PCI_MAPREG_TYPE_MEM ||
@@ -485,41 +556,22 @@ radeondrm_attach_kms(struct device *parent, struct device *self, void *aux)
return;
}
- for (i = PCI_MAPREG_START; i < PCI_MAPREG_END ; i+= 4) {
- type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
- if (PCI_MAPREG_TYPE(type) != PCI_MAPREG_TYPE_IO)
- continue;
- if (pci_mapreg_map(pa, i, type, 0, NULL,
- &rdev->rio_mem, NULL, &rdev->rio_mem_size, 0)) {
- printf(": can't map rio space\n");
- return;
- }
-
- if (type & PCI_MAPREG_MEM_TYPE_64BIT)
- i += 4;
- }
-
- if (rdev->family >= CHIP_BONAIRE) {
- type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 0x18);
- if (PCI_MAPREG_TYPE(type) != PCI_MAPREG_TYPE_MEM ||
- pci_mapreg_map(pa, 0x18, type, 0, NULL,
- &rdev->doorbell.bsh, &rdev->doorbell.base,
- &rdev->doorbell.size, 0)) {
- printf(": can't map doorbell space\n");
- return;
- }
- }
-
- if (rdev->family >= CHIP_BONAIRE)
- rmmio_bar = 0x24;
+ if (PCI_MAPREG_MEM_TYPE(type) != PCI_MAPREG_MEM_TYPE_64BIT)
+ iobar = RADEON_PCI_IO;
else
- rmmio_bar = 0x18;
+ iobar = RADEON_PCI_IO2;
+
+ if (pci_mapreg_map(pa, iobar, PCI_MAPREG_TYPE_IO, 0,
+ NULL, &rdev->rio_mem, NULL, &rdev->rio_mem_size, 0)) {
+ printf(": can't map IO space\n");
+ return;
+ }
- type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, rmmio_bar);
+ type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, RADEON_PCI_MMIO);
if (PCI_MAPREG_TYPE(type) != PCI_MAPREG_TYPE_MEM ||
- pci_mapreg_map(pa, rmmio_bar, type, 0, NULL,
- &rdev->rmmio_bsh, &rdev->rmmio_base, &rdev->rmmio_size, 0)) {
- printf(": can't map rmmio space\n");
+ pci_mapreg_map(pa, RADEON_PCI_MMIO, type, 0, NULL,
+ &rdev->rmmio, &rdev->rmmio_base, &rdev->rmmio_size, 0)) {
+ printf(": can't map mmio space\n");
return;
}
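
The attach hunk above stops scanning every BAR and hard-codes the
pre-Bonaire radeon layout instead: 0x10 is the framebuffer aperture,
0x14 (or 0x20 when the aperture BAR is 64-bit and occupies two slots)
is the I/O port BAR, and 0x18 is the MMIO register window. A hedged
sketch of the pci_mapreg_type()/pci_mapreg_map() calls this relies on;
the my_* names are illustrative:

    #include <sys/types.h>
    #include <machine/bus.h>
    #include <dev/pci/pcireg.h>
    #include <dev/pci/pcivar.h>

    struct my_softc {
            bus_space_tag_t         sc_memt;
            bus_space_handle_t      sc_memh;
            bus_size_t              sc_size;
    };

    int
    my_map_mmio(struct pci_attach_args *pa, struct my_softc *sc)
    {
            pcireg_t type;

            /* BAR 0x18 must decode as memory for the register window */
            type = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 0x18);
            if (PCI_MAPREG_TYPE(type) != PCI_MAPREG_TYPE_MEM)
                    return (1);
            if (pci_mapreg_map(pa, 0x18, type, 0, &sc->sc_memt,
                &sc->sc_memh, NULL, &sc->sc_size, 0))
                    return (1);

            /* sc_size is what the matching detach path hands back to
             * bus_space_unmap(), as radeondrm_detach_kms() now does */
            return (0);
    }
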
@@ -562,11 +614,6 @@ radeondrm_attach_kms(struct device *parent, struct device *self, void *aux)
rdev->flags |= RADEON_IS_PCI;
}
- if ((radeon_runtime_pm != 0) &&
- radeon_has_atpx() &&
- ((rdev->flags & RADEON_IS_IGP) == 0))
- rdev->flags |= RADEON_IS_PX;
-
DRM_DEBUG("%s card detected\n",
((rdev->flags & RADEON_IS_AGP) ? "AGP" :
(((rdev->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));
@@ -577,13 +624,13 @@ radeondrm_attach_kms(struct device *parent, struct device *self, void *aux)
printf("\n");
kms_driver.num_ioctls = radeon_max_kms_ioctl;
- kms_driver.driver_features |= DRIVER_MODESET;
dev = (struct drm_device *)drm_attach_pci(&kms_driver, pa, is_agp,
rdev->console, self);
rdev->ddev = dev;
rdev->pdev = dev->pdev;
+ rdev->family = rdev->flags & RADEON_FAMILY_MASK;
if (!radeon_msi_ok(rdev))
pa->pa_flags &= ~PCI_FLAGS_MSI_ENABLED;
@@ -594,14 +641,14 @@ radeondrm_attach_kms(struct device *parent, struct device *self, void *aux)
printf(": couldn't map interrupt\n");
return;
}
- printf("%s: %s\n", rdev->self.dv_xname,
+ printf("%s: %s\n", rdev->dev.dv_xname,
pci_intr_string(pa->pa_pc, rdev->intrh));
rdev->irqh = pci_intr_establish(pa->pa_pc, rdev->intrh, IPL_TTY,
- radeon_driver_irq_handler_kms, rdev->ddev, rdev->self.dv_xname);
+ radeon_driver_irq_handler_kms, rdev->ddev, rdev->dev.dv_xname);
if (rdev->irqh == NULL) {
printf("%s: couldn't establish interrupt\n",
- rdev->self.dv_xname);
+ rdev->dev.dv_xname);
return;
}
rdev->pdev->irq = -1;
@@ -621,10 +668,10 @@ radeondrm_attach_kms(struct device *parent, struct device *self, void *aux)
* an offset from the start of video memory.
*/
rdev->fb_offset =
- bus_space_read_4(rdev->memt, rdev->rmmio_bsh, RADEON_CRTC_OFFSET);
+ bus_space_read_4(rdev->memt, rdev->rmmio, RADEON_CRTC_OFFSET);
if (bus_space_map(rdev->memt, rdev->fb_aper_offset + rdev->fb_offset,
rdev->sf.sf_fbsize, BUS_SPACE_MAP_LINEAR, &rdev->memh)) {
- printf("%s: can't map video memory\n", rdev->self.dv_xname);
+ printf("%s: can't map video memory\n", rdev->dev.dv_xname);
return;
}
@@ -643,12 +690,10 @@ radeondrm_attach_kms(struct device *parent, struct device *self, void *aux)
config_mountroot(self, radeondrm_attachhook);
}
-extern void mainbus_efifb_reattach(void);
-
int
radeondrm_forcedetach(struct radeon_device *rdev)
{
- struct pci_softc *sc = (struct pci_softc *)rdev->self.dv_parent;
+ struct pci_softc *sc = (struct pci_softc *)rdev->dev.dv_parent;
pcitag_t tag = rdev->pa_tag;
#if NVGA > 0
@@ -656,19 +701,8 @@ radeondrm_forcedetach(struct radeon_device *rdev)
vga_console_attached = 0;
#endif
- /* reprobe pci device for non efi systems */
-#if NEFIFB > 0
- if (bios_efiinfo == NULL && !efifb_cb_found()) {
-#endif
- config_detach(&rdev->self, 0);
- return pci_probe_device(sc, tag, NULL, NULL);
-#if NEFIFB > 0
- } else if (rdev->console) {
- mainbus_efifb_reattach();
- }
-#endif
-
- return 0;
+ config_detach(&rdev->dev, 0);
+ return pci_probe_device(sc, tag, NULL, NULL);
}
void
@@ -683,7 +717,7 @@ radeondrm_attachhook(struct device *self)
* properly initialize the GPU MC controller and permit
* VRAM allocation
*/
- r = radeon_device_init(rdev, rdev->ddev, rdev->ddev->pdev, rdev->flags);
+ r = radeon_device_init(rdev, rdev->ddev);
if (r) {
dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
radeon_fatal_error = 1;
@@ -708,20 +742,6 @@ radeondrm_attachhook(struct device *self)
DRM_DEBUG("Error during ACPI methods call\n");
}
-#ifdef notyet
- radeon_kfd_device_probe(rdev);
- radeon_kfd_device_init(rdev);
-#endif
-
- if (radeon_is_px(rdev->ddev)) {
- pm_runtime_use_autosuspend(dev->dev);
- pm_runtime_set_autosuspend_delay(dev->dev, 5000);
- pm_runtime_set_active(dev->dev);
- pm_runtime_allow(dev->dev);
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
- }
-
{
struct drm_fb_helper *fb_helper = (void *)rdev->mode_info.rfbdev;
struct wsemuldisplaydev_attach_args aa;
@@ -772,12 +792,12 @@ radeondrm_attachhook(struct device *self)
* VGA legacy addresses, and opt out of arbitration.
*/
radeon_vga_set_state(rdev, false);
- pci_disable_legacy_vga(&rdev->self);
+ pci_disable_legacy_vga(&rdev->dev);
- printf("%s: %dx%d, %dbpp\n", rdev->self.dv_xname,
+ printf("%s: %dx%d, %dbpp\n", rdev->dev.dv_xname,
ri->ri_width, ri->ri_height, ri->ri_depth);
- config_found_sm(&rdev->self, &aa, wsemuldisplaydevprint,
+ config_found_sm(&rdev->dev, &aa, wsemuldisplaydevprint,
wsemuldisplaydevsubmatch);
}
}
@@ -794,14 +814,14 @@ radeondrm_activate_kms(struct device *self, int act)
switch (act) {
case DVACT_QUIESCE:
rv = config_activate_children(self, act);
- radeon_suspend_kms(rdev->ddev, true, true);
+ radeon_suspend_kms(rdev->ddev);
break;
case DVACT_SUSPEND:
break;
case DVACT_RESUME:
break;
case DVACT_WAKEUP:
- radeon_resume_kms(rdev->ddev, true, true);
+ radeon_resume_kms(rdev->ddev);
rv = config_activate_children(self, act);
break;
}
@@ -809,7 +829,6 @@ radeondrm_activate_kms(struct device *self, int act)
return (rv);
}
-
/**
* radeon_set_filp_rights - Set filp right.
*
@@ -825,9 +844,7 @@ static void radeon_set_filp_rights(struct drm_device *dev,
struct drm_file *applier,
uint32_t *value)
{
- struct radeon_device *rdev = dev->dev_private;
-
- mutex_lock(&rdev->gem.mutex);
+ mutex_lock(&dev->struct_mutex);
if (*value == 1) {
/* wants rights */
if (!*owner)
@@ -838,7 +855,7 @@ static void radeon_set_filp_rights(struct drm_device *dev,
*owner = NULL;
}
*value = *owner == applier ? 1 : 0;
- mutex_unlock(&rdev->gem.mutex);
+ mutex_unlock(&dev->struct_mutex);
}
/*
@@ -856,83 +873,89 @@ static void radeon_set_filp_rights(struct drm_device *dev,
* etc. (all asics).
* Returns 0 on success, -EINVAL on failure.
*/
-static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_info *info = data;
struct radeon_mode_info *minfo = &rdev->mode_info;
- uint32_t *value, value_tmp, *value_ptr, value_size;
- uint64_t value64;
+ uint32_t value, *value_ptr;
+ uint64_t value64, *value_ptr64;
struct drm_crtc *crtc;
int i, found;
+ /* TIMESTAMP is a 64-bit value, needs special handling. */
+ if (info->request == RADEON_INFO_TIMESTAMP) {
+ if (rdev->family >= CHIP_R600) {
+ value_ptr64 = (uint64_t*)((unsigned long)info->value);
+ if (rdev->family >= CHIP_TAHITI) {
+ value64 = si_get_gpu_clock(rdev);
+ } else {
+ value64 = r600_get_gpu_clock(rdev);
+ }
+
+ if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) {
+ DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ return 0;
+ } else {
+ DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
+ return -EINVAL;
+ }
+ }
+
value_ptr = (uint32_t *)((unsigned long)info->value);
- value = &value_tmp;
- value_size = sizeof(uint32_t);
+ if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) {
+ DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
+ return -EFAULT;
+ }
switch (info->request) {
case RADEON_INFO_DEVICE_ID:
- *value = dev->pdev->device;
+ value = dev->pci_device;
break;
case RADEON_INFO_NUM_GB_PIPES:
- *value = rdev->num_gb_pipes;
+ value = rdev->num_gb_pipes;
break;
case RADEON_INFO_NUM_Z_PIPES:
- *value = rdev->num_z_pipes;
+ value = rdev->num_z_pipes;
break;
case RADEON_INFO_ACCEL_WORKING:
/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
- *value = false;
+ value = false;
else
- *value = rdev->accel_working;
+ value = rdev->accel_working;
break;
case RADEON_INFO_CRTC_FROM_ID:
- if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
- DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
- return -EFAULT;
- }
for (i = 0, found = 0; i < rdev->num_crtc; i++) {
crtc = (struct drm_crtc *)minfo->crtcs[i];
- if (crtc && crtc->base.id == *value) {
+ if (crtc && crtc->base.id == value) {
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- *value = radeon_crtc->crtc_id;
+ value = radeon_crtc->crtc_id;
found = 1;
break;
}
}
if (!found) {
- DRM_DEBUG_KMS("unknown crtc id %d\n", *value);
+ DRM_DEBUG_KMS("unknown crtc id %d\n", value);
return -EINVAL;
}
break;
case RADEON_INFO_ACCEL_WORKING2:
- if (rdev->family == CHIP_HAWAII) {
- if (rdev->accel_working) {
- if (rdev->new_fw)
- *value = 3;
- else
- *value = 2;
- } else {
- *value = 0;
- }
- } else {
- *value = rdev->accel_working;
- }
+ value = rdev->accel_working;
break;
case RADEON_INFO_TILING_CONFIG:
- if (rdev->family >= CHIP_BONAIRE)
- *value = rdev->config.cik.tile_config;
- else if (rdev->family >= CHIP_TAHITI)
- *value = rdev->config.si.tile_config;
+ if (rdev->family >= CHIP_TAHITI)
+ value = rdev->config.si.tile_config;
else if (rdev->family >= CHIP_CAYMAN)
- *value = rdev->config.cayman.tile_config;
+ value = rdev->config.cayman.tile_config;
else if (rdev->family >= CHIP_CEDAR)
- *value = rdev->config.evergreen.tile_config;
+ value = rdev->config.evergreen.tile_config;
else if (rdev->family >= CHIP_RV770)
- *value = rdev->config.rv770.tile_config;
+ value = rdev->config.rv770.tile_config;
else if (rdev->family >= CHIP_R600)
- *value = rdev->config.r600.tile_config;
+ value = rdev->config.r600.tile_config;
else {
DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
return -EINVAL;
@@ -945,88 +968,70 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
*
* When returning, the value is 1 if filp owns hyper-z access,
* 0 otherwise. */
- if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
- DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
- return -EFAULT;
- }
- if (*value >= 2) {
- DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value);
+ if (value >= 2) {
+ DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value);
return -EINVAL;
}
- radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value);
+ radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value);
break;
case RADEON_INFO_WANT_CMASK:
/* The same logic as Hyper-Z. */
- if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
- DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
- return -EFAULT;
- }
- if (*value >= 2) {
- DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value);
+ if (value >= 2) {
+ DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value);
return -EINVAL;
}
- radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value);
+ radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
break;
case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
/* return clock value in KHz */
- if (rdev->asic->get_xclk)
- *value = radeon_get_xclk(rdev) * 10;
- else
- *value = rdev->clock.spll.reference_freq * 10;
+ value = rdev->clock.spll.reference_freq * 10;
break;
case RADEON_INFO_NUM_BACKENDS:
- if (rdev->family >= CHIP_BONAIRE)
- *value = rdev->config.cik.max_backends_per_se *
- rdev->config.cik.max_shader_engines;
- else if (rdev->family >= CHIP_TAHITI)
- *value = rdev->config.si.max_backends_per_se *
+ if (rdev->family >= CHIP_TAHITI)
+ value = rdev->config.si.max_backends_per_se *
rdev->config.si.max_shader_engines;
else if (rdev->family >= CHIP_CAYMAN)
- *value = rdev->config.cayman.max_backends_per_se *
+ value = rdev->config.cayman.max_backends_per_se *
rdev->config.cayman.max_shader_engines;
else if (rdev->family >= CHIP_CEDAR)
- *value = rdev->config.evergreen.max_backends;
+ value = rdev->config.evergreen.max_backends;
else if (rdev->family >= CHIP_RV770)
- *value = rdev->config.rv770.max_backends;
+ value = rdev->config.rv770.max_backends;
else if (rdev->family >= CHIP_R600)
- *value = rdev->config.r600.max_backends;
+ value = rdev->config.r600.max_backends;
else {
return -EINVAL;
}
break;
case RADEON_INFO_NUM_TILE_PIPES:
- if (rdev->family >= CHIP_BONAIRE)
- *value = rdev->config.cik.max_tile_pipes;
- else if (rdev->family >= CHIP_TAHITI)
- *value = rdev->config.si.max_tile_pipes;
+ if (rdev->family >= CHIP_TAHITI)
+ value = rdev->config.si.max_tile_pipes;
else if (rdev->family >= CHIP_CAYMAN)
- *value = rdev->config.cayman.max_tile_pipes;
+ value = rdev->config.cayman.max_tile_pipes;
else if (rdev->family >= CHIP_CEDAR)
- *value = rdev->config.evergreen.max_tile_pipes;
+ value = rdev->config.evergreen.max_tile_pipes;
else if (rdev->family >= CHIP_RV770)
- *value = rdev->config.rv770.max_tile_pipes;
+ value = rdev->config.rv770.max_tile_pipes;
else if (rdev->family >= CHIP_R600)
- *value = rdev->config.r600.max_tile_pipes;
+ value = rdev->config.r600.max_tile_pipes;
else {
return -EINVAL;
}
break;
case RADEON_INFO_FUSION_GART_WORKING:
- *value = 1;
+ value = 1;
break;
case RADEON_INFO_BACKEND_MAP:
- if (rdev->family >= CHIP_BONAIRE)
- *value = rdev->config.cik.backend_map;
- else if (rdev->family >= CHIP_TAHITI)
- *value = rdev->config.si.backend_map;
+ if (rdev->family >= CHIP_TAHITI)
+ value = rdev->config.si.backend_map;
else if (rdev->family >= CHIP_CAYMAN)
- *value = rdev->config.cayman.backend_map;
+ value = rdev->config.cayman.backend_map;
else if (rdev->family >= CHIP_CEDAR)
- *value = rdev->config.evergreen.backend_map;
+ value = rdev->config.evergreen.backend_map;
else if (rdev->family >= CHIP_RV770)
- *value = rdev->config.rv770.backend_map;
+ value = rdev->config.rv770.backend_map;
else if (rdev->family >= CHIP_R600)
- *value = rdev->config.r600.backend_map;
+ value = rdev->config.r600.backend_map;
else {
return -EINVAL;
}
@@ -1035,204 +1040,60 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
/* this is where we report if vm is supported or not */
if (rdev->family < CHIP_CAYMAN)
return -EINVAL;
- *value = RADEON_VA_RESERVED_SIZE;
+ value = RADEON_VA_RESERVED_SIZE;
break;
case RADEON_INFO_IB_VM_MAX_SIZE:
/* this is where we report if vm is supported or not */
if (rdev->family < CHIP_CAYMAN)
return -EINVAL;
- *value = RADEON_IB_VM_MAX_SIZE;
+ value = RADEON_IB_VM_MAX_SIZE;
break;
case RADEON_INFO_MAX_PIPES:
- if (rdev->family >= CHIP_BONAIRE)
- *value = rdev->config.cik.max_cu_per_sh;
- else if (rdev->family >= CHIP_TAHITI)
- *value = rdev->config.si.max_cu_per_sh;
+ if (rdev->family >= CHIP_TAHITI)
+ value = rdev->config.si.max_cu_per_sh;
else if (rdev->family >= CHIP_CAYMAN)
- *value = rdev->config.cayman.max_pipes_per_simd;
+ value = rdev->config.cayman.max_pipes_per_simd;
else if (rdev->family >= CHIP_CEDAR)
- *value = rdev->config.evergreen.max_pipes;
+ value = rdev->config.evergreen.max_pipes;
else if (rdev->family >= CHIP_RV770)
- *value = rdev->config.rv770.max_pipes;
+ value = rdev->config.rv770.max_pipes;
else if (rdev->family >= CHIP_R600)
- *value = rdev->config.r600.max_pipes;
+ value = rdev->config.r600.max_pipes;
else {
return -EINVAL;
}
break;
- case RADEON_INFO_TIMESTAMP:
- if (rdev->family < CHIP_R600) {
- DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
- return -EINVAL;
- }
- value = (uint32_t*)&value64;
- value_size = sizeof(uint64_t);
- value64 = radeon_get_gpu_clock_counter(rdev);
- break;
case RADEON_INFO_MAX_SE:
- if (rdev->family >= CHIP_BONAIRE)
- *value = rdev->config.cik.max_shader_engines;
- else if (rdev->family >= CHIP_TAHITI)
- *value = rdev->config.si.max_shader_engines;
+ if (rdev->family >= CHIP_TAHITI)
+ value = rdev->config.si.max_shader_engines;
else if (rdev->family >= CHIP_CAYMAN)
- *value = rdev->config.cayman.max_shader_engines;
+ value = rdev->config.cayman.max_shader_engines;
else if (rdev->family >= CHIP_CEDAR)
- *value = rdev->config.evergreen.num_ses;
+ value = rdev->config.evergreen.num_ses;
else
- *value = 1;
+ value = 1;
break;
case RADEON_INFO_MAX_SH_PER_SE:
- if (rdev->family >= CHIP_BONAIRE)
- *value = rdev->config.cik.max_sh_per_se;
- else if (rdev->family >= CHIP_TAHITI)
- *value = rdev->config.si.max_sh_per_se;
+ if (rdev->family >= CHIP_TAHITI)
+ value = rdev->config.si.max_sh_per_se;
else
return -EINVAL;
break;
- case RADEON_INFO_FASTFB_WORKING:
- *value = rdev->fastfb_working;
- break;
- case RADEON_INFO_RING_WORKING:
- if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
- DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
- return -EFAULT;
- }
- switch (*value) {
- case RADEON_CS_RING_GFX:
- case RADEON_CS_RING_COMPUTE:
- *value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready;
- break;
- case RADEON_CS_RING_DMA:
- *value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready;
- *value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready;
- break;
- case RADEON_CS_RING_UVD:
- *value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
- break;
- case RADEON_CS_RING_VCE:
- *value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready;
- break;
- default:
- return -EINVAL;
- }
- break;
- case RADEON_INFO_SI_TILE_MODE_ARRAY:
- if (rdev->family >= CHIP_BONAIRE) {
- value = rdev->config.cik.tile_mode_array;
- value_size = sizeof(uint32_t)*32;
- } else if (rdev->family >= CHIP_TAHITI) {
- value = rdev->config.si.tile_mode_array;
- value_size = sizeof(uint32_t)*32;
- } else {
- DRM_DEBUG_KMS("tile mode array is si+ only!\n");
- return -EINVAL;
- }
- break;
- case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY:
- if (rdev->family >= CHIP_BONAIRE) {
- value = rdev->config.cik.macrotile_mode_array;
- value_size = sizeof(uint32_t)*16;
- } else {
- DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n");
- return -EINVAL;
- }
- break;
case RADEON_INFO_SI_CP_DMA_COMPUTE:
- *value = 1;
+ value = 1;
break;
case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
- if (rdev->family >= CHIP_BONAIRE) {
- *value = rdev->config.cik.backend_enable_mask;
- } else if (rdev->family >= CHIP_TAHITI) {
- *value = rdev->config.si.backend_enable_mask;
+ if (rdev->family >= CHIP_TAHITI) {
+ value = rdev->config.si.backend_enable_mask;
} else {
DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
}
break;
- case RADEON_INFO_MAX_SCLK:
- if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
- rdev->pm.dpm_enabled)
- *value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
- else
- *value = rdev->pm.default_sclk * 10;
- break;
- case RADEON_INFO_VCE_FW_VERSION:
- *value = rdev->vce.fw_version;
- break;
- case RADEON_INFO_VCE_FB_VERSION:
- *value = rdev->vce.fb_version;
- break;
- case RADEON_INFO_NUM_BYTES_MOVED:
- value = (uint32_t*)&value64;
- value_size = sizeof(uint64_t);
- value64 = atomic64_read(&rdev->num_bytes_moved);
- break;
- case RADEON_INFO_VRAM_USAGE:
- value = (uint32_t*)&value64;
- value_size = sizeof(uint64_t);
- value64 = atomic64_read(&rdev->vram_usage);
- break;
- case RADEON_INFO_GTT_USAGE:
- value = (uint32_t*)&value64;
- value_size = sizeof(uint64_t);
- value64 = atomic64_read(&rdev->gtt_usage);
- break;
- case RADEON_INFO_ACTIVE_CU_COUNT:
- if (rdev->family >= CHIP_BONAIRE)
- *value = rdev->config.cik.active_cus;
- else if (rdev->family >= CHIP_TAHITI)
- *value = rdev->config.si.active_cus;
- else if (rdev->family >= CHIP_CAYMAN)
- *value = rdev->config.cayman.active_simds;
- else if (rdev->family >= CHIP_CEDAR)
- *value = rdev->config.evergreen.active_simds;
- else if (rdev->family >= CHIP_RV770)
- *value = rdev->config.rv770.active_simds;
- else if (rdev->family >= CHIP_R600)
- *value = rdev->config.r600.active_simds;
- else
- *value = 1;
- break;
- case RADEON_INFO_CURRENT_GPU_TEMP:
- /* get temperature in millidegrees C */
- if (rdev->asic->pm.get_temperature)
- *value = radeon_get_temperature(rdev);
- else
- *value = 0;
- break;
- case RADEON_INFO_CURRENT_GPU_SCLK:
- /* get sclk in Mhz */
- if (rdev->pm.dpm_enabled)
- *value = radeon_dpm_get_current_sclk(rdev) / 100;
- else
- *value = rdev->pm.current_sclk / 100;
- break;
- case RADEON_INFO_CURRENT_GPU_MCLK:
- /* get mclk in Mhz */
- if (rdev->pm.dpm_enabled)
- *value = radeon_dpm_get_current_mclk(rdev) / 100;
- else
- *value = rdev->pm.current_mclk / 100;
- break;
- case RADEON_INFO_READ_REG:
- if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
- DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
- return -EFAULT;
- }
- if (radeon_get_allowed_info_register(rdev, *value, value))
- return -EINVAL;
- break;
- case RADEON_INFO_VA_UNMAP_WORKING:
- *value = true;
- break;
- case RADEON_INFO_GPU_RESET_COUNTER:
- *value = atomic_read(&rdev->gpu_reset_counter);
- break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
}
- if (copy_to_user(value_ptr, (char*)value, value_size)) {
+ if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
return -EFAULT;
}
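
The rewritten radeon_info_ioctl() above settles on one copy shape:
RADEON_INFO_TIMESTAMP is special-cased first as the only 64-bit reply,
and every other request reads one uint32_t argument in via
DRM_COPY_FROM_USER(), overwrites it in the switch, and writes it back
out via DRM_COPY_TO_USER(). A reduced sketch of that shape, with a
hypothetical my_info_ioctl() standing in for the real function:

    int
    my_info_ioctl(struct drm_device *dev, void *data,
        struct drm_file *filp)
    {
            struct radeon_device *rdev = dev->dev_private;
            struct drm_radeon_info *info = data;
            uint32_t value, *value_ptr;

            /* userland passes a pointer in info->value; fetch the
             * 32-bit argument first, since several cases consume one */
            value_ptr = (uint32_t *)((unsigned long)info->value);
            if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value)))
                    return -EFAULT;

            switch (info->request) {
            case RADEON_INFO_NUM_GB_PIPES:
                    value = rdev->num_gb_pipes;
                    break;
            /* ...every other case overwrites value the same way... */
            default:
                    return -EINVAL;
            }

            /* exactly one 32-bit reply goes back out */
            if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(value)))
                    return -EFAULT;
            return 0;
    }
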
@@ -1244,22 +1105,38 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
* Outdated mess for old drm with Xorg being in charge (void function now).
*/
/**
- * radeon_driver_lastclose_kms - drm callback for last close
+ * radeon_driver_firstopen_kms - drm callback for first open
+ *
+ * @dev: drm dev pointer
+ *
+ * Nothing to be done for KMS (all asics).
+ * Returns 0 on success.
+ */
+int radeon_driver_firstopen_kms(struct drm_device *dev)
+{
+ return 0;
+}
+
+/**
+ * radeon_driver_lastclose_kms - drm callback for last close
*
* @dev: drm dev pointer
*
- * Switch vga_switcheroo state after last close (all asics).
+ * Switch vga switcheroo state after last close (all asics).
*/
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
+ struct drm_fb_helper *fb_helper = (void *)rdev->mode_info.rfbdev;
#ifdef __sparc64__
fbwscons_setcolormap(&rdev->sf, radeondrm_setcolor);
#endif
if (rdev->mode_info.mode_config_initialized)
- radeon_fbdev_restore_mode(rdev);
+ drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+#ifdef notyet
vga_switcheroo_process_delayed_switch();
+#endif
}
/**
@@ -1274,18 +1151,13 @@ void radeon_driver_lastclose_kms(struct drm_device *dev)
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
struct radeon_device *rdev = dev->dev_private;
- int r;
file_priv->driver_priv = NULL;
- r = pm_runtime_get_sync(dev->dev);
- if (r < 0)
- return r;
-
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN) {
struct radeon_fpriv *fpriv;
- struct radeon_vm *vm;
+ struct radeon_bo_va *bo_va;
int r;
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -1293,40 +1165,33 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return -ENOMEM;
}
- if (rdev->accel_working) {
- vm = &fpriv->vm;
- r = radeon_vm_init(rdev, vm);
- if (r) {
- kfree(fpriv);
- return r;
- }
+ radeon_vm_init(rdev, &fpriv->vm);
+ if (rdev->accel_working) {
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (r) {
- radeon_vm_fini(rdev, vm);
+ radeon_vm_fini(rdev, &fpriv->vm);
kfree(fpriv);
return r;
}
/* map the ib pool buffer read only into
* virtual address space */
- vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
- rdev->ring_tmp_bo.bo);
- r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
- RADEON_VA_IB_OFFSET,
+ bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
+ rdev->ring_tmp_bo.bo);
+ r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_SNOOPED);
+
+ radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
if (r) {
- radeon_vm_fini(rdev, vm);
+ radeon_vm_fini(rdev, &fpriv->vm);
kfree(fpriv);
return r;
}
}
file_priv->driver_priv = fpriv;
}
-
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
return 0;
}
@@ -1346,19 +1211,21 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
struct radeon_fpriv *fpriv = file_priv->driver_priv;
- struct radeon_vm *vm = &fpriv->vm;
+ struct radeon_bo_va *bo_va;
int r;
if (rdev->accel_working) {
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (!r) {
- if (vm->ib_bo_va)
- radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
+ bo_va = radeon_vm_bo_find(&fpriv->vm,
+ rdev->ring_tmp_bo.bo);
+ if (bo_va)
+ radeon_vm_bo_rmv(rdev, bo_va);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
}
- radeon_vm_fini(rdev, vm);
}
+ radeon_vm_fini(rdev, &fpriv->vm);
kfree(fpriv);
file_priv->driver_priv = NULL;
}
@@ -1377,16 +1244,10 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
struct drm_file *file_priv)
{
struct radeon_device *rdev = dev->dev_private;
-
- mutex_lock(&rdev->gem.mutex);
if (rdev->hyperz_filp == file_priv)
rdev->hyperz_filp = NULL;
if (rdev->cmask_filp == file_priv)
rdev->cmask_filp = NULL;
- mutex_unlock(&rdev->gem.mutex);
-
- radeon_uvd_free_handles(rdev, file_priv);
- radeon_vce_free_handles(rdev, file_priv);
}
/*
@@ -1401,10 +1262,8 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
* Gets the frame count on the requested crtc (all asics).
* Returns frame count on success, -EINVAL on failure.
*/
-u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
+u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int crtc)
{
- int vpos, hpos, stat;
- u32 count;
struct radeon_device *rdev = dev->dev_private;
if (crtc < 0 || crtc >= rdev->num_crtc) {
@@ -1412,53 +1271,7 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
return -EINVAL;
}
- /* The hw increments its frame counter at start of vsync, not at start
- * of vblank, as is required by DRM core vblank counter handling.
- * Cook the hw count here to make it appear to the caller as if it
- * incremented at start of vblank. We measure distance to start of
- * vblank in vpos. vpos therefore will be >= 0 between start of vblank
- * and start of vsync, so vpos >= 0 means to bump the hw frame counter
- * result by 1 to give the proper appearance to caller.
- */
- if (rdev->mode_info.crtcs[crtc]) {
- /* Repeat readout if needed to provide stable result if
- * we cross start of vsync during the queries.
- */
- do {
- count = radeon_get_vblank_counter(rdev, crtc);
- /* Ask radeon_get_crtc_scanoutpos to return vpos as
- * distance to start of vblank, instead of regular
- * vertical scanout pos.
- */
- stat = radeon_get_crtc_scanoutpos(
- dev, crtc, GET_DISTANCE_TO_VBLANKSTART,
- &vpos, &hpos, NULL, NULL,
- &rdev->mode_info.crtcs[crtc]->base.hwmode);
- } while (count != radeon_get_vblank_counter(rdev, crtc));
-
- if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
- (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
- DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
- }
- else {
- DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
- crtc, vpos);
-
- /* Bump counter if we are at >= leading edge of vblank,
- * but before vsync where vpos would turn negative and
- * the hw counter really increments.
- */
- if (vpos >= 0)
- count++;
- }
- }
- else {
- /* Fallback to use value as is. */
- count = radeon_get_vblank_counter(rdev, crtc);
- DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
- }
-
- return count;
+ return radeon_get_vblank_counter(rdev, crtc);
}
/**
@@ -1470,7 +1283,7 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
* Enable the interrupt on the requested crtc (all asics).
* Returns 0 on success, -EINVAL on failure.
*/
-int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
+int radeon_enable_vblank_kms(struct drm_device *dev, unsigned int crtc)
{
struct radeon_device *rdev = dev->dev_private;
unsigned long irqflags;
@@ -1496,7 +1309,7 @@ int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
*
* Disable the interrupt on the requested crtc (all asics).
*/
-void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
+void radeon_disable_vblank_kms(struct drm_device *dev, unsigned int crtc)
{
struct radeon_device *rdev = dev->dev_private;
unsigned long irqflags;
@@ -1525,7 +1338,7 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
* scanout position. (all asics).
 * Returns positive status flags on success, negative error on failure.
*/
-int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+int radeon_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int crtc,
int *max_error,
struct timeval *vblank_time,
unsigned flags)
@@ -1549,49 +1362,96 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
&drmcrtc->hwmode);
}
-const struct drm_ioctl_desc radeon_ioctls_kms[] = {
- DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(RADEON_CP_START, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, drm_invalid_op, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, drm_invalid_op, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_RESET, drm_invalid_op, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, drm_invalid_op, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_SWAP, drm_invalid_op, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_CLEAR, drm_invalid_op, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_VERTEX, drm_invalid_op, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_INDICES, drm_invalid_op, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, drm_invalid_op, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, drm_invalid_op, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, drm_invalid_op, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, drm_invalid_op, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, drm_invalid_op, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_FLIP, drm_invalid_op, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_ALLOC, drm_invalid_op, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_FREE, drm_invalid_op, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, drm_invalid_op, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, drm_invalid_op, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, drm_invalid_op, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, drm_invalid_op, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, drm_invalid_op, DRM_AUTH),
+/*
+ * IOCTL.
+ */
+int radeon_dma_ioctl_kms(struct drm_device *dev, struct drm_dma *d,
+ struct drm_file *file_priv)
+{
+ /* Not valid in KMS. */
+ return -EINVAL;
+}
+
+#define KMS_INVALID_IOCTL(name) \
+int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
+{ \
+ DRM_ERROR("invalid ioctl with kms %s\n", __func__); \
+ return -EINVAL; \
+}
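+/*
+ * For illustration, KMS_INVALID_IOCTL(radeon_cp_init_kms) below
+ * expands to:
+ *
+ *	int radeon_cp_init_kms(struct drm_device *dev, void *data,
+ *	    struct drm_file *file_priv)
+ *	{
+ *		DRM_ERROR("invalid ioctl with kms %s\n", __func__);
+ *		return -EINVAL;
+ *	}
+ *
+ * so each legacy DRI1 entry point keeps a symbol for the ioctl table
+ * below while failing cleanly under KMS.
+ */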
+
+/*
+ * All these ioctls are invalid in kms world.
+ */
+KMS_INVALID_IOCTL(radeon_cp_init_kms)
+KMS_INVALID_IOCTL(radeon_cp_start_kms)
+KMS_INVALID_IOCTL(radeon_cp_stop_kms)
+KMS_INVALID_IOCTL(radeon_cp_reset_kms)
+KMS_INVALID_IOCTL(radeon_cp_idle_kms)
+KMS_INVALID_IOCTL(radeon_cp_resume_kms)
+KMS_INVALID_IOCTL(radeon_engine_reset_kms)
+KMS_INVALID_IOCTL(radeon_fullscreen_kms)
+KMS_INVALID_IOCTL(radeon_cp_swap_kms)
+KMS_INVALID_IOCTL(radeon_cp_clear_kms)
+KMS_INVALID_IOCTL(radeon_cp_vertex_kms)
+KMS_INVALID_IOCTL(radeon_cp_indices_kms)
+KMS_INVALID_IOCTL(radeon_cp_texture_kms)
+KMS_INVALID_IOCTL(radeon_cp_stipple_kms)
+KMS_INVALID_IOCTL(radeon_cp_indirect_kms)
+KMS_INVALID_IOCTL(radeon_cp_vertex2_kms)
+KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms)
+KMS_INVALID_IOCTL(radeon_cp_getparam_kms)
+KMS_INVALID_IOCTL(radeon_cp_flip_kms)
+KMS_INVALID_IOCTL(radeon_mem_alloc_kms)
+KMS_INVALID_IOCTL(radeon_mem_free_kms)
+KMS_INVALID_IOCTL(radeon_mem_init_heap_kms)
+KMS_INVALID_IOCTL(radeon_irq_emit_kms)
+KMS_INVALID_IOCTL(radeon_irq_wait_kms)
+KMS_INVALID_IOCTL(radeon_cp_setparam_kms)
+KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
+KMS_INVALID_IOCTL(radeon_surface_free_kms)
+
+
+struct drm_ioctl_desc radeon_ioctls_kms[] = {
+ DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
/* KMS */
- DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
-int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms);
+int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
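
The hunk above replaces every legacy command-processor entry point with a macro-generated stub. A minimal standalone sketch of that pattern follows (plain C, compilable in userland; the INVALID_OP and demo_* names are illustrative, not the driver's): one macro expands into a family of identically-shaped functions, so the dispatch table keeps a distinct, greppable symbol per entry while every legacy ioctl fails the same way.

	#include <errno.h>
	#include <stdio.h>

	#define INVALID_OP(name)					\
	int name(void *dev, void *data, void *file_priv)		\
	{								\
		(void)dev; (void)data; (void)file_priv;		\
		fprintf(stderr, "invalid ioctl with kms %s\n", #name);	\
		return -EINVAL;					\
	}

	/* Each expansion yields a separate function, as KMS_INVALID_IOCTL does. */
	INVALID_OP(demo_cp_init)
	INVALID_OP(demo_cp_start)

	int (*demo_table[])(void *, void *, void *) = {
		demo_cp_init,
		demo_cp_start,
	};

	int main(void)
	{
		/* Dispatching any legacy slot returns -EINVAL. */
		return demo_table[0](NULL, NULL, NULL) == -EINVAL ? 0 : 1;
	}
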
diff --git a/sys/dev/pci/drm/radeon/radeon_legacy_crtc.c b/sys/dev/pci/drm/radeon/radeon_legacy_crtc.c
index 5dc0dc09277..a360e714717 100644
--- a/sys/dev/pci/drm/radeon/radeon_legacy_crtc.c
+++ b/sys/dev/pci/drm/radeon/radeon_legacy_crtc.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_legacy_crtc.c,v 1.6 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -331,8 +332,6 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
}
drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
- /* Make sure vblank interrupt is still enabled if needed */
- radeon_irq_set(rdev);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
@@ -710,6 +709,9 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
RADEON_CRTC_VSYNC_DIS |
RADEON_CRTC_HSYNC_DIS |
RADEON_CRTC_DISPLAY_DIS);
+ crtc_ext_cntl &= ~(RADEON_CRTC_SYNC_TRISTAT |
+ RADEON_CRTC_VSYNC_TRISTAT |
+ RADEON_CRTC_HSYNC_TRISTAT);
disp_merge_cntl = RREG32(RADEON_DISP_MERGE_CNTL);
disp_merge_cntl &= ~RADEON_DISP_RGB_OFFSET_EN;
@@ -1056,15 +1058,16 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
DRM_ERROR("Mode need scaling but only first crtc can do that.\n");
}
}
- radeon_cursor_reset(crtc);
return 0;
}
static void radeon_crtc_prepare(struct drm_crtc *crtc)
{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_crtc *crtci;
+ radeon_crtc->in_mode_set = true;
/*
* The hardware wedges sometimes if you reconfigure one CRTC
* whilst another is running (see fdo bug #24611).
@@ -1075,6 +1078,7 @@ static void radeon_crtc_prepare(struct drm_crtc *crtc)
static void radeon_crtc_commit(struct drm_crtc *crtc)
{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_crtc *crtci;
@@ -1085,26 +1089,7 @@ static void radeon_crtc_commit(struct drm_crtc *crtc)
if (crtci->enabled)
radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
}
-}
-
-static void radeon_crtc_disable(struct drm_crtc *crtc)
-{
- radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- if (crtc->primary->fb) {
- int r;
- struct radeon_framebuffer *radeon_fb;
- struct radeon_bo *rbo;
-
- radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
- rbo = gem_to_radeon_bo(radeon_fb->obj);
- r = radeon_bo_reserve(rbo, false);
- if (unlikely(r))
- DRM_ERROR("failed to reserve rbo before unpin\n");
- else {
- radeon_bo_unpin(rbo);
- radeon_bo_unreserve(rbo);
- }
- }
+ radeon_crtc->in_mode_set = false;
}
static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
@@ -1116,7 +1101,6 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
.prepare = radeon_crtc_prepare,
.commit = radeon_crtc_commit,
.load_lut = radeon_crtc_load_lut,
- .disable = radeon_crtc_disable
};
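
The prepare/commit hunks above bracket a mode set with an in_mode_set flag on the CRTC. A minimal sketch of that guard, assuming only a boolean on the per-CRTC structure (the demo_* names are illustrative): concurrent paths test the flag and back off instead of racing the reconfiguration.

	#include <stdbool.h>
	#include <stdio.h>

	struct demo_crtc {
		bool in_mode_set;	/* set in prepare(), cleared in commit() */
	};

	static void demo_prepare(struct demo_crtc *c) { c->in_mode_set = true; }
	static void demo_commit(struct demo_crtc *c)  { c->in_mode_set = false; }

	static void demo_flip(struct demo_crtc *c)
	{
		if (c->in_mode_set) {	/* defer rather than race the mode set */
			puts("flip deferred");
			return;
		}
		puts("flip executed");
	}

	int main(void)
	{
		struct demo_crtc c = { false };

		demo_prepare(&c);
		demo_flip(&c);		/* deferred */
		demo_commit(&c);
		demo_flip(&c);		/* executed */
		return 0;
	}
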
diff --git a/sys/dev/pci/drm/radeon/radeon_legacy_encoders.c b/sys/dev/pci/drm/radeon/radeon_legacy_encoders.c
index 46a8aa14ff8..b7abddd2626 100644
--- a/sys/dev/pci/drm/radeon/radeon_legacy_encoders.c
+++ b/sys/dev/pci/drm/radeon/radeon_legacy_encoders.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_legacy_encoders.c,v 1.8 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2007-8 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -28,9 +29,6 @@
#include <dev/pci/drm/radeon_drm.h>
#include "radeon.h"
#include "atom.h"
-#ifdef CONFIG_PMAC_BACKLIGHT
-#include <asm/backlight.h>
-#endif
static void radeon_legacy_encoder_disable(struct drm_encoder *encoder)
{
@@ -389,14 +387,11 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
memset(&props, 0, sizeof(props));
props.max_brightness = RADEON_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
-#ifdef notyet
+#ifdef __linux__
snprintf(bl_name, sizeof(bl_name),
"radeon_bl%d", dev->primary->index);
-#else
- snprintf(bl_name, sizeof(bl_name),
- "radeon_bl%d", 0);
#endif
- bd = backlight_device_register(bl_name, drm_connector->kdev,
+ bd = backlight_device_register(bl_name, &drm_connector->kdev,
pdata, &radeon_backlight_ops, &props);
if (IS_ERR(bd)) {
DRM_ERROR("Backlight registration failed\n");
@@ -445,7 +440,6 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
backlight_update_status(bd);
DRM_INFO("radeon legacy LVDS backlight initialized\n");
- rdev->mode_info.bl_encoder = radeon_encoder;
return;
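
backlight_device_register() above reports failure through an error pointer checked with IS_ERR(). A standalone sketch of that convention, under the usual assumption that valid kernel pointers never fall in the top 4095 bytes of the address space (the ERR_PTR/IS_ERR/PTR_ERR helpers are simplified here, and demo_register is illustrative):

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MAX_ERRNO 4095UL

	static void *ERR_PTR(long err) { return (void *)(intptr_t)err; }
	static int IS_ERR(const void *p) { return (uintptr_t)p >= -MAX_ERRNO; }
	static long PTR_ERR(const void *p) { return (long)(intptr_t)p; }

	static void *demo_register(int fail)
	{
		static int dev;		/* stands in for a real device */
		return fail ? ERR_PTR(-ENOMEM) : (void *)&dev;
	}

	int main(void)
	{
		void *bd = demo_register(1);

		if (IS_ERR(bd)) {	/* error encoded in the pointer itself */
			fprintf(stderr, "registration failed: %ld\n", PTR_ERR(bd));
			return 1;
		}
		return 0;
	}
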
diff --git a/sys/dev/pci/drm/radeon/radeon_legacy_tv.c b/sys/dev/pci/drm/radeon/radeon_legacy_tv.c
index 2ea4887f3ee..4fb2fabc099 100644
--- a/sys/dev/pci/drm/radeon/radeon_legacy_tv.c
+++ b/sys/dev/pci/drm/radeon/radeon_legacy_tv.c
@@ -1,3 +1,5 @@
+/* $OpenBSD: radeon_legacy_tv.c,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
+
#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/drm_crtc_helper.h>
#include "radeon.h"
diff --git a/sys/dev/pci/drm/radeon/radeon_mode.h b/sys/dev/pci/drm/radeon/radeon_mode.h
index 81a6df8913e..0975dd44d76 100644
--- a/sys/dev/pci/drm/radeon/radeon_mode.h
+++ b/sys/dev/pci/drm/radeon/radeon_mode.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_mode.h,v 1.6 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
* VA Linux Systems Inc., Fremont, California.
@@ -33,7 +34,6 @@
#include <dev/pci/drm/drm_crtc.h>
#include <dev/pci/drm/drm_edid.h>
#include <dev/pci/drm/drm_dp_helper.h>
-#include <dev/pci/drm/drm_dp_mst_helper.h>
#include <dev/pci/drm/drm_fixed.h>
#include <dev/pci/drm/drm_crtc_helper.h>
@@ -45,10 +45,6 @@ struct radeon_device;
#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
-#define RADEON_MAX_HPD_PINS 7
-#define RADEON_MAX_CRTCS 6
-#define RADEON_MAX_AFMT_BLOCKS 7
-
enum radeon_rmx_type {
RMX_OFF,
RMX_FULL,
@@ -84,13 +80,6 @@ enum radeon_hpd_id {
RADEON_HPD_NONE = 0xff,
};
-enum radeon_output_csc {
- RADEON_OUTPUT_CSC_BYPASS = 0,
- RADEON_OUTPUT_CSC_TVRGB = 1,
- RADEON_OUTPUT_CSC_YCBCR601 = 2,
- RADEON_OUTPUT_CSC_YCBCR709 = 3,
-};
-
#define RADEON_MAX_I2C_BUS 16
/* radeon gpio-based i2c
@@ -197,11 +186,16 @@ struct radeon_pll {
struct radeon_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
- struct i2c_algo_bit_data bit;
- struct radeon_i2c_bus_rec rec;
+#if 0
+ union {
+ struct i2c_algo_dp_aux_data dp;
+ } algo;
+#else
struct drm_dp_aux aux;
bool has_aux;
struct rwlock mutex;
+#endif
+ struct radeon_i2c_bus_rec rec;
};
/* mostly for macs, but really any system without connector tables */
@@ -242,8 +236,8 @@ struct radeon_mode_info {
struct card_info *atom_card_info;
enum radeon_connector_table connector_table;
bool mode_config_initialized;
- struct radeon_crtc *crtcs[RADEON_MAX_CRTCS];
- struct radeon_afmt *afmt[RADEON_MAX_AFMT_BLOCKS];
+ struct radeon_crtc *crtcs[6];
+ struct radeon_afmt *afmt[6];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
@@ -256,12 +250,6 @@ struct radeon_mode_info {
struct drm_property *underscan_property;
struct drm_property *underscan_hborder_property;
struct drm_property *underscan_vborder_property;
- /* audio */
- struct drm_property *audio_property;
- /* FMT dithering */
- struct drm_property *dither_property;
- /* Output CSC */
- struct drm_property *output_csc_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
int bios_hardcoded_edid_size;
@@ -272,9 +260,6 @@ struct radeon_mode_info {
u16 firmware_flags;
/* pointer to backlight encoder */
struct radeon_encoder *bl_encoder;
-
- /* bitmask for active encoder frontends */
- uint32_t active_encoders;
};
#define RADEON_MAX_BL_LEVEL 0xFF
@@ -305,7 +290,6 @@ struct radeon_tv_regs {
struct radeon_atom_ss {
uint16_t percentage;
- uint16_t percentage_divider;
uint8_t type;
uint16_t step;
uint8_t delay;
@@ -316,31 +300,20 @@ struct radeon_atom_ss {
uint16_t amount;
};
-enum radeon_flip_status {
- RADEON_FLIP_NONE,
- RADEON_FLIP_PENDING,
- RADEON_FLIP_SUBMITTED
-};
-
struct radeon_crtc {
struct drm_crtc base;
int crtc_id;
u16 lut_r[256], lut_g[256], lut_b[256];
bool enabled;
bool can_tile;
- bool cursor_out_of_bounds;
+ bool in_mode_set;
uint32_t crtc_offset;
struct drm_gem_object *cursor_bo;
uint64_t cursor_addr;
- int cursor_x;
- int cursor_y;
- int cursor_hot_x;
- int cursor_hot_y;
int cursor_width;
int cursor_height;
- int max_cursor_width;
- int max_cursor_height;
uint32_t legacy_display_base_addr;
+ uint32_t legacy_cursor_offset;
enum radeon_rmx_type rmx_type;
u8 h_border;
u8 v_border;
@@ -349,9 +322,8 @@ struct radeon_crtc {
struct drm_display_mode native_mode;
int pll_id;
/* page flipping */
- struct workqueue_struct *flip_queue;
- struct radeon_flip_work *flip_work;
- enum radeon_flip_status flip_status;
+ struct radeon_unpin_work *unpin_work;
+ int deferred_flip_completion;
/* pll sharing */
struct radeon_atom_ss ss;
bool ss_enabled;
@@ -362,13 +334,6 @@ struct radeon_crtc {
u32 pll_flags;
struct drm_encoder *encoder;
struct drm_connector *connector;
- /* for dpm */
- u32 line_time;
- u32 wm_low;
- u32 wm_high;
- u32 lb_vblank_lead_lines;
- struct drm_display_mode hw_mode;
- enum radeon_output_csc output_csc;
};
struct radeon_encoder_primary_dac {
@@ -438,25 +403,12 @@ struct radeon_encoder_atom_dig {
uint8_t backlight_level;
int panel_mode;
struct radeon_afmt *afmt;
- struct r600_audio_pin *pin;
- int active_mst_links;
};
struct radeon_encoder_atom_dac {
enum radeon_tv_std tv_std;
};
-struct radeon_encoder_mst {
- int crtc;
- struct radeon_encoder *primary;
- struct radeon_connector *connector;
- struct drm_dp_mst_port *port;
- int pbn;
- int fe;
- bool fe_from_be;
- bool enc_active;
-};
-
struct radeon_encoder {
struct drm_encoder base;
uint32_t encoder_enum;
@@ -474,23 +426,17 @@ struct radeon_encoder {
int audio_polling_active;
bool is_ext_encoder;
u16 caps;
- struct radeon_audio_funcs *audio;
- enum radeon_output_csc output_csc;
- bool can_mst;
- uint32_t offset;
- bool is_mst_encoder;
- /* front end for this mst encoder */
};
struct radeon_connector_atom_dig {
uint32_t igp_lane_info;
/* displayport */
+ struct radeon_i2c_chan *dp_i2c_bus;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 dp_sink_type;
int dp_clock;
int dp_lane_count;
bool edp_on;
- bool is_mst;
};
struct radeon_gpio_rec {
@@ -498,7 +444,6 @@ struct radeon_gpio_rec {
u8 id;
u32 reg;
u32 mask;
- u32 shift;
};
struct radeon_hpd {
@@ -523,22 +468,6 @@ struct radeon_router {
u8 cd_mux_state;
};
-enum radeon_connector_audio {
- RADEON_AUDIO_DISABLE = 0,
- RADEON_AUDIO_ENABLE = 1,
- RADEON_AUDIO_AUTO = 2
-};
-
-enum radeon_connector_dither {
- RADEON_FMT_DITHER_DISABLE = 0,
- RADEON_FMT_DITHER_ENABLE = 1,
-};
-
-struct stream_attribs {
- uint16_t fe;
- uint16_t slots;
-};
-
struct radeon_connector {
struct drm_connector base;
uint32_t connector_id;
@@ -553,22 +482,10 @@ struct radeon_connector {
void *con_priv;
bool dac_load_detect;
bool detected_by_load; /* if the connection status was determined by load */
- bool detected_hpd_without_ddc; /* if an HPD signal was detected on DVI, but ddc probing failed */
uint16_t connector_object_id;
struct radeon_hpd hpd;
struct radeon_router router;
struct radeon_i2c_chan *router_bus;
- enum radeon_connector_audio audio;
- enum radeon_connector_dither dither;
- int pixelclock_for_modeset;
- bool is_mst_connector;
- struct radeon_connector *mst_port;
- struct drm_dp_mst_port *port;
- struct drm_dp_mst_topology_mgr mst_mgr;
-
- struct radeon_encoder *mst_encoder;
- struct stream_attribs cur_stream_attribs[6];
- int enabled_attribs;
};
struct radeon_framebuffer {
@@ -579,156 +496,10 @@ struct radeon_framebuffer {
#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
((em) == ATOM_ENCODER_MODE_DP_MST))
-struct atom_clock_dividers {
- u32 post_div;
- union {
- struct {
-#ifdef __BIG_ENDIAN
- u32 reserved : 6;
- u32 whole_fb_div : 12;
- u32 frac_fb_div : 14;
-#else
- u32 frac_fb_div : 14;
- u32 whole_fb_div : 12;
- u32 reserved : 6;
-#endif
- };
- u32 fb_div;
- };
- u32 ref_div;
- bool enable_post_div;
- bool enable_dithen;
- u32 vco_mode;
- u32 real_clock;
- /* added for CI */
- u32 post_divider;
- u32 flags;
-};
-
-struct atom_mpll_param {
- union {
- struct {
-#ifdef __BIG_ENDIAN
- u32 reserved : 8;
- u32 clkfrac : 12;
- u32 clkf : 12;
-#else
- u32 clkf : 12;
- u32 clkfrac : 12;
- u32 reserved : 8;
-#endif
- };
- u32 fb_div;
- };
- u32 post_div;
- u32 bwcntl;
- u32 dll_speed;
- u32 vco_mode;
- u32 yclk_sel;
- u32 qdr;
- u32 half_rate;
-};
-
-#define MEM_TYPE_GDDR5 0x50
-#define MEM_TYPE_GDDR4 0x40
-#define MEM_TYPE_GDDR3 0x30
-#define MEM_TYPE_DDR2 0x20
-#define MEM_TYPE_GDDR1 0x10
-#define MEM_TYPE_DDR3 0xb0
-#define MEM_TYPE_MASK 0xf0
-
-struct atom_memory_info {
- u8 mem_vendor;
- u8 mem_type;
-};
-
-#define MAX_AC_TIMING_ENTRIES 16
-
-struct atom_memory_clock_range_table
-{
- u8 num_entries;
- u8 rsv[3];
- u32 mclk[MAX_AC_TIMING_ENTRIES];
-};
-
-#define VBIOS_MC_REGISTER_ARRAY_SIZE 32
-#define VBIOS_MAX_AC_TIMING_ENTRIES 20
-
-struct atom_mc_reg_entry {
- u32 mclk_max;
- u32 mc_data[VBIOS_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct atom_mc_register_address {
- u16 s1;
- u8 pre_reg_data;
-};
-
-struct atom_mc_reg_table {
- u8 last;
- u8 num_entries;
- struct atom_mc_reg_entry mc_reg_table_entry[VBIOS_MAX_AC_TIMING_ENTRIES];
- struct atom_mc_register_address mc_reg_address[VBIOS_MC_REGISTER_ARRAY_SIZE];
-};
-
-#define MAX_VOLTAGE_ENTRIES 32
-
-struct atom_voltage_table_entry
-{
- u16 value;
- u32 smio_low;
-};
-
-struct atom_voltage_table
-{
- u32 count;
- u32 mask_low;
- u32 phase_delay;
- struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES];
-};
-
-/* Driver internal use only flags of radeon_get_crtc_scanoutpos() */
-#define USE_REAL_VBLANKSTART (1 << 30)
-#define GET_DISTANCE_TO_VBLANKSTART (1 << 31)
-
-extern void
-radeon_add_atom_connector(struct drm_device *dev,
- uint32_t connector_id,
- uint32_t supported_device,
- int connector_type,
- struct radeon_i2c_bus_rec *i2c_bus,
- uint32_t igp_lane_info,
- uint16_t connector_object_id,
- struct radeon_hpd *hpd,
- struct radeon_router *router);
-extern void
-radeon_add_legacy_connector(struct drm_device *dev,
- uint32_t connector_id,
- uint32_t supported_device,
- int connector_type,
- struct radeon_i2c_bus_rec *i2c_bus,
- uint16_t connector_object_id,
- struct radeon_hpd *hpd);
-extern uint32_t
-radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device,
- uint8_t dac);
-extern void radeon_link_encoder_connector(struct drm_device *dev);
-
extern enum radeon_tv_std
radeon_combios_get_tv_info(struct radeon_device *rdev);
extern enum radeon_tv_std
radeon_atombios_get_tv_info(struct radeon_device *rdev);
-extern void radeon_atombios_get_default_voltages(struct radeon_device *rdev,
- u16 *vddc, u16 *vddci, u16 *mvdd);
-
-extern void
-radeon_combios_connected_scratch_regs(struct drm_connector *connector,
- struct drm_encoder *encoder,
- bool connected);
-extern void
-radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
- struct drm_encoder *encoder,
- bool connected);
extern struct drm_connector *
radeon_get_connector_for_encoder(struct drm_encoder *encoder);
@@ -739,11 +510,10 @@ extern bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
+extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
extern int radeon_get_monitor_bpc(struct drm_connector *connector);
-extern struct edid *radeon_connector_edid(struct drm_connector *connector);
-
extern void radeon_connector_hotplug(struct drm_connector *connector);
extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
struct drm_display_mode *mode);
@@ -756,31 +526,20 @@ extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_connector *connector);
-extern int radeon_dp_get_dp_link_config(struct drm_connector *connector,
- const u8 *dpcd,
- unsigned pix_clock,
- unsigned *dp_lanes, unsigned *dp_rate);
-extern void radeon_dp_set_rx_power_state(struct drm_connector *connector,
- u8 power_state);
extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector);
extern ssize_t
radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg);
extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
-extern void atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_mode, int enc_override);
extern void radeon_atom_encoder_init(struct radeon_device *rdev);
extern void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev);
extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
int action, uint8_t lane_num,
uint8_t lane_set);
-extern void atombios_dig_transmitter_setup2(struct drm_encoder *encoder,
- int action, uint8_t lane_num,
- uint8_t lane_set, int fe);
-extern void atombios_set_mst_encoder_crtc_source(struct drm_encoder *encoder,
- int fe);
extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder);
-void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
+extern int radeon_dp_i2c_aux_ch(struct i2c_controller *adapter, int mode,
+ u8 write_byte, u8 *read_byte);
extern void radeon_i2c_init(struct radeon_device *rdev);
extern void radeon_i2c_fini(struct radeon_device *rdev);
@@ -791,6 +550,9 @@ extern void radeon_i2c_add(struct radeon_device *rdev,
const char *name);
extern struct radeon_i2c_chan *radeon_i2c_lookup(struct radeon_device *rdev,
struct radeon_i2c_bus_rec *i2c_bus);
+extern struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
+ struct radeon_i2c_bus_rec *rec,
+ const char *name);
extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name);
@@ -806,6 +568,9 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
+extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
+
+extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
extern bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
struct radeon_atom_ss *ss,
@@ -813,8 +578,6 @@ extern bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
struct radeon_atom_ss *ss,
int id, u32 clock);
-extern struct radeon_gpio_rec radeon_atombios_lookup_gpio(struct radeon_device *rdev,
- u8 id);
extern void radeon_compute_pll_legacy(struct radeon_pll *pll,
uint64_t freq,
@@ -844,7 +607,6 @@ extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
extern bool atombios_set_edp_panel_power(struct drm_connector *connector, int action);
extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
-extern bool radeon_encoder_is_digital(struct drm_encoder *encoder);
extern void radeon_crtc_load_lut(struct drm_crtc *crtc);
extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
@@ -869,16 +631,13 @@ extern int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
extern int radeon_crtc_do_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, int atomic);
-extern int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width,
- uint32_t height,
- int32_t hot_x,
- int32_t hot_y);
+extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width,
+ uint32_t height);
extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y);
-extern void radeon_cursor_reset(struct drm_crtc *crtc);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
unsigned int flags, int *vpos, int *hpos,
@@ -975,39 +734,18 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
-/* fmt blocks */
-void avivo_program_fmt(struct drm_encoder *encoder);
-void dce3_program_fmt(struct drm_encoder *encoder);
-void dce4_program_fmt(struct drm_encoder *encoder);
-void dce8_program_fmt(struct drm_encoder *encoder);
-
/* fbdev layer */
int radeon_fbdev_init(struct radeon_device *rdev);
void radeon_fbdev_fini(struct radeon_device *rdev);
void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state);
+int radeon_fbdev_total_size(struct radeon_device *rdev);
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
-void radeon_fbdev_restore_mode(struct radeon_device *rdev);
void radeon_fb_output_poll_changed(struct radeon_device *rdev);
-void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id);
-
-void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector);
-void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector);
-
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
-/* mst */
-int radeon_dp_mst_init(struct radeon_connector *radeon_connector);
-int radeon_dp_mst_probe(struct radeon_connector *radeon_connector);
-int radeon_dp_mst_check_status(struct radeon_connector *radeon_connector);
-int radeon_mst_debugfs_init(struct radeon_device *rdev);
-void radeon_dp_mst_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode);
-
-void radeon_setup_mst_connector(struct drm_device *dev);
-
-int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx);
-void radeon_atom_release_dig_encoder(struct radeon_device *rdev, int enc_idx);
+void radeondrm_burner(void *, u_int, u_int);
#endif
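
The to_radeon_crtc()/to_radeon_encoder() macros kept near the top of this header recover the containing radeon structure from an embedded drm base object via container_of(). A standalone sketch of the idiom (the kernel's container_of is equivalent; the struct names here are illustrative):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct drm_base { int id; };

	struct demo_crtc {
		int crtc_offset;
		struct drm_base base;	/* embedded, as in struct radeon_crtc */
	};

	int main(void)
	{
		struct demo_crtc c = { .crtc_offset = 7, .base = { .id = 1 } };
		struct drm_base *b = &c.base;

		/* Walk back from the embedded member to the outer structure. */
		struct demo_crtc *back = container_of(b, struct demo_crtc, base);
		printf("%d\n", back->crtc_offset);	/* prints 7 */
		return 0;
	}
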
diff --git a/sys/dev/pci/drm/radeon/radeon_object.c b/sys/dev/pci/drm/radeon/radeon_object.c
index ac5ea430556..42f0da42ddf 100644
--- a/sys/dev/pci/drm/radeon/radeon_object.c
+++ b/sys/dev/pci/drm/radeon/radeon_object.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_object.c,v 1.12 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2009 Jerome Glisse.
* All Rights Reserved.
@@ -31,7 +32,6 @@
*/
#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/radeon_drm.h>
-#include <dev/pci/drm/drm_cache.h>
#include "radeon.h"
#include "radeon_trace.h"
@@ -45,25 +45,13 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
* function are calling it.
*/
-static void radeon_update_memory_usage(struct radeon_bo *bo,
- unsigned mem_type, int sign)
+void radeon_bo_clear_va(struct radeon_bo *bo)
{
- struct radeon_device *rdev = bo->rdev;
- u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;
-
- switch (mem_type) {
- case TTM_PL_TT:
- if (sign > 0)
- atomic64_add(size, &rdev->gtt_usage);
- else
- atomic64_sub(size, &rdev->gtt_usage);
- break;
- case TTM_PL_VRAM:
- if (sign > 0)
- atomic64_add(size, &rdev->vram_usage);
- else
- atomic64_sub(size, &rdev->vram_usage);
- break;
+ struct radeon_bo_va *bo_va, *tmp;
+
+ list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
+ /* remove from all vm address space */
+ radeon_vm_bo_rmv(bo->rdev, bo_va);
}
}
@@ -72,14 +60,11 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
struct radeon_bo *bo;
bo = container_of(tbo, struct radeon_bo, tbo);
-
- radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
-
mutex_lock(&bo->rdev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
radeon_bo_clear_surface_reg(bo);
- WARN_ON(!list_empty(&bo->va));
+ radeon_bo_clear_va(bo);
drm_gem_object_release(&bo->gem_base);
pool_put(&bo->rdev->ddev->objpl, bo);
}
@@ -93,91 +78,38 @@ bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
- u32 c = 0, i;
+ u32 c = 0;
+ rbo->placement.fpfn = 0;
+ rbo->placement.lpfn = 0;
rbo->placement.placement = rbo->placements;
rbo->placement.busy_placement = rbo->placements;
- if (domain & RADEON_GEM_DOMAIN_VRAM) {
- /* Try placing BOs which don't need CPU access outside of the
- * CPU accessible part of VRAM
- */
- if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
- rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
- rbo->placements[c].fpfn =
- rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
- rbo->placements[c++].flags = TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM;
- }
-
- rbo->placements[c].fpfn = 0;
- rbo->placements[c++].flags = TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM;
- }
-
+ if (domain & RADEON_GEM_DOMAIN_VRAM)
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_VRAM;
if (domain & RADEON_GEM_DOMAIN_GTT) {
- if (rbo->flags & RADEON_GEM_GTT_UC) {
- rbo->placements[c].fpfn = 0;
- rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_TT;
-
- } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
- (rbo->rdev->flags & RADEON_IS_AGP)) {
- rbo->placements[c].fpfn = 0;
- rbo->placements[c++].flags = TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_TT;
+ if (rbo->rdev->flags & RADEON_IS_AGP) {
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
} else {
- rbo->placements[c].fpfn = 0;
- rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
- TTM_PL_FLAG_TT;
+ rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
}
}
-
if (domain & RADEON_GEM_DOMAIN_CPU) {
- if (rbo->flags & RADEON_GEM_GTT_UC) {
- rbo->placements[c].fpfn = 0;
- rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_SYSTEM;
-
- } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
- rbo->rdev->flags & RADEON_IS_AGP) {
- rbo->placements[c].fpfn = 0;
- rbo->placements[c++].flags = TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_SYSTEM;
+ if (rbo->rdev->flags & RADEON_IS_AGP) {
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
} else {
- rbo->placements[c].fpfn = 0;
- rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
- TTM_PL_FLAG_SYSTEM;
+ rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
}
}
- if (!c) {
- rbo->placements[c].fpfn = 0;
- rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
- TTM_PL_FLAG_SYSTEM;
- }
-
+ if (!c)
+ rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
rbo->placement.num_placement = c;
rbo->placement.num_busy_placement = c;
-
- for (i = 0; i < c; ++i) {
- if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
- (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
- !rbo->placements[i].fpfn)
- rbo->placements[i].lpfn =
- rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
- else
- rbo->placements[i].lpfn = 0;
- }
}
int radeon_bo_create(struct radeon_device *rdev,
- unsigned long size, int byte_align, bool kernel,
- u32 domain, u32 flags, struct sg_table *sg,
- struct reservation_object *resv,
- struct radeon_bo **bo_ptr)
+ unsigned long size, int byte_align, bool kernel, u32 domain,
+ struct sg_table *sg, struct radeon_bo **bo_ptr)
{
struct radeon_bo *bo;
enum ttm_bo_type type;
@@ -187,6 +119,9 @@ int radeon_bo_create(struct radeon_device *rdev,
size = PAGE_ALIGN(size);
+#ifdef notyet
+ rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
+#endif
if (kernel) {
type = ttm_bo_type_kernel;
} else if (sg) {
@@ -211,53 +146,12 @@ int radeon_bo_create(struct radeon_device *rdev,
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
INIT_LIST_HEAD(&bo->va);
- bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
- RADEON_GEM_DOMAIN_GTT |
- RADEON_GEM_DOMAIN_CPU);
-
- bo->flags = flags;
- /* PCI GART is always snooped */
- if (!(rdev->flags & RADEON_IS_PCIE))
- bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
-
- /* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
- * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
- */
- if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
- bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
-
-#ifdef CONFIG_X86_32
- /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
- * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
- */
- bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
-#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
- /* Don't try to enable write-combining when it can't work, or things
- * may be slow
- * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
- */
-
-#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
- thanks to write-combining
-
- if (bo->flags & RADEON_GEM_GTT_WC)
- DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
- "better performance thanks to write-combining\n");
- bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
-#else
- /* For architectures that don't support WC memory,
- * mask out the WC flag from the BO
- */
- if (!drm_arch_can_wc_memory())
- bo->flags &= ~RADEON_GEM_GTT_WC;
-#endif
-
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
down_read(&rdev->pm.mclk_lock);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, !kernel, NULL,
- acc_size, sg, resv, &radeon_ttm_bo_destroy);
+ acc_size, sg, &radeon_ttm_bo_destroy);
up_read(&rdev->pm.mclk_lock);
if (unlikely(r != 0)) {
return r;
@@ -301,15 +195,6 @@ void radeon_bo_kunmap(struct radeon_bo *bo)
ttm_bo_kunmap(&bo->kmap);
}
-struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
-{
- if (bo == NULL)
- return NULL;
-
- ttm_bo_reference(&bo->tbo);
- return bo;
-}
-
void radeon_bo_unref(struct radeon_bo **bo)
{
struct ttm_buffer_object *tbo;
@@ -319,7 +204,9 @@ void radeon_bo_unref(struct radeon_bo **bo)
return;
rdev = (*bo)->rdev;
tbo = &((*bo)->tbo);
+ down_read(&rdev->pm.mclk_lock);
ttm_bo_unref(&tbo);
+ up_read(&rdev->pm.mclk_lock);
if (tbo == NULL)
*bo = NULL;
}
@@ -329,9 +216,6 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
{
int r, i;
- if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
- return -EPERM;
-
if (bo->pin_count) {
bo->pin_count++;
if (gpu_addr)
@@ -351,31 +235,29 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
return 0;
}
radeon_ttm_placement_from_domain(bo, domain);
- for (i = 0; i < bo->placement.num_placement; i++) {
+ if (domain == RADEON_GEM_DOMAIN_VRAM) {
/* force to pin into visible video ram */
- if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
- !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
- (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
- bo->placements[i].lpfn =
- bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
- else
- bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;
-
- bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+ bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
}
+ if (max_offset) {
+ u64 lpfn = max_offset >> PAGE_SHIFT;
+
+ if (!bo->placement.lpfn)
+ bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;
+ if (lpfn < bo->placement.lpfn)
+ bo->placement.lpfn = lpfn;
+ }
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (likely(r == 0)) {
bo->pin_count = 1;
if (gpu_addr != NULL)
*gpu_addr = radeon_bo_gpu_offset(bo);
- if (domain == RADEON_GEM_DOMAIN_VRAM)
- bo->rdev->vram_pin_size += radeon_bo_size(bo);
- else
- bo->rdev->gart_pin_size += radeon_bo_size(bo);
- } else {
- dev_err(bo->rdev->dev, "%p pin failed\n", bo);
}
+ if (unlikely(r != 0))
+ dev_err(bo->rdev->dev, "%p pin failed\n", bo);
return r;
}
@@ -395,19 +277,11 @@ int radeon_bo_unpin(struct radeon_bo *bo)
bo->pin_count--;
if (bo->pin_count)
return 0;
- for (i = 0; i < bo->placement.num_placement; i++) {
- bo->placements[i].lpfn = 0;
- bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- }
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (likely(r == 0)) {
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
- bo->rdev->vram_pin_size -= radeon_bo_size(bo);
- else
- bo->rdev->gart_pin_size -= radeon_bo_size(bo);
- } else {
+ if (unlikely(r != 0))
dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
- }
return r;
}
@@ -431,14 +305,18 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
}
dev_err(rdev->dev, "Userspace still has active objects !\n");
list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
+ mutex_lock(&rdev->ddev->struct_mutex);
+#ifdef notyet
dev_err(rdev->dev, "%p %p %lu %lu force free\n",
&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
*((unsigned long *)&bo->gem_base.refcount));
+#endif
mutex_lock(&bo->rdev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
/* this should unref the ttm bo */
- drm_gem_object_unreference_unlocked(&bo->gem_base);
+ drm_gem_object_unreference(&bo->gem_base);
+ mutex_unlock(&rdev->ddev->struct_mutex);
}
}
@@ -447,162 +325,81 @@ int radeon_bo_init(struct radeon_device *rdev)
paddr_t start, end;
/* Add an MTRR for the VRAM */
- if (!rdev->fastfb_working) {
-#ifdef __linux__
- rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
- rdev->mc.aper_size);
-#else
- drm_mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, DRM_MTRR_WC);
- /* fake a 'cookie', seems to be unused? */
- rdev->mc.vram_mtrr = 1;
-#endif
- }
+ drm_mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, DRM_MTRR_WC);
+ /* fake a 'cookie', seems to be unused? */
+ rdev->mc.vram_mtrr = 1;
start = atop(bus_space_mmap(rdev->memt, rdev->mc.aper_base, 0, 0, 0));
end = start + atop(rdev->mc.aper_size);
uvm_page_physload(start, end, start, end, PHYSLOAD_DEVICE);
+#ifdef DRMDEBUG
DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
rdev->mc.mc_vram_size >> 20,
(unsigned long long)rdev->mc.aper_size >> 20);
DRM_INFO("RAM width %dbits %cDR\n",
rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
+#endif
return radeon_ttm_init(rdev);
}
void radeon_bo_fini(struct radeon_device *rdev)
{
radeon_ttm_fini(rdev);
-#ifdef __linux__
- arch_phys_wc_del(rdev->mc.vram_mtrr);
-#else
- drm_mtrr_del(0, rdev->mc.aper_base, rdev->mc.aper_size, DRM_MTRR_WC);
-#endif
}
-/* Returns how many bytes TTM can move per IB.
- */
-static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
+void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
+ struct list_head *head)
{
- u64 real_vram_size = rdev->mc.real_vram_size;
- u64 vram_usage = atomic64_read(&rdev->vram_usage);
-
- /* This function is based on the current VRAM usage.
- *
- * - If all of VRAM is free, allow relocating the number of bytes that
- * is equal to 1/4 of the size of VRAM for this IB.
-
- * - If more than one half of VRAM is occupied, only allow relocating
- * 1 MB of data for this IB.
- *
- * - From 0 to one half of used VRAM, the threshold decreases
- * linearly.
- * __________________
- * 1/4 of -|\ |
- * VRAM | \ |
- * | \ |
- * | \ |
- * | \ |
- * | \ |
- * | \ |
- * | \________|1 MB
- * |----------------|
- * VRAM 0 % 100 %
- * used used
- *
- * Note: It's a threshold, not a limit. The threshold must be crossed
- * for buffer relocations to stop, so any buffer of an arbitrary size
- * can be moved as long as the threshold isn't crossed before
- * the relocation takes place. We don't want to disable buffer
- * relocations completely.
- *
- * The idea is that buffers should be placed in VRAM at creation time
- * and TTM should only do a minimum number of relocations during
- * command submission. In practice, you need to submit at least
- * a dozen IBs to move all buffers to VRAM if they are in GTT.
- *
- * Also, things can get pretty crazy under memory pressure and actual
- * VRAM usage can change a lot, so playing safe even at 50% does
- * consistently increase performance.
- */
-
- u64 half_vram = real_vram_size >> 1;
- u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
- u64 bytes_moved_threshold = half_free_vram >> 1;
- return max(bytes_moved_threshold, 1024*1024ull);
+ if (lobj->wdomain) {
+ list_add(&lobj->tv.head, head);
+ } else {
+ list_add_tail(&lobj->tv.head, head);
+ }
}
-int radeon_bo_list_validate(struct radeon_device *rdev,
- struct ww_acquire_ctx *ticket,
- struct list_head *head, int ring)
+int radeon_bo_list_validate(struct list_head *head)
{
struct radeon_bo_list *lobj;
- struct list_head duplicates;
+ struct radeon_bo *bo;
+ u32 domain;
int r;
- u64 bytes_moved = 0, initial_bytes_moved;
- u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
- INIT_LIST_HEAD(&duplicates);
- r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
+ r = ttm_eu_reserve_buffers(head);
if (unlikely(r != 0)) {
return r;
}
-
list_for_each_entry(lobj, head, tv.head) {
- struct radeon_bo *bo = lobj->robj;
+ bo = lobj->bo;
if (!bo->pin_count) {
- u32 domain = lobj->prefered_domains;
- u32 allowed = lobj->allowed_domains;
- u32 current_domain =
- radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
-
- /* Check if this buffer will be moved and don't move it
- * if we have moved too many buffers for this IB already.
- *
- * Note that this allows moving at least one buffer of
- * any size, because it doesn't take the current "bo"
- * into account. We don't want to disallow buffer moves
- * completely.
- */
- if ((allowed & current_domain) != 0 &&
- (domain & current_domain) == 0 && /* will be moved */
- bytes_moved > bytes_moved_threshold) {
- /* don't move it */
- domain = current_domain;
- }
-
+ domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
+
retry:
radeon_ttm_placement_from_domain(bo, domain);
- if (ring == R600_RING_TYPE_UVD_INDEX)
- radeon_uvd_force_into_uvd_segment(bo, allowed);
-
- initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
- initial_bytes_moved;
-
+ r = ttm_bo_validate(&bo->tbo, &bo->placement,
+ true, false);
if (unlikely(r)) {
- if (r != -ERESTARTSYS &&
- domain != lobj->allowed_domains) {
- domain = lobj->allowed_domains;
+ if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
+ domain |= RADEON_GEM_DOMAIN_GTT;
goto retry;
}
- ttm_eu_backoff_reservation(ticket, head);
return r;
}
}
lobj->gpu_offset = radeon_bo_gpu_offset(bo);
lobj->tiling_flags = bo->tiling_flags;
}
-
- list_for_each_entry(lobj, &duplicates, tv.head) {
- lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
- lobj->tiling_flags = lobj->robj->tiling_flags;
- }
-
return 0;
}
+#ifdef notyet
+int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
+ struct vm_area_struct *vma)
+{
+ return ttm_fbdev_mmap(vma, &bo->tbo);
+}
+#endif
+
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
struct radeon_device *rdev = bo->rdev;
@@ -611,9 +408,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
int steal;
int i;
-#ifdef notyet
- lockdep_assert_held(&bo->tbo.resv->lock.base);
-#endif
+ BUG_ON(!radeon_bo_is_reserved(bo));
if (!bo->tiling_flags)
return 0;
@@ -739,10 +534,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
uint32_t *tiling_flags,
uint32_t *pitch)
{
-#ifdef notyet
- lockdep_assert_held(&bo->tbo.resv->lock.base);
-#endif
-
+ BUG_ON(!radeon_bo_is_reserved(bo));
if (tiling_flags)
*tiling_flags = bo->tiling_flags;
if (pitch)
@@ -752,10 +544,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
bool force_drop)
{
-#ifdef notyet
- if (!force_drop)
- lockdep_assert_held(&bo->tbo.resv->lock.base);
-#endif
+ BUG_ON(!radeon_bo_is_reserved(bo) && !force_drop);
if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
return 0;
@@ -781,31 +570,22 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
}
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *new_mem)
+ struct ttm_mem_reg *mem)
{
struct radeon_bo *rbo;
-
if (!radeon_ttm_bo_is_radeon_bo(bo))
return;
-
rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 1);
radeon_vm_bo_invalidate(rbo->rdev, rbo);
-
- /* update statistics */
- if (!new_mem)
- return;
-
- radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
- radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct radeon_device *rdev;
struct radeon_bo *rbo;
- unsigned long offset, size, lpfn;
- int i, r;
+ unsigned long offset, size;
+ int r;
if (!radeon_ttm_bo_is_radeon_bo(bo))
return 0;
@@ -822,13 +602,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* hurrah the memory is not visible ! */
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
- lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
- for (i = 0; i < rbo->placement.num_placement; i++) {
- /* Force into visible VRAM */
- if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
- (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
- rbo->placements[i].lpfn = lpfn;
- }
+ rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
r = ttm_bo_validate(bo, &rbo->placement, false, false);
if (unlikely(r == -ENOMEM)) {
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
@@ -849,32 +623,38 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
int r;
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
if (unlikely(r != 0))
return r;
+ spin_lock(&bo->tbo.bdev->fence_lock);
if (mem_type)
*mem_type = bo->tbo.mem.mem_type;
-
- r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+ if (bo->tbo.sync_obj)
+ r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+ spin_unlock(&bo->tbo.bdev->fence_lock);
ttm_bo_unreserve(&bo->tbo);
return r;
}
+
/**
- * radeon_bo_fence - add fence to buffer object
- *
- * @bo: buffer object in question
- * @fence: fence to add
- * @shared: true if fence should be added shared
+ * radeon_bo_reserve - reserve bo
+ * @bo: bo structure
+ * @no_intr: don't return -ERESTARTSYS on pending signal
*
+ * Returns:
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
*/
-void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
- bool shared)
+int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
{
- struct reservation_object *resv = bo->tbo.resv;
+ int r;
- if (shared)
- reservation_object_add_shared_fence(resv, &fence->base);
- else
- reservation_object_add_excl_fence(resv, &fence->base);
+ r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS)
+ dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
+ return r;
+ }
+ return 0;
}
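
radeon_bo_reserve(), moved out of line above, wraps ttm_bo_reserve() and treats -ERESTARTSYS as an expected signal wakeup rather than an error worth logging. A minimal sketch of that wrapper shape (fake_reserve/fake_unreserve stand in for the TTM calls and are not real APIs):

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	static int fake_reserve(bool interruptible) { (void)interruptible; return 0; }
	static void fake_unreserve(void) { }

	static int demo_reserve(bool no_intr)
	{
		int r = fake_reserve(!no_intr);

		if (r != 0) {
			if (r != -ERESTARTSYS)	/* signal wakeups are not failures */
				fprintf(stderr, "reserve failed\n");
			return r;
		}
		return 0;
	}

	int main(void)
	{
		if (demo_reserve(false) == 0) {
			/* ... inspect tiling flags etc. while reserved ... */
			fake_unreserve();
		}
		return 0;
	}
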
diff --git a/sys/dev/pci/drm/radeon/radeon_object.h b/sys/dev/pci/drm/radeon/radeon_object.h
index 7b07e8f32ce..827a3559b76 100644
--- a/sys/dev/pci/drm/radeon/radeon_object.h
+++ b/sys/dev/pci/drm/radeon/radeon_object.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_object.h,v 1.5 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -52,27 +53,7 @@ static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
return 0;
}
-/**
- * radeon_bo_reserve - reserve bo
- * @bo: bo structure
- * @no_intr: don't return -ERESTARTSYS on pending signal
- *
- * Returns:
- * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
- * a signal. Release all buffer reservations and return to user-space.
- */
-static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
-{
- int r;
-
- r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
- if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS)
- dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
- return r;
- }
- return 0;
-}
+int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr);
static inline void radeon_bo_unreserve(struct radeon_bo *bo)
{
@@ -98,6 +79,11 @@ static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
return bo->tbo.num_pages << PAGE_SHIFT;
}
+static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
+{
+ return ttm_bo_is_reserved(&bo->tbo);
+}
+
static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
{
return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
@@ -124,13 +110,11 @@ extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
extern int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align,
- bool kernel, u32 domain, u32 flags,
+ bool kernel, u32 domain,
struct sg_table *sg,
- struct reservation_object *resv,
struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
-extern struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo);
extern void radeon_bo_unref(struct radeon_bo **bo);
extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
@@ -140,9 +124,13 @@ extern int radeon_bo_evict_vram(struct radeon_device *rdev);
extern void radeon_bo_force_delete(struct radeon_device *rdev);
extern int radeon_bo_init(struct radeon_device *rdev);
extern void radeon_bo_fini(struct radeon_device *rdev);
-extern int radeon_bo_list_validate(struct radeon_device *rdev,
- struct ww_acquire_ctx *ticket,
- struct list_head *head, int ring);
+extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
+ struct list_head *head);
+extern int radeon_bo_list_validate(struct list_head *head);
+#ifdef notyet
+extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
+ struct vm_area_struct *vma);
+#endif
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
u32 tiling_flags, u32 pitch);
extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
@@ -150,11 +138,9 @@ extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
bool force_drop);
extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *new_mem);
+ struct ttm_mem_reg *mem);
extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
-extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
- bool shared);
/*
* sub allocation
@@ -172,8 +158,7 @@ static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- unsigned size, u32 align, u32 domain,
- u32 flags);
+ unsigned size, u32 align, u32 domain);
extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
@@ -183,7 +168,7 @@ extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
extern int radeon_sa_bo_new(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
struct radeon_sa_bo **sa_bo,
- unsigned size, unsigned align);
+ unsigned size, unsigned align, bool block);
extern void radeon_sa_bo_free(struct radeon_device *rdev,
struct radeon_sa_bo **sa_bo,
struct radeon_fence *fence);
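
After this patch, the pin/unpin pair declared above works on a flat array of placement flag words: pinning ORs a no-evict bit into every placement, unpinning clears it again. A minimal sketch of that flag dance (the PL_* values are illustrative, not the real TTM constants):

	#include <stdio.h>

	#define PL_FLAG_VRAM		(1u << 0)
	#define PL_FLAG_NO_EVICT	(1u << 1)

	int main(void)
	{
		unsigned int placements[2] = { PL_FLAG_VRAM, PL_FLAG_VRAM };
		unsigned int i, n = 2;

		for (i = 0; i < n; i++)		/* pin: forbid eviction */
			placements[i] |= PL_FLAG_NO_EVICT;
		printf("pinned: %u %u\n", placements[0], placements[1]);

		for (i = 0; i < n; i++)		/* unpin: allow eviction again */
			placements[i] &= ~PL_FLAG_NO_EVICT;
		printf("unpinned: %u %u\n", placements[0], placements[1]);
		return 0;
	}
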
diff --git a/sys/dev/pci/drm/radeon/radeon_pm.c b/sys/dev/pci/drm/radeon/radeon_pm.c
index 08829521923..99f8756fa3f 100644
--- a/sys/dev/pci/drm/radeon/radeon_pm.c
+++ b/sys/dev/pci/drm/radeon/radeon_pm.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_pm.c,v 1.17 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -24,7 +25,6 @@
#include "radeon.h"
#include "avivod.h"
#include "atom.h"
-#include "r600_dpm.h"
#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
@@ -67,18 +67,7 @@ int radeon_pm_get_type_index(struct radeon_device *rdev,
void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
{
- if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
- mutex_lock(&rdev->pm.mutex);
- if (power_supply_is_system_supplied() > 0)
- rdev->pm.dpm.ac_power = true;
- else
- rdev->pm.dpm.ac_power = false;
- if (rdev->family == CHIP_ARUBA) {
- if (rdev->asic->dpm.enable_bapm)
- radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
- }
- mutex_unlock(&rdev->pm.mutex);
- } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
+ if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
mutex_lock(&rdev->pm.mutex);
radeon_pm_update_profile(rdev);
@@ -157,8 +146,7 @@ static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
if (rdev->pm.active_crtcs) {
rdev->pm.vblank_sync = false;
- wait_event_timeout(
- rdev->irq.vblank_queue, rdev->pm.vblank_sync,
+ tsleep(&rdev->irq.vblank_queue, PZERO, "rdnsvb",
msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
}
}
@@ -252,6 +240,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
(rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
return;
+ mutex_lock(&rdev->ddev->struct_mutex);
down_write(&rdev->pm.mclk_lock);
mutex_lock(&rdev->ring_lock);
@@ -261,11 +250,12 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
if (!ring->ready) {
continue;
}
- r = radeon_fence_wait_empty(rdev, i);
+ r = radeon_fence_wait_empty_locked(rdev, i);
if (r) {
/* needs a GPU reset dont reset here */
mutex_unlock(&rdev->ring_lock);
up_write(&rdev->pm.mclk_lock);
+ mutex_unlock(&rdev->ddev->struct_mutex);
return;
}
}
@@ -301,6 +291,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
mutex_unlock(&rdev->ring_lock);
up_write(&rdev->pm.mclk_lock);
+ mutex_unlock(&rdev->ddev->struct_mutex);
}
static void radeon_pm_print_states(struct radeon_device *rdev)
@@ -342,7 +333,7 @@ static ssize_t radeon_get_pm_profile(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
struct radeon_device *rdev = ddev->dev_private;
int cp = rdev->pm.profile;
@@ -358,16 +349,9 @@ static ssize_t radeon_set_pm_profile(struct device *dev,
const char *buf,
size_t count)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
struct radeon_device *rdev = ddev->dev_private;
- /* Can't set profile when the card is off */
-#ifdef notyet
- if ((rdev->flags & RADEON_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
-#endif
-
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (strncmp("default", buf, strlen("default")) == 0)
@@ -399,13 +383,12 @@ static ssize_t radeon_get_pm_method(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
struct radeon_device *rdev = ddev->dev_private;
int pm = rdev->pm.pm_method;
return snprintf(buf, PAGE_SIZE, "%s\n",
- (pm == PM_METHOD_DYNPM) ? "dynpm" :
- (pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
+ (pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
}
static ssize_t radeon_set_pm_method(struct device *dev,
@@ -413,23 +396,9 @@ static ssize_t radeon_set_pm_method(struct device *dev,
const char *buf,
size_t count)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
struct radeon_device *rdev = ddev->dev_private;
-#ifdef notyet
- /* Can't set method when the card is off */
- if ((rdev->flags & RADEON_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
- count = -EINVAL;
- goto fail;
- }
-#endif
-
- /* we don't support the legacy modes with dpm */
- if (rdev->pm.pm_method == PM_METHOD_DPM) {
- count = -EINVAL;
- goto fail;
- }
if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
mutex_lock(&rdev->pm.mutex);
@@ -454,330 +423,62 @@ fail:
return count;
}
-static ssize_t radeon_get_dpm_state(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct radeon_device *rdev = ddev->dev_private;
- enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
-
- return snprintf(buf, PAGE_SIZE, "%s\n",
- (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
- (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
-}
-
-static ssize_t radeon_set_dpm_state(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct radeon_device *rdev = ddev->dev_private;
-
- mutex_lock(&rdev->pm.mutex);
- if (strncmp("battery", buf, strlen("battery")) == 0)
- rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
- else if (strncmp("balanced", buf, strlen("balanced")) == 0)
- rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
- else if (strncmp("performance", buf, strlen("performance")) == 0)
- rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
- else {
- mutex_unlock(&rdev->pm.mutex);
- count = -EINVAL;
- goto fail;
- }
- mutex_unlock(&rdev->pm.mutex);
-
- /* Can't set dpm state when the card is off */
-#ifdef notyet
- if (!(rdev->flags & RADEON_IS_PX) ||
- (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
-#endif
- radeon_pm_compute_clocks(rdev);
-
-fail:
- return count;
-}
-
-static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct radeon_device *rdev = ddev->dev_private;
- enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
-
-#ifdef notyet
- if ((rdev->flags & RADEON_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return snprintf(buf, PAGE_SIZE, "off\n");
-#endif
-
- return snprintf(buf, PAGE_SIZE, "%s\n",
- (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
- (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
-}
-
-static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct radeon_device *rdev = ddev->dev_private;
- enum radeon_dpm_forced_level level;
- int ret = 0;
-
- /* Can't force performance level when the card is off */
-#ifdef notyet
- if ((rdev->flags & RADEON_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
-#endif
-
- mutex_lock(&rdev->pm.mutex);
- if (strncmp("low", buf, strlen("low")) == 0) {
- level = RADEON_DPM_FORCED_LEVEL_LOW;
- } else if (strncmp("high", buf, strlen("high")) == 0) {
- level = RADEON_DPM_FORCED_LEVEL_HIGH;
- } else if (strncmp("auto", buf, strlen("auto")) == 0) {
- level = RADEON_DPM_FORCED_LEVEL_AUTO;
- } else {
- count = -EINVAL;
- goto fail;
- }
- if (rdev->asic->dpm.force_performance_level) {
- if (rdev->pm.dpm.thermal_active) {
- count = -EINVAL;
- goto fail;
- }
- ret = radeon_dpm_force_performance_level(rdev, level);
- if (ret)
- count = -EINVAL;
- }
-fail:
- mutex_unlock(&rdev->pm.mutex);
-
- return count;
-}
-#endif
-
-#ifdef notyet
-static ssize_t radeon_hwmon_get_pwm1_enable(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct radeon_device *rdev = dev_get_drvdata(dev);
- u32 pwm_mode = 0;
-
- if (rdev->asic->dpm.fan_ctrl_get_mode)
- pwm_mode = rdev->asic->dpm.fan_ctrl_get_mode(rdev);
-
- /* never 0 (full-speed); always fuse- or smc-controlled */
- return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
-}
-
-static ssize_t radeon_hwmon_set_pwm1_enable(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct radeon_device *rdev = dev_get_drvdata(dev);
- int err;
- int value;
-
- if(!rdev->asic->dpm.fan_ctrl_set_mode)
- return -EINVAL;
-
- err = kstrtoint(buf, 10, &value);
- if (err)
- return err;
-
- switch (value) {
- case 1: /* manual, percent-based */
- rdev->asic->dpm.fan_ctrl_set_mode(rdev, FDO_PWM_MODE_STATIC);
- break;
- default: /* disable */
- rdev->asic->dpm.fan_ctrl_set_mode(rdev, 0);
- break;
- }
-
- return count;
-}
-
-static ssize_t radeon_hwmon_get_pwm1_min(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%i\n", 0);
-}
-
-static ssize_t radeon_hwmon_get_pwm1_max(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%i\n", 255);
-}
-
-static ssize_t radeon_hwmon_set_pwm1(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct radeon_device *rdev = dev_get_drvdata(dev);
- int err;
- u32 value;
-
- err = kstrtou32(buf, 10, &value);
- if (err)
- return err;
-
- value = (value * 100) / 255;
-
- err = rdev->asic->dpm.set_fan_speed_percent(rdev, value);
- if (err)
- return err;
-
- return count;
-}
-
-static ssize_t radeon_hwmon_get_pwm1(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct radeon_device *rdev = dev_get_drvdata(dev);
- int err;
- u32 speed;
-
- err = rdev->asic->dpm.get_fan_speed_percent(rdev, &speed);
- if (err)
- return err;
-
- speed = (speed * 255) / 100;
-
- return sprintf(buf, "%i\n", speed);
-}
-
static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
-static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state);
-static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
- radeon_get_dpm_forced_performance_level,
- radeon_set_dpm_forced_performance_level);
+#endif /* notyet */
+#ifdef notyet
static ssize_t radeon_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct radeon_device *rdev = dev_get_drvdata(dev);
- struct drm_device *ddev = rdev->ddev;
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
int temp;
- /* Can't get temperature when the card is off */
- if ((rdev->flags & RADEON_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
-
- if (rdev->asic->pm.get_temperature)
- temp = radeon_get_temperature(rdev);
- else
+ switch (rdev->pm.int_thermal_type) {
+ case THERMAL_TYPE_RV6XX:
+ temp = rv6xx_get_temp(rdev);
+ break;
+ case THERMAL_TYPE_RV770:
+ temp = rv770_get_temp(rdev);
+ break;
+ case THERMAL_TYPE_EVERGREEN:
+ case THERMAL_TYPE_NI:
+ temp = evergreen_get_temp(rdev);
+ break;
+ case THERMAL_TYPE_SUMO:
+ temp = sumo_get_temp(rdev);
+ break;
+ case THERMAL_TYPE_SI:
+ temp = si_get_temp(rdev);
+ break;
+ default:
temp = 0;
+ break;
+ }
return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
-static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t radeon_hwmon_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- struct radeon_device *rdev = dev_get_drvdata(dev);
- int hyst = to_sensor_dev_attr(attr)->index;
- int temp;
-
- if (hyst)
- temp = rdev->pm.dpm.thermal.min_temp;
- else
- temp = rdev->pm.dpm.thermal.max_temp;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+ return sprintf(buf, "radeon\n");
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1);
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1, radeon_hwmon_set_pwm1, 0);
-static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1_enable, radeon_hwmon_set_pwm1_enable, 0);
-static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, radeon_hwmon_get_pwm1_min, NULL, 0);
-static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, radeon_hwmon_get_pwm1_max, NULL, 0);
-
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_pwm1.dev_attr.attr,
- &sensor_dev_attr_pwm1_enable.dev_attr.attr,
- &sensor_dev_attr_pwm1_min.dev_attr.attr,
- &sensor_dev_attr_pwm1_max.dev_attr.attr,
+ &sensor_dev_attr_name.dev_attr.attr,
NULL
};
-static umode_t hwmon_attributes_visible(struct kobject *kobj,
- struct attribute *attr, int index)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct radeon_device *rdev = dev_get_drvdata(dev);
- umode_t effective_mode = attr->mode;
-
- /* Skip attributes if DPM is not enabled */
- if (rdev->pm.pm_method != PM_METHOD_DPM &&
- (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
- attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
- return 0;
-
- /* Skip fan attributes if fan is not present */
- if (rdev->pm.no_fan &&
- (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
- return 0;
-
- /* mask fan attributes if we have no bindings for this asic to expose */
- if ((!rdev->asic->dpm.get_fan_speed_percent &&
- attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
- (!rdev->asic->dpm.fan_ctrl_get_mode &&
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
- effective_mode &= ~S_IRUGO;
-
- if ((!rdev->asic->dpm.set_fan_speed_percent &&
- attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
- (!rdev->asic->dpm.fan_ctrl_set_mode &&
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
- effective_mode &= ~S_IWUSR;
-
- /* hide max/min values if we can't both query and manage the fan */
- if ((!rdev->asic->dpm.set_fan_speed_percent &&
- !rdev->asic->dpm.get_fan_speed_percent) &&
- (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
- return 0;
-
- return effective_mode;
-}
-
static const struct attribute_group hwmon_attrgroup = {
.attrs = hwmon_attributes,
- .is_visible = hwmon_attributes_visible,
-};
-
-static const struct attribute_group *hwmon_groups[] = {
- &hwmon_attrgroup,
- NULL
};
#endif
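The surviving hwmon glue follows the stock Linux sysfs pattern: SENSOR_DEVICE_ATTR(name, mode, show, store, index) binds a show (and optional store) callback to a named file, and the resulting attributes are exposed as a group. A minimal sketch with a hypothetical attribute, not one from this driver:

    static ssize_t foo_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
    {
            /* sysfs convention: one value, newline-terminated, <= PAGE_SIZE */
            return snprintf(buf, PAGE_SIZE, "%d\n", 42);
    }
    static SENSOR_DEVICE_ATTR(foo, S_IRUGO, foo_show, NULL, 0);

    /* &sensor_dev_attr_foo.dev_attr.attr goes into an attribute array,
     * which sysfs_create_group() then exposes under the hwmon device,
     * as radeon_hwmon_init() does below. */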
@@ -785,6 +486,8 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
{
int err = 0;
+ rdev->pm.int_hwmon_dev = NULL;
+
switch (rdev->pm.int_thermal_type) {
case THERMAL_TYPE_RV6XX:
case THERMAL_TYPE_RV770:
@@ -792,21 +495,27 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
case THERMAL_TYPE_NI:
case THERMAL_TYPE_SUMO:
case THERMAL_TYPE_SI:
- case THERMAL_TYPE_CI:
- case THERMAL_TYPE_KV:
- if (rdev->asic->pm.get_temperature == NULL)
+ /* No support for TN yet */
+ if (rdev->family == CHIP_ARUBA)
return err;
#ifdef notyet
- rdev->pm.int_hwmon_dev = hwmon_device_register_with_groups(rdev->dev,
- "radeon", rdev,
- hwmon_groups);
+ rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
if (IS_ERR(rdev->pm.int_hwmon_dev)) {
err = PTR_ERR(rdev->pm.int_hwmon_dev);
dev_err(rdev->dev,
"Unable to register hwmon device: %d\n", err);
+ break;
+ }
+ dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev);
+ err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj,
+ &hwmon_attrgroup);
+ if (err) {
+ dev_err(rdev->dev,
+ "Unable to create hwmon sysfs file: %d\n", err);
+ hwmon_device_unregister(rdev->dev);
}
-#endif
break;
+#endif
default:
break;
}
@@ -816,382 +525,16 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
static void radeon_hwmon_fini(struct radeon_device *rdev)
{
+ printf("%s stub\n", __func__);
#ifdef notyet
- if (rdev->pm.int_hwmon_dev)
+ if (rdev->pm.int_hwmon_dev) {
+ sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup);
hwmon_device_unregister(rdev->pm.int_hwmon_dev);
-#endif
-}
-
-static void radeon_dpm_thermal_work_handler(struct work_struct *work)
-{
- struct radeon_device *rdev =
- container_of(work, struct radeon_device,
- pm.dpm.thermal.work);
- /* switch to the thermal state */
- enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
-
- if (!rdev->pm.dpm_enabled)
- return;
-
- if (rdev->asic->pm.get_temperature) {
- int temp = radeon_get_temperature(rdev);
-
- if (temp < rdev->pm.dpm.thermal.min_temp)
- /* switch back the user state */
- dpm_state = rdev->pm.dpm.user_state;
- } else {
- if (rdev->pm.dpm.thermal.high_to_low)
- /* switch back the user state */
- dpm_state = rdev->pm.dpm.user_state;
- }
- mutex_lock(&rdev->pm.mutex);
- if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
- rdev->pm.dpm.thermal_active = true;
- else
- rdev->pm.dpm.thermal_active = false;
- rdev->pm.dpm.state = dpm_state;
- mutex_unlock(&rdev->pm.mutex);
-
- radeon_pm_compute_clocks(rdev);
-}
-
-static bool radeon_dpm_single_display(struct radeon_device *rdev)
-{
- bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
- true : false;
-
- /* check if the vblank period is too short to adjust the mclk */
- if (single_display && rdev->asic->dpm.vblank_too_short) {
- if (radeon_dpm_vblank_too_short(rdev))
- single_display = false;
- }
-
- /* 120 Hz displays tend to be problematic even when they are
- * under the vblank limit.
- */
- if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
- single_display = false;
-
- return single_display;
-}
-
-static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
- enum radeon_pm_state_type dpm_state)
-{
- int i;
- struct radeon_ps *ps;
- u32 ui_class;
- bool single_display = radeon_dpm_single_display(rdev);
-
- /* certain older asics have a separate 3D performance state,
- * so try that first if the user selected performance
- */
- if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
- dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
- /* balanced states don't exist at the moment */
- if (dpm_state == POWER_STATE_TYPE_BALANCED)
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
-
-restart_search:
- /* Pick the best power state based on current conditions */
- for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
- ps = &rdev->pm.dpm.ps[i];
- ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
- switch (dpm_state) {
- /* user states */
- case POWER_STATE_TYPE_BATTERY:
- if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
- if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
- if (single_display)
- return ps;
- } else
- return ps;
- }
- break;
- case POWER_STATE_TYPE_BALANCED:
- if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
- if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
- if (single_display)
- return ps;
- } else
- return ps;
- }
- break;
- case POWER_STATE_TYPE_PERFORMANCE:
- if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
- if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
- if (single_display)
- return ps;
- } else
- return ps;
- }
- break;
- /* internal states */
- case POWER_STATE_TYPE_INTERNAL_UVD:
- if (rdev->pm.dpm.uvd_ps)
- return rdev->pm.dpm.uvd_ps;
- else
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_SD:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_HD:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
- if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_BOOT:
- return rdev->pm.dpm.boot_ps;
- case POWER_STATE_TYPE_INTERNAL_THERMAL:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_ACPI:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_ULV:
- if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_3DPERF:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
- return ps;
- break;
- default:
- break;
- }
- }
- /* use a fallback state if we didn't match */
- switch (dpm_state) {
- case POWER_STATE_TYPE_INTERNAL_UVD_SD:
- dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
- goto restart_search;
- case POWER_STATE_TYPE_INTERNAL_UVD_HD:
- case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
- case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
- if (rdev->pm.dpm.uvd_ps) {
- return rdev->pm.dpm.uvd_ps;
- } else {
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
- goto restart_search;
- }
- case POWER_STATE_TYPE_INTERNAL_THERMAL:
- dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
- goto restart_search;
- case POWER_STATE_TYPE_INTERNAL_ACPI:
- dpm_state = POWER_STATE_TYPE_BATTERY;
- goto restart_search;
- case POWER_STATE_TYPE_BATTERY:
- case POWER_STATE_TYPE_BALANCED:
- case POWER_STATE_TYPE_INTERNAL_3DPERF:
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
- goto restart_search;
- default:
- break;
- }
-
- return NULL;
-}
-
-static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
-{
- int i;
- struct radeon_ps *ps;
- enum radeon_pm_state_type dpm_state;
- int ret;
- bool single_display = radeon_dpm_single_display(rdev);
-
- /* if dpm init failed */
- if (!rdev->pm.dpm_enabled)
- return;
-
- if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) {
- /* add other state override checks here */
- if ((!rdev->pm.dpm.thermal_active) &&
- (!rdev->pm.dpm.uvd_active))
- rdev->pm.dpm.state = rdev->pm.dpm.user_state;
- }
- dpm_state = rdev->pm.dpm.state;
-
- ps = radeon_dpm_pick_power_state(rdev, dpm_state);
- if (ps)
- rdev->pm.dpm.requested_ps = ps;
- else
- return;
-
- /* no need to reprogram if nothing changed unless we are on BTC+ */
- if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) {
- /* vce just modifies an existing state so force a change */
- if (ps->vce_active != rdev->pm.dpm.vce_active)
- goto force;
- /* user has made a display change (such as timing) */
- if (rdev->pm.dpm.single_display != single_display)
- goto force;
- if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
- /* for pre-BTC and APUs if the num crtcs changed but state is the same,
- * all we need to do is update the display configuration.
- */
- if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) {
- /* update display watermarks based on new power state */
- radeon_bandwidth_update(rdev);
- /* update displays */
- radeon_dpm_display_configuration_changed(rdev);
- rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
- rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
- }
- return;
- } else {
- /* for BTC+ if the num crtcs hasn't changed and state is the same,
- * nothing to do, if the num crtcs is > 1 and state is the same,
- * update display configuration.
- */
- if (rdev->pm.dpm.new_active_crtcs ==
- rdev->pm.dpm.current_active_crtcs) {
- return;
- } else {
- if ((rdev->pm.dpm.current_active_crtc_count > 1) &&
- (rdev->pm.dpm.new_active_crtc_count > 1)) {
- /* update display watermarks based on new power state */
- radeon_bandwidth_update(rdev);
- /* update displays */
- radeon_dpm_display_configuration_changed(rdev);
- rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
- rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
- return;
- }
- }
- }
- }
-
-force:
- if (radeon_dpm == 1) {
- printk("switching from power state:\n");
- radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps);
- printk("switching to power state:\n");
- radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps);
- }
-
- down_write(&rdev->pm.mclk_lock);
- mutex_lock(&rdev->ring_lock);
-
- /* update whether vce is active */
- ps->vce_active = rdev->pm.dpm.vce_active;
-
- ret = radeon_dpm_pre_set_power_state(rdev);
- if (ret)
- goto done;
-
- /* update display watermarks based on new power state */
- radeon_bandwidth_update(rdev);
- /* update displays */
- radeon_dpm_display_configuration_changed(rdev);
-
- /* wait for the rings to drain */
- for (i = 0; i < RADEON_NUM_RINGS; i++) {
- struct radeon_ring *ring = &rdev->ring[i];
- if (ring->ready)
- radeon_fence_wait_empty(rdev, i);
- }
-
- /* program the new power state */
- radeon_dpm_set_power_state(rdev);
-
- /* update current power state */
- rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps;
-
- radeon_dpm_post_set_power_state(rdev);
-
- rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
- rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
- rdev->pm.dpm.single_display = single_display;
-
- if (rdev->asic->dpm.force_performance_level) {
- if (rdev->pm.dpm.thermal_active) {
- enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
- /* force low perf level for thermal */
- radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
- /* save the user's level */
- rdev->pm.dpm.forced_level = level;
- } else {
- /* otherwise, user selected level */
- radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level);
- }
}
-
-done:
- mutex_unlock(&rdev->ring_lock);
- up_write(&rdev->pm.mclk_lock);
-}
-
-void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
-{
- enum radeon_pm_state_type dpm_state;
-
- if (rdev->asic->dpm.powergate_uvd) {
- mutex_lock(&rdev->pm.mutex);
- /* don't powergate anything if we
- have active but paused streams */
- enable |= rdev->pm.dpm.sd > 0;
- enable |= rdev->pm.dpm.hd > 0;
- /* enable/disable UVD */
- radeon_dpm_powergate_uvd(rdev, !enable);
- mutex_unlock(&rdev->pm.mutex);
- } else {
- if (enable) {
- mutex_lock(&rdev->pm.mutex);
- rdev->pm.dpm.uvd_active = true;
- /* disable this for now */
-#if 0
- if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
- dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
- else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
- dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
- else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
- dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
- else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
- dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
- else
#endif
- dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
- rdev->pm.dpm.state = dpm_state;
- mutex_unlock(&rdev->pm.mutex);
- } else {
- mutex_lock(&rdev->pm.mutex);
- rdev->pm.dpm.uvd_active = false;
- mutex_unlock(&rdev->pm.mutex);
- }
-
- radeon_pm_compute_clocks(rdev);
- }
}
-void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable)
-{
- if (enable) {
- mutex_lock(&rdev->pm.mutex);
- rdev->pm.dpm.vce_active = true;
- /* XXX select vce level based on ring/task */
- rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL;
- mutex_unlock(&rdev->pm.mutex);
- } else {
- mutex_lock(&rdev->pm.mutex);
- rdev->pm.dpm.vce_active = false;
- mutex_unlock(&rdev->pm.mutex);
- }
-
- radeon_pm_compute_clocks(rdev);
-}
-
-static void radeon_pm_suspend_old(struct radeon_device *rdev)
+void radeon_pm_suspend(struct radeon_device *rdev)
{
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
@@ -1203,26 +546,7 @@ static void radeon_pm_suspend_old(struct radeon_device *rdev)
cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
}
-static void radeon_pm_suspend_dpm(struct radeon_device *rdev)
-{
- mutex_lock(&rdev->pm.mutex);
- /* disable dpm */
- radeon_dpm_disable(rdev);
- /* reset the power state */
- rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
- rdev->pm.dpm_enabled = false;
- mutex_unlock(&rdev->pm.mutex);
-}
-
-void radeon_pm_suspend(struct radeon_device *rdev)
-{
- if (rdev->pm.pm_method == PM_METHOD_DPM)
- radeon_pm_suspend_dpm(rdev);
- else
- radeon_pm_suspend_old(rdev);
-}
-
-static void radeon_pm_resume_old(struct radeon_device *rdev)
+void radeon_pm_resume(struct radeon_device *rdev)
{
/* set up the default clocks if the MC ucode is loaded */
if ((rdev->family >= CHIP_BARTS) &&
@@ -1259,51 +583,12 @@ static void radeon_pm_resume_old(struct radeon_device *rdev)
radeon_pm_compute_clocks(rdev);
}
-static void radeon_pm_resume_dpm(struct radeon_device *rdev)
-{
- int ret;
-
- /* asic init will reset to the boot state */
- mutex_lock(&rdev->pm.mutex);
- rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
- radeon_dpm_setup_asic(rdev);
- ret = radeon_dpm_enable(rdev);
- mutex_unlock(&rdev->pm.mutex);
- if (ret)
- goto dpm_resume_fail;
- rdev->pm.dpm_enabled = true;
- return;
-
-dpm_resume_fail:
- DRM_ERROR("radeon: dpm resume failed\n");
- if ((rdev->family >= CHIP_BARTS) &&
- (rdev->family <= CHIP_CAYMAN) &&
- rdev->mc_fw) {
- if (rdev->pm.default_vddc)
- radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
- SET_VOLTAGE_TYPE_ASIC_VDDC);
- if (rdev->pm.default_vddci)
- radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
- SET_VOLTAGE_TYPE_ASIC_VDDCI);
- if (rdev->pm.default_sclk)
- radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
- if (rdev->pm.default_mclk)
- radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
- }
-}
-
-void radeon_pm_resume(struct radeon_device *rdev)
-{
- if (rdev->pm.pm_method == PM_METHOD_DPM)
- radeon_pm_resume_dpm(rdev);
- else
- radeon_pm_resume_old(rdev);
-}
-
-static int radeon_pm_init_old(struct radeon_device *rdev)
+int radeon_pm_init(struct radeon_device *rdev)
{
int ret;
+ /* default to profile method */
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
rdev->pm.profile = PM_PROFILE_DEFAULT;
rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
@@ -1347,256 +632,28 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
if (rdev->pm.num_power_states > 1) {
+#ifdef notyet
+ /* where's the best place to put these? */
+ ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+ if (ret)
+ DRM_ERROR("failed to create device file for power profile\n");
+ ret = device_create_file(rdev->dev, &dev_attr_power_method);
+ if (ret)
+ DRM_ERROR("failed to create device file for power method\n");
+#endif
if (radeon_debugfs_pm_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for PM!\n");
}
+#ifdef DRMDEBUG
DRM_INFO("radeon: power management initialized\n");
+#endif
}
return 0;
}
-static void radeon_dpm_print_power_states(struct radeon_device *rdev)
-{
- int i;
-
- for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
- printk("== power state %d ==\n", i);
- radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]);
- }
-}
-
-static int radeon_pm_init_dpm(struct radeon_device *rdev)
-{
- int ret;
-
- /* default to balanced state */
- rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
- rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
- rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
- rdev->pm.default_sclk = rdev->clock.default_sclk;
- rdev->pm.default_mclk = rdev->clock.default_mclk;
- rdev->pm.current_sclk = rdev->clock.default_sclk;
- rdev->pm.current_mclk = rdev->clock.default_mclk;
- rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
-
- if (rdev->bios && rdev->is_atom_bios)
- radeon_atombios_get_power_modes(rdev);
- else
- return -EINVAL;
-
- /* set up the internal thermal sensor if applicable */
- ret = radeon_hwmon_init(rdev);
- if (ret)
- return ret;
-
- INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
- mutex_lock(&rdev->pm.mutex);
- radeon_dpm_init(rdev);
- rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps;
- if (radeon_dpm == 1)
- radeon_dpm_print_power_states(rdev);
- radeon_dpm_setup_asic(rdev);
- ret = radeon_dpm_enable(rdev);
- mutex_unlock(&rdev->pm.mutex);
- if (ret)
- goto dpm_failed;
- rdev->pm.dpm_enabled = true;
-
- if (radeon_debugfs_pm_init(rdev)) {
- DRM_ERROR("Failed to register debugfs file for dpm!\n");
- }
-
- DRM_INFO("radeon: dpm initialized\n");
-
- return 0;
-
-dpm_failed:
- rdev->pm.dpm_enabled = false;
- if ((rdev->family >= CHIP_BARTS) &&
- (rdev->family <= CHIP_CAYMAN) &&
- rdev->mc_fw) {
- if (rdev->pm.default_vddc)
- radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
- SET_VOLTAGE_TYPE_ASIC_VDDC);
- if (rdev->pm.default_vddci)
- radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
- SET_VOLTAGE_TYPE_ASIC_VDDCI);
- if (rdev->pm.default_sclk)
- radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
- if (rdev->pm.default_mclk)
- radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
- }
- DRM_ERROR("radeon: dpm initialization failed\n");
- return ret;
-}
-
-struct radeon_dpm_quirk {
- u32 chip_vendor;
- u32 chip_device;
- u32 subsys_vendor;
- u32 subsys_device;
-};
-
-/* cards with dpm stability problems */
-static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
- /* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
- { PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
- /* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
- { PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
- { 0, 0, 0, 0 },
-};
-
-int radeon_pm_init(struct radeon_device *rdev)
-{
- struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
- bool disable_dpm = false;
-
- /* Apply dpm quirks */
- while (p && p->chip_device != 0) {
- if (rdev->pdev->vendor == p->chip_vendor &&
- rdev->pdev->device == p->chip_device &&
- rdev->pdev->subsystem_vendor == p->subsys_vendor &&
- rdev->pdev->subsystem_device == p->subsys_device) {
- disable_dpm = true;
- break;
- }
- ++p;
- }
-
- /* enable dpm on rv6xx+ */
- switch (rdev->family) {
- case CHIP_RV610:
- case CHIP_RV630:
- case CHIP_RV620:
- case CHIP_RV635:
- case CHIP_RV670:
- case CHIP_RS780:
- case CHIP_RS880:
- case CHIP_RV770:
- /* DPM requires the RLC, RV770+ dGPU requires SMC */
- if (!rdev->rlc_fw)
- rdev->pm.pm_method = PM_METHOD_PROFILE;
- else if ((rdev->family >= CHIP_RV770) &&
- (!(rdev->flags & RADEON_IS_IGP)) &&
- (!rdev->smc_fw))
- rdev->pm.pm_method = PM_METHOD_PROFILE;
- else if (radeon_dpm == 1)
- rdev->pm.pm_method = PM_METHOD_DPM;
- else
- rdev->pm.pm_method = PM_METHOD_PROFILE;
- break;
- case CHIP_RV730:
- case CHIP_RV710:
- case CHIP_RV740:
- case CHIP_CEDAR:
- case CHIP_REDWOOD:
- case CHIP_JUNIPER:
- case CHIP_CYPRESS:
- case CHIP_HEMLOCK:
- case CHIP_PALM:
- case CHIP_SUMO:
- case CHIP_SUMO2:
- case CHIP_BARTS:
- case CHIP_TURKS:
- case CHIP_CAICOS:
- case CHIP_CAYMAN:
- case CHIP_ARUBA:
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- case CHIP_VERDE:
- case CHIP_OLAND:
- case CHIP_HAINAN:
- case CHIP_BONAIRE:
- case CHIP_KABINI:
- case CHIP_KAVERI:
- case CHIP_HAWAII:
- case CHIP_MULLINS:
- /* DPM requires the RLC, RV770+ dGPU requires SMC */
- if (!rdev->rlc_fw)
- rdev->pm.pm_method = PM_METHOD_PROFILE;
- else if ((rdev->family >= CHIP_RV770) &&
- (!(rdev->flags & RADEON_IS_IGP)) &&
- (!rdev->smc_fw))
- rdev->pm.pm_method = PM_METHOD_PROFILE;
- else if (disable_dpm && (radeon_dpm == -1))
- rdev->pm.pm_method = PM_METHOD_PROFILE;
- else if (radeon_dpm == 0)
- rdev->pm.pm_method = PM_METHOD_PROFILE;
- else
- rdev->pm.pm_method = PM_METHOD_DPM;
- break;
- default:
- /* default to profile method */
- rdev->pm.pm_method = PM_METHOD_PROFILE;
- break;
- }
-
- if (rdev->pm.pm_method == PM_METHOD_DPM)
- return radeon_pm_init_dpm(rdev);
- else
- return radeon_pm_init_old(rdev);
-}
-
-int radeon_pm_late_init(struct radeon_device *rdev)
-{
- int ret = 0;
-
- if (rdev->pm.pm_method == PM_METHOD_DPM) {
- if (rdev->pm.dpm_enabled) {
-#ifdef __linux__
- if (!rdev->pm.sysfs_initialized) {
- ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
- if (ret)
- DRM_ERROR("failed to create device file for dpm state\n");
- ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
- if (ret)
- DRM_ERROR("failed to create device file for dpm state\n");
- /* XXX: these are noops for dpm but are here for backwards compat */
- ret = device_create_file(rdev->dev, &dev_attr_power_profile);
- if (ret)
- DRM_ERROR("failed to create device file for power profile\n");
- ret = device_create_file(rdev->dev, &dev_attr_power_method);
- if (ret)
- DRM_ERROR("failed to create device file for power method\n");
- rdev->pm.sysfs_initialized = true;
- }
-#endif
-
- mutex_lock(&rdev->pm.mutex);
- ret = radeon_dpm_late_enable(rdev);
- mutex_unlock(&rdev->pm.mutex);
- if (ret) {
- rdev->pm.dpm_enabled = false;
- DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
- } else {
- /* set the dpm state for PX since there won't be
- * a modeset to call this.
- */
- radeon_pm_compute_clocks(rdev);
- }
- }
- } else {
-#ifdef __linux__
- if ((rdev->pm.num_power_states > 1) &&
- (!rdev->pm.sysfs_initialized)) {
- /* where's the best place to put these? */
- ret = device_create_file(rdev->dev, &dev_attr_power_profile);
- if (ret)
- DRM_ERROR("failed to create device file for power profile\n");
- ret = device_create_file(rdev->dev, &dev_attr_power_method);
- if (ret)
- DRM_ERROR("failed to create device file for power method\n");
- if (!ret)
- rdev->pm.sysfs_initialized = true;
- }
-#endif
- }
- return ret;
-}
-
-static void radeon_pm_fini_old(struct radeon_device *rdev)
+void radeon_pm_fini(struct radeon_device *rdev)
{
if (rdev->pm.num_power_states > 1) {
mutex_lock(&rdev->pm.mutex);
@@ -1614,46 +671,19 @@ static void radeon_pm_fini_old(struct radeon_device *rdev)
cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
-#ifdef __linux__
+#ifdef notyet
device_remove_file(rdev->dev, &dev_attr_power_profile);
device_remove_file(rdev->dev, &dev_attr_power_method);
#endif
}
- radeon_hwmon_fini(rdev);
- kfree(rdev->pm.power_state);
-}
-
-static void radeon_pm_fini_dpm(struct radeon_device *rdev)
-{
- if (rdev->pm.num_power_states > 1) {
- mutex_lock(&rdev->pm.mutex);
- radeon_dpm_disable(rdev);
- mutex_unlock(&rdev->pm.mutex);
-
-#ifdef __linux__
- device_remove_file(rdev->dev, &dev_attr_power_dpm_state);
- device_remove_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
- /* XXX backwards compat */
- device_remove_file(rdev->dev, &dev_attr_power_profile);
- device_remove_file(rdev->dev, &dev_attr_power_method);
-#endif
- }
- radeon_dpm_fini(rdev);
+ if (rdev->pm.power_state)
+ kfree(rdev->pm.power_state);
radeon_hwmon_fini(rdev);
- kfree(rdev->pm.power_state);
-}
-
-void radeon_pm_fini(struct radeon_device *rdev)
-{
- if (rdev->pm.pm_method == PM_METHOD_DPM)
- radeon_pm_fini_dpm(rdev);
- else
- radeon_pm_fini_old(rdev);
}
-static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
+void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
struct drm_crtc *crtc;
@@ -1666,14 +696,12 @@ static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
rdev->pm.active_crtcs = 0;
rdev->pm.active_crtc_count = 0;
- if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc,
- &ddev->mode_config.crtc_list, head) {
- radeon_crtc = to_radeon_crtc(crtc);
- if (radeon_crtc->enabled) {
- rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
- rdev->pm.active_crtc_count++;
- }
+ list_for_each_entry(crtc,
+ &ddev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (radeon_crtc->enabled) {
+ rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
+ rdev->pm.active_crtc_count++;
}
}
@@ -1726,51 +754,6 @@ static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
mutex_unlock(&rdev->pm.mutex);
}
-static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
-{
- struct drm_device *ddev = rdev->ddev;
- struct drm_crtc *crtc;
- struct radeon_crtc *radeon_crtc;
-
- if (!rdev->pm.dpm_enabled)
- return;
-
- mutex_lock(&rdev->pm.mutex);
-
- /* update active crtc counts */
- rdev->pm.dpm.new_active_crtcs = 0;
- rdev->pm.dpm.new_active_crtc_count = 0;
- if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc,
- &ddev->mode_config.crtc_list, head) {
- radeon_crtc = to_radeon_crtc(crtc);
- if (crtc->enabled) {
- rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
- rdev->pm.dpm.new_active_crtc_count++;
- }
- }
- }
-
- /* update battery/ac status */
- if (power_supply_is_system_supplied() > 0)
- rdev->pm.dpm.ac_power = true;
- else
- rdev->pm.dpm.ac_power = false;
-
- radeon_dpm_change_power_state_locked(rdev);
-
- mutex_unlock(&rdev->pm.mutex);
-
-}
-
-void radeon_pm_compute_clocks(struct radeon_device *rdev)
-{
- if (rdev->pm.pm_method == PM_METHOD_DPM)
- radeon_pm_compute_clocks_dpm(rdev);
- else
- radeon_pm_compute_clocks_old(rdev);
-}
-
static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
int crtc, vpos, hpos, vbl_status;
@@ -1782,8 +765,7 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev)
for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
if (rdev->pm.active_crtcs & (1 << crtc)) {
vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev,
- crtc,
- USE_REAL_VBLANKSTART,
+ crtc, 0,
&vpos, &hpos, NULL, NULL,
&rdev->mode_info.crtcs[crtc]->base.hwmode);
if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
@@ -1879,33 +861,20 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- struct drm_device *ddev = rdev->ddev;
- if ((rdev->flags & RADEON_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
- seq_printf(m, "PX asic powered off\n");
- } else if (rdev->pm.dpm_enabled) {
- mutex_lock(&rdev->pm.mutex);
- if (rdev->asic->dpm.debugfs_print_current_performance_level)
- radeon_dpm_debugfs_print_current_performance_level(rdev, m);
- else
- seq_printf(m, "Debugfs support not implemented for this asic\n");
- mutex_unlock(&rdev->pm.mutex);
- } else {
- seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
- /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
- if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
- seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
- else
- seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
- seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
- if (rdev->asic->pm.get_memory_clock)
- seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
- if (rdev->pm.current_vddc)
- seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
- if (rdev->asic->pm.get_pcie_lanes)
- seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
- }
+ seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
+ /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
+ if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
+ seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
+ else
+ seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+ seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
+ if (rdev->asic->pm.get_memory_clock)
+ seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
+ if (rdev->pm.current_vddc)
+ seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
+ if (rdev->asic->pm.get_pcie_lanes)
+ seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
return 0;
}
diff --git a/sys/dev/pci/drm/radeon/radeon_prime.c b/sys/dev/pci/drm/radeon/radeon_prime.c
index 975d2d58416..628d1534823 100644
--- a/sys/dev/pci/drm/radeon/radeon_prime.c
+++ b/sys/dev/pci/drm/radeon/radeon_prime.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_prime.c,v 1.6 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2012 Advanced Micro Devices, Inc.
*
@@ -28,99 +29,197 @@
#include "radeon.h"
#include <dev/pci/drm/radeon_drm.h>
-struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
+static struct sg_table *radeon_gem_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction dir)
{
- struct radeon_bo *bo = gem_to_radeon_bo(obj);
+ struct radeon_bo *bo = attachment->dmabuf->priv;
+ struct drm_device *dev = bo->rdev->ddev;
int npages = bo->tbo.num_pages;
+ struct sg_table *sg;
+ int nents;
+
+ mutex_lock(&dev->struct_mutex);
+ sg = drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
+ nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ mutex_unlock(&dev->struct_mutex);
+ return sg;
+}
- return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
+static void radeon_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *sg, enum dma_data_direction dir)
+{
+ dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ sg_free_table(sg);
+ kfree(sg);
}
-void *radeon_gem_prime_vmap(struct drm_gem_object *obj)
+static void radeon_gem_dmabuf_release(struct dma_buf *dma_buf)
{
- struct radeon_bo *bo = gem_to_radeon_bo(obj);
+ struct radeon_bo *bo = dma_buf->priv;
+
+ if (bo->gem_base.export_dma_buf == dma_buf) {
+ DRM_ERROR("unreference dmabuf %p\n", &bo->gem_base);
+ bo->gem_base.export_dma_buf = NULL;
+ drm_gem_object_unreference_unlocked(&bo->gem_base);
+ }
+}
+
+static void *radeon_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void radeon_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+static void *radeon_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ return NULL;
+}
+
+static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+
+}
+
+static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+ struct radeon_bo *bo = dma_buf->priv;
+ struct drm_device *dev = bo->rdev->ddev;
int ret;
+ mutex_lock(&dev->struct_mutex);
+ if (bo->vmapping_count) {
+ bo->vmapping_count++;
+ goto out_unlock;
+ }
+
ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
&bo->dma_buf_vmap);
- if (ret)
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
return ERR_PTR(ret);
-
+ }
+ bo->vmapping_count = 1;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
return bo->dma_buf_vmap.virtual;
}
-void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
- struct radeon_bo *bo = gem_to_radeon_bo(obj);
-
- ttm_bo_kunmap(&bo->dma_buf_vmap);
+ struct radeon_bo *bo = dma_buf->priv;
+ struct drm_device *dev = bo->rdev->ddev;
+
+ mutex_lock(&dev->struct_mutex);
+ bo->vmapping_count--;
+ if (bo->vmapping_count == 0) {
+ ttm_bo_kunmap(&bo->dma_buf_vmap);
+ }
+ mutex_unlock(&dev->struct_mutex);
}
-
-struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sg)
+const static struct dma_buf_ops radeon_dmabuf_ops = {
+ .map_dma_buf = radeon_gem_map_dma_buf,
+ .unmap_dma_buf = radeon_gem_unmap_dma_buf,
+ .release = radeon_gem_dmabuf_release,
+ .kmap = radeon_gem_kmap,
+ .kmap_atomic = radeon_gem_kmap_atomic,
+ .kunmap = radeon_gem_kunmap,
+ .kunmap_atomic = radeon_gem_kunmap_atomic,
+ .mmap = radeon_gem_prime_mmap,
+ .vmap = radeon_gem_prime_vmap,
+ .vunmap = radeon_gem_prime_vunmap,
+};
+
+static int radeon_prime_create(struct drm_device *dev,
+ size_t size,
+ struct sg_table *sg,
+ struct radeon_bo **pbo)
{
- struct reservation_object *resv = attach->dmabuf->resv;
struct radeon_device *rdev = dev->dev_private;
struct radeon_bo *bo;
int ret;
- ww_mutex_lock(&resv->lock, NULL);
- ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false,
- RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
- ww_mutex_unlock(&resv->lock);
+ ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
+ RADEON_GEM_DOMAIN_GTT, sg, pbo);
if (ret)
- return ERR_PTR(ret);
+ return ret;
+ bo = *pbo;
+ bo->gem_base.driver_private = bo;
mutex_lock(&rdev->gem.mutex);
list_add_tail(&bo->list, &rdev->gem.objects);
mutex_unlock(&rdev->gem.mutex);
- return &bo->gem_base;
+ return 0;
}
-int radeon_gem_prime_pin(struct drm_gem_object *obj)
+struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ int flags)
{
struct radeon_bo *bo = gem_to_radeon_bo(obj);
int ret = 0;
ret = radeon_bo_reserve(bo, false);
if (unlikely(ret != 0))
- return ret;
+ return ERR_PTR(ret);
/* pin buffer into GTT */
ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
+ if (ret) {
+ radeon_bo_unreserve(bo);
+ return ERR_PTR(ret);
+ }
radeon_bo_unreserve(bo);
- return ret;
+ return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
}
-void radeon_gem_prime_unpin(struct drm_gem_object *obj)
+struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
{
- struct radeon_bo *bo = gem_to_radeon_bo(obj);
- int ret = 0;
-
- ret = radeon_bo_reserve(bo, false);
- if (unlikely(ret != 0))
- return;
-
- radeon_bo_unpin(bo);
- radeon_bo_unreserve(bo);
-}
+ struct dma_buf_attachment *attach;
+ struct sg_table *sg;
+ struct radeon_bo *bo;
+ int ret;
+ if (dma_buf->ops == &radeon_dmabuf_ops) {
+ bo = dma_buf->priv;
+ if (bo->gem_base.dev == dev) {
+ drm_gem_object_reference(&bo->gem_base);
+ dma_buf_put(dma_buf);
+ return &bo->gem_base;
+ }
+ }
+
+ /* need to attach */
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sg)) {
+ ret = PTR_ERR(sg);
+ goto fail_detach;
+ }
+
+ ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
+ if (ret)
+ goto fail_unmap;
-struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *obj)
-{
- struct radeon_bo *bo = gem_to_radeon_bo(obj);
+ bo->gem_base.import_attach = attach;
- return bo->tbo.resv;
-}
+ return &bo->gem_base;
-struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gobj,
- int flags)
-{
- struct radeon_bo *bo = gem_to_radeon_bo(gobj);
- if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
- return ERR_PTR(-EPERM);
- return drm_gem_prime_export(dev, gobj, flags);
+fail_unmap:
+ dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ return ERR_PTR(ret);
}
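For a foreign buffer, the import path above runs the standard dma-buf consumer sequence; stripped to its core:

    attach = dma_buf_attach(dma_buf, dev->dev);              /* bind our device */
    sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);  /* exporter maps pages */
    /* ... wrap sg in a GTT object via radeon_prime_create() ... */
    /* on failure or teardown, undo in reverse: */
    dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
    dma_buf_detach(dma_buf, attach);

The fast path at the top short-circuits all of this when the dma-buf was exported by the same radeon device: it takes a GEM reference and drops the dma-buf reference instead of re-importing its own pages.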
diff --git a/sys/dev/pci/drm/radeon/radeon_reg.h b/sys/dev/pci/drm/radeon/radeon_reg.h
index 62d54976d24..d6e1c9444d7 100644
--- a/sys/dev/pci/drm/radeon/radeon_reg.h
+++ b/sys/dev/pci/drm/radeon/radeon_reg.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_reg.h,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2000 ATI Technologies Inc., Markham, Ontario, and
* VA Linux Systems Inc., Fremont, California.
@@ -57,7 +58,6 @@
#include "evergreen_reg.h"
#include "ni_reg.h"
#include "si_reg.h"
-#include "cik_reg.h"
#define RADEON_MC_AGP_LOCATION 0x014c
#define RADEON_MC_AGP_START_MASK 0x0000FFFF
@@ -427,6 +427,8 @@
# define RADEON_CRTC_VSYNC_DIS (1 << 9)
# define RADEON_CRTC_DISPLAY_DIS (1 << 10)
# define RADEON_CRTC_SYNC_TRISTAT (1 << 11)
+# define RADEON_CRTC_HSYNC_TRISTAT (1 << 12)
+# define RADEON_CRTC_VSYNC_TRISTAT (1 << 13)
# define RADEON_CRTC_CRT_ON (1 << 15)
#define RADEON_CRTC_EXT_CNTL_DPMS_BYTE 0x0055
# define RADEON_CRTC_HSYNC_DIS_BYTE (1 << 0)
@@ -3707,19 +3709,4 @@
#define RV530_GB_PIPE_SELECT2 0x4124
-#define RADEON_CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define RADEON_CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define RADEON_CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
-#define RADEON_CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-#define R100_CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
-#define R600_CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
-#define RADEON_PACKET_TYPE0 0
-#define RADEON_PACKET_TYPE1 1
-#define RADEON_PACKET_TYPE2 2
-#define RADEON_PACKET_TYPE3 3
-
-#define RADEON_PACKET3_NOP 0x10
-
-#define RADEON_VLINE_STAT (1 << 12)
-
#endif
diff --git a/sys/dev/pci/drm/radeon/radeon_ring.c b/sys/dev/pci/drm/radeon/radeon_ring.c
index 54d2e7ef9c2..b5f19f716d1 100644
--- a/sys/dev/pci/drm/radeon/radeon_ring.c
+++ b/sys/dev/pci/drm/radeon/radeon_ring.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_ring.c,v 1.10 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -27,7 +28,256 @@
* Christian König
*/
#include <dev/pci/drm/drmP.h>
+#include <dev/pci/drm/radeon_drm.h>
+#include "radeon_reg.h"
#include "radeon.h"
+#include "atom.h"
+
+/*
+ * IB
+ * IBs (Indirect Buffers) and areas of GPU accessible memory where
+ * commands are stored. You can put a pointer to the IB in the
+ * command ring and the hw will fetch the commands from the IB
+ * and execute them. Generally userspace acceleration drivers
+ * produce command buffers which are send to the kernel and
+ * put in IBs for execution by the requested ring.
+ */
+static int radeon_debugfs_sa_init(struct radeon_device *rdev);
+
+/**
+ * radeon_ib_get - request an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the IB is associated with
+ * @ib: IB object returned
+ * @size: requested IB size
+ *
+ * Request an IB (all asics). IBs are allocated using the
+ * suballocator.
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+ struct radeon_ib *ib, struct radeon_vm *vm,
+ unsigned size)
+{
+ int i, r;
+
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
+ if (r) {
+ dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
+ return r;
+ }
+
+ r = radeon_semaphore_create(rdev, &ib->semaphore);
+ if (r) {
+ return r;
+ }
+
+ ib->ring = ring;
+ ib->fence = NULL;
+ ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
+ ib->vm = vm;
+ if (vm) {
+ /* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
+ * space and soffset is the offset inside the pool bo
+ */
+ ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
+ } else {
+ ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
+ }
+ ib->is_const_ib = false;
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ ib->sync_to[i] = NULL;
+
+ return 0;
+}
+
+/**
+ * radeon_ib_free - free an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to free
+ *
+ * Free an IB (all asics).
+ */
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
+ radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
+ radeon_fence_unref(&ib->fence);
+}
+
+/**
+ * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ * @const_ib: Const IB to schedule (SI only)
+ *
+ * Schedule an IB on the associated ring (all asics).
+ * Returns 0 on success, error on failure.
+ *
+ * On SI, there are two parallel engines fed from the primary ring,
+ * the CE (Constant Engine) and the DE (Drawing Engine). Since
+ * resource descriptors have moved to memory, the CE allows you to
+ * prime the caches while the DE is updating register state so that
+ * the resource descriptors will be already in cache when the draw is
+ * processed. To accomplish this, the userspace driver submits two
+ * IBs, one for the CE and one for the DE. If there is a CE IB (called
+ * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
+ * to SI there was just a DE IB.
+ */
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
+ struct radeon_ib *const_ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+ bool need_sync = false;
+ int i, r = 0;
+
+ if (!ib->length_dw || !ring->ready) {
+ /* TODO: nothing in the IB; we should report this. */
+ dev_err(rdev->dev, "couldn't schedule ib\n");
+ return -EINVAL;
+ }
+
+ /* 64 dwords should be enough for fence too */
+ r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
+ if (r) {
+ dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
+ return r;
+ }
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_fence *fence = ib->sync_to[i];
+ if (radeon_fence_need_sync(fence, ib->ring)) {
+ need_sync = true;
+ radeon_semaphore_sync_rings(rdev, ib->semaphore,
+ fence->ring, ib->ring);
+ radeon_fence_note_sync(fence, ib->ring);
+ }
+ }
+ /* immediately free semaphore when we don't need to sync */
+ if (!need_sync) {
+ radeon_semaphore_free(rdev, &ib->semaphore, NULL);
+ }
+ /* if we can't remember our last VM flush then flush now! */
+ /* XXX figure out why we have to flush for every IB */
+ if (ib->vm /*&& !ib->vm->last_flush*/) {
+ radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
+ }
+ if (const_ib) {
+ radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
+ radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
+ }
+ radeon_ring_ib_execute(rdev, ib->ring, ib);
+ r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
+ if (r) {
+ dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+ if (const_ib) {
+ const_ib->fence = radeon_fence_ref(ib->fence);
+ }
+ /* we just flushed the VM, remember that */
+ if (ib->vm && !ib->vm->last_flush) {
+ ib->vm->last_flush = radeon_fence_ref(ib->fence);
+ }
+ radeon_ring_unlock_commit(rdev, ring);
+ return 0;
+}
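Taken together, radeon_ib_get(), radeon_ib_schedule() and radeon_ib_free() form the submission lifecycle. A minimal sketch of a kernel caller, with error handling elided and the ring index, size and dword count purely illustrative:

    struct radeon_ib ib;
    int r;

    r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 64 * 4);
    if (r)
            return r;
    /* fill ib.ptr[] with command packets, then record the length */
    ib.length_dw = n;
    r = radeon_ib_schedule(rdev, &ib, NULL);  /* no CONST_IB outside SI */
    radeon_ib_free(rdev, &ib);                /* the fence keeps the sa_bo alive */
    return r;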
+
+/**
+ * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the suballocator to manage a pool of memory
+ * for use as IBs (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ib_pool_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->ib_pool_ready) {
+ return 0;
+ }
+ r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
+ RADEON_IB_POOL_SIZE*64*1024,
+ RADEON_GPU_PAGE_SIZE,
+ RADEON_GEM_DOMAIN_GTT);
+ if (r) {
+ return r;
+ }
+
+ r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
+ if (r) {
+ return r;
+ }
+
+ rdev->ib_pool_ready = true;
+ if (radeon_debugfs_sa_init(rdev)) {
+ dev_err(rdev->dev, "failed to register debugfs file for SA\n");
+ }
+ return 0;
+}
+
+/**
+ * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the suballocator managing the pool of memory
+ * for use as IBs (all asics).
+ */
+void radeon_ib_pool_fini(struct radeon_device *rdev)
+{
+ if (rdev->ib_pool_ready) {
+ radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
+ radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
+ rdev->ib_pool_ready = false;
+ }
+}
+
+/**
+ * radeon_ib_ring_tests - test IBs on the rings
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Test an IB (Indirect Buffer) on each ring.
+ * If the test fails, disable the ring.
+ * Returns 0 on success, error if the primary GFX ring
+ * IB test fails.
+ */
+int radeon_ib_ring_tests(struct radeon_device *rdev)
+{
+ unsigned i;
+ int r;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_ring *ring = &rdev->ring[i];
+
+ if (!ring->ready)
+ continue;
+
+ r = radeon_ib_test(rdev, i, ring);
+ if (r) {
+ ring->ready = false;
+
+ if (i == RADEON_RING_TYPE_GFX_INDEX) {
+ /* oh, oh, that's really bad */
+ DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
+ rdev->accel_working = false;
+ return r;
+
+ } else {
+ /* still not good, but we can live with it */
+ DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
+ }
+ }
+ }
+ return 0;
+}
/*
* Rings
@@ -44,6 +294,29 @@
*/
static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
+#if defined(DRM_DEBUG_CODE) && DRM_DEBUG_CODE != 0
+/**
+ * radeon_ring_write - write a value to the ring
+ *
+ * @ring: radeon_ring structure holding ring information
+ * @v: dword (dw) value to write
+ *
+ * Write a value to the requested ring buffer (all asics).
+ */
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
+{
+#if DRM_DEBUG_CODE
+ if (ring->count_dw <= 0) {
+ DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
+ }
+#endif
+ ring->ring[ring->wptr++] = v;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw--;
+ ring->ring_free_dw--;
+}
+#endif
+
/**
* radeon_ring_supports_scratch_reg - check if the ring supports
* writing to scratch registers
@@ -77,17 +350,19 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
*/
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
- uint32_t rptr = radeon_ring_get_rptr(rdev, ring);
+ u32 rptr;
+ if (rdev->wb.enabled)
+ rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
+ else
+ rptr = RREG32(ring->rptr_reg);
+ ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
/* This works because ring_size is a power of 2 */
- ring->ring_free_dw = rptr + (ring->ring_size / 4);
+ ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
ring->ring_free_dw -= ring->wptr;
ring->ring_free_dw &= ring->ptr_mask;
if (!ring->ring_free_dw) {
- /* this is an empty ring */
ring->ring_free_dw = ring->ring_size / 4;
- /* update lockup info to avoid false positive */
- radeon_ring_lockup_update(rdev, ring);
}
}
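The masking works because ring_size is a power of two: ptr_mask equals ring_size/4 - 1, so the subtraction is effectively performed modulo the ring size in dwords. A worked example for a 16 KB ring (4096 dwords, ptr_mask 0xfff):

    rptr = 100,  wptr = 4000:  (100 + 4096 - 4000) & 0xfff =  196 free dwords
    rptr = 4000, wptr = 100:   (4000 + 4096 - 100) & 0xfff = 3900 free dwords
    rptr == wptr:              the mask yields 0, which the code rewrites to
                               ring_size/4 -- an empty ring, not a full one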
@@ -111,13 +386,19 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
/* Align requested size with padding so unlock_commit can
* pad safely */
radeon_ring_free_size(rdev, ring);
+ if (ring->ring_free_dw == (ring->ring_size / 4)) {
+ /* This is an empty ring; update lockup info to avoid
+ * a false positive.
+ */
+ radeon_ring_lockup_update(ring);
+ }
ndw = (ndw + ring->align_mask) & ~ring->align_mask;
while (ndw > (ring->ring_free_dw - 1)) {
radeon_ring_free_size(rdev, ring);
if (ndw < ring->ring_free_dw) {
break;
}
- r = radeon_fence_wait_next(rdev, ring->idx);
+ r = radeon_fence_wait_next_locked(rdev, ring->idx);
if (r)
return r;
}
@@ -156,30 +437,19 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
- * @hdp_flush: Whether or not to perform an HDP cache flush
*
* Update the wptr (write pointer) to tell the GPU to
* execute new commands on the ring buffer (all asics).
*/
-void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring,
- bool hdp_flush)
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
- /* If we are emitting the HDP flush via the ring buffer, we need to
- * do it before padding.
- */
- if (hdp_flush && rdev->asic->ring[ring->idx]->hdp_flush)
- rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);
/* We pad to match fetch size */
while (ring->wptr & ring->align_mask) {
radeon_ring_write(ring, ring->nop);
}
- mb();
- /* If we are emitting the HDP flush via MMIO, we need to do it after
- * all CPU writes to VRAM finished.
- */
- if (hdp_flush && rdev->asic->mmio_hdp_flush)
- rdev->asic->mmio_hdp_flush(rdev);
- radeon_ring_set_wptr(rdev, ring);
+ DRM_MEMORYBARRIER();
+ WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
+ (void)RREG32(ring->wptr_reg);
}
/**
@@ -188,14 +458,12 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring,
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
- * @hdp_flush: Whether or not to perform an HDP cache flush
*
* Call radeon_ring_commit() then unlock the ring (all asics).
*/
-void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring,
- bool hdp_flush)
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
- radeon_ring_commit(rdev, ring, hdp_flush);
+ radeon_ring_commit(rdev, ring);
mutex_unlock(&rdev->ring_lock);
}
@@ -225,17 +493,39 @@ void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *rin
}
/**
+ * radeon_ring_force_activity - add some nop packets to the ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Add some nop packets to the ring to force activity (all asics).
+ * Used for lockup detection to see if the rptr is advancing.
+ */
+void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ int r;
+
+ radeon_ring_free_size(rdev, ring);
+ if (ring->rptr == ring->wptr) {
+ r = radeon_ring_alloc(rdev, ring, 1);
+ if (!r) {
+ radeon_ring_write(ring, ring->nop);
+ radeon_ring_commit(rdev, ring);
+ }
+ }
+}
+
+/**
* radeon_ring_lockup_update - update lockup variables
*
* @ring: radeon_ring structure holding ring information
*
* Update the last rptr value and timestamp (all asics).
*/
-void radeon_ring_lockup_update(struct radeon_device *rdev,
- struct radeon_ring *ring)
+void radeon_ring_lockup_update(struct radeon_ring *ring)
{
- atomic_set(&ring->last_rptr, radeon_ring_get_rptr(rdev, ring));
- atomic64_set(&ring->last_activity, jiffies_64);
+ ring->last_rptr = ring->rptr;
+ ring->last_activity = jiffies;
}
/**
@@ -243,23 +533,42 @@ void radeon_ring_lockup_update(struct radeon_device *rdev,
* @rdev: radeon device structure
* @ring: radeon_ring structure holding ring information
*
- */
+ * We don't need to initialize the lockup tracking information: either the
+ * CP rptr will move to a different value, or jiffies will wrap around, and
+ * both cases force an update of the lockup tracking information.
+ *
+ * A possible false positive is if we get called after a long while and
+ * last_rptr == the current CP rptr; even if it's unlikely, it might happen.
+ * To avoid this, if the elapsed time since the last call is bigger than
+ * 2 seconds we return false and update the tracking information. Because of
+ * this the caller must call radeon_ring_test_lockup several times in less
+ * than 2 seconds for a lockup to be reported; the fencing code should be
+ * cautious about that.
+ *
+ * The caller should write to the ring to force the CP to do something, so
+ * we don't get a false positive when the CP simply has nothing to do.
+ */
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
- uint32_t rptr = radeon_ring_get_rptr(rdev, ring);
- uint64_t last = atomic64_read(&ring->last_activity);
- uint64_t elapsed;
+ unsigned long cjiffies, elapsed;
+ uint32_t rptr;
- if (rptr != atomic_read(&ring->last_rptr)) {
- /* ring is still working, no lockup */
- radeon_ring_lockup_update(rdev, ring);
+ cjiffies = jiffies;
+ if (!time_after(cjiffies, ring->last_activity)) {
+ /* likely a wrap around */
+ radeon_ring_lockup_update(ring);
return false;
}
-
- elapsed = jiffies_to_msecs(jiffies_64 - last);
+ rptr = RREG32(ring->rptr_reg);
+ ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
+ if (ring->rptr != ring->last_rptr) {
+ /* CP is still working no lockup */
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
- dev_err(rdev->dev, "ring %d stalled for more than %llumsec\n",
- ring->idx, elapsed);
+ dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
return true;
}
/* give a chance to the GPU ... */
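Putting radeon_ring_force_activity() and radeon_ring_test_lockup() together, a sketch of the polling pattern the comment above asks callers to follow (assumed usage):

	/* Poke the ring so the CP has work, then test; per the comment above,
	 * this must fire more than once within 2 seconds to report a lockup. */
	radeon_ring_force_activity(rdev, ring);
	if (radeon_ring_test_lockup(rdev, ring))
		DRM_ERROR("radeon: ring %d appears locked up\n", ring->idx);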
@@ -314,7 +623,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
}
/* and then save the content of the ring */
- *data = drm_malloc_ab(size, sizeof(uint32_t));
+ *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
if (!*data) {
mutex_unlock(&rdev->ring_lock);
return 0;
@@ -355,8 +664,8 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, data[i]);
}
- radeon_ring_unlock_commit(rdev, ring, false);
- drm_free_large(data);
+ radeon_ring_unlock_commit(rdev, ring);
+ kfree(data);
return 0;
}
@@ -367,23 +676,32 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
* @ring: radeon_ring structure holding ring information
* @ring_size: size of the ring
* @rptr_offs: offset of the rptr writeback location in the WB buffer
+ * @rptr_reg: MMIO offset of the rptr register
+ * @wptr_reg: MMIO offset of the wptr register
+ * @ptr_reg_shift: bit offset of the rptr/wptr values
+ * @ptr_reg_mask: bit mask of the rptr/wptr values
* @nop: nop packet for this ring
*
* Initialize the driver information for the selected ring (all asics).
* Returns 0 on success, error on failure.
*/
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
- unsigned rptr_offs, u32 nop)
+ unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
+ u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
int r;
ring->ring_size = ring_size;
ring->rptr_offs = rptr_offs;
+ ring->rptr_reg = rptr_reg;
+ ring->wptr_reg = wptr_reg;
+ ring->ptr_reg_shift = ptr_reg_shift;
+ ring->ptr_reg_mask = ptr_reg_mask;
ring->nop = nop;
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, 0, NULL,
+ RADEON_GEM_DOMAIN_GTT,
NULL, &ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r);
@@ -417,7 +735,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
if (radeon_debugfs_ring_init(rdev, ring)) {
DRM_ERROR("Failed to register debugfs file for rings !\n");
}
- radeon_ring_lockup_update(rdev, ring);
+ radeon_ring_lockup_update(ring);
return 0;
}
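For reference, a caller sketch under the restored eight-argument signature; the register names, mask, and nop packet below are the classic R100-era values and are assumptions, not taken from this hunk:

	r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
			     0, 0x7fffff, RADEON_CP_PACKET2);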
@@ -464,74 +782,66 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
struct radeon_device *rdev = dev->dev_private;
int ridx = *(int*)node->info_ent->data;
struct radeon_ring *ring = &rdev->ring[ridx];
-
- uint32_t rptr, wptr, rptr_next;
unsigned count, i, j;
+ u32 tmp;
radeon_ring_free_size(rdev, ring);
count = (ring->ring_size / 4) - ring->ring_free_dw;
-
- wptr = radeon_ring_get_wptr(rdev, ring);
- seq_printf(m, "wptr: 0x%08x [%5d]\n",
- wptr, wptr);
-
- rptr = radeon_ring_get_rptr(rdev, ring);
- seq_printf(m, "rptr: 0x%08x [%5d]\n",
- rptr, rptr);
-
+ tmp = RREG32(ring->wptr_reg) >> ring->ptr_reg_shift;
+ seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp);
+ tmp = RREG32(ring->rptr_reg) >> ring->ptr_reg_shift;
+ seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp);
if (ring->rptr_save_reg) {
- rptr_next = RREG32(ring->rptr_save_reg);
- seq_printf(m, "rptr next(0x%04x): 0x%08x [%5d]\n",
- ring->rptr_save_reg, rptr_next, rptr_next);
- } else
- rptr_next = ~0;
-
- seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
- ring->wptr, ring->wptr);
- seq_printf(m, "last semaphore signal addr : 0x%016llx\n",
- ring->last_semaphore_signal_addr);
- seq_printf(m, "last semaphore wait addr : 0x%016llx\n",
- ring->last_semaphore_wait_addr);
+ seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
+ RREG32(ring->rptr_save_reg));
+ }
+ seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
+ seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
+ seq_printf(m, "last semaphore signal addr : 0x%016llx\n", ring->last_semaphore_signal_addr);
+ seq_printf(m, "last semaphore wait addr : 0x%016llx\n", ring->last_semaphore_wait_addr);
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
-
- if (!ring->ring)
- return 0;
-
 /* print 32 dwords before the current rptr, as often the last executed
 * packet is the root issue
 */
- i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
- for (j = 0; j <= (count + 32); j++) {
- seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
- if (rptr == i)
- seq_puts(m, " *");
- if (rptr_next == i)
- seq_puts(m, " #");
- seq_puts(m, "\n");
- i = (i + 1) & ring->ptr_mask;
+ i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
+ if (ring->ready) {
+ for (j = 0; j <= (count + 32); j++) {
+ seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
+ i = (i + 1) & ring->ptr_mask;
+ }
}
return 0;
}
-static int radeon_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
-static int cayman_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
-static int cayman_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
-static int radeon_dma1_index = R600_RING_TYPE_DMA_INDEX;
-static int radeon_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
-static int r600_uvd_index = R600_RING_TYPE_UVD_INDEX;
-static int si_vce1_index = TN_RING_TYPE_VCE1_INDEX;
-static int si_vce2_index = TN_RING_TYPE_VCE2_INDEX;
+static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
+static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
+static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
+static int radeon_ring_type_dma1_index = R600_RING_TYPE_DMA_INDEX;
+static int radeon_ring_type_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
static struct drm_info_list radeon_debugfs_ring_info_list[] = {
- {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_gfx_index},
- {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_cp1_index},
- {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_cp2_index},
- {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_dma1_index},
- {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_dma2_index},
- {"radeon_ring_uvd", radeon_debugfs_ring_info, 0, &r600_uvd_index},
- {"radeon_ring_vce1", radeon_debugfs_ring_info, 0, &si_vce1_index},
- {"radeon_ring_vce2", radeon_debugfs_ring_info, 0, &si_vce2_index},
+ {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
+ {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
+ {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
+ {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma1_index},
+ {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma2_index},
+};
+
+static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
+
+ return 0;
+}
+
+static struct drm_info_list radeon_debugfs_sa_list[] = {
+ {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
};
#endif
@@ -555,3 +865,12 @@ static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ri
#endif
return 0;
}
+
+static int radeon_debugfs_sa_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
+#else
+ return 0;
+#endif
+}
diff --git a/sys/dev/pci/drm/radeon/radeon_sa.c b/sys/dev/pci/drm/radeon/radeon_sa.c
index a9e3a2308c8..5eef79ecf47 100644
--- a/sys/dev/pci/drm/radeon/radeon_sa.c
+++ b/sys/dev/pci/drm/radeon/radeon_sa.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_sa.c,v 1.10 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2011 Red Hat Inc.
* All Rights Reserved.
@@ -49,7 +50,7 @@ static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- unsigned size, u32 align, u32 domain, u32 flags)
+ unsigned size, u32 align, u32 domain)
{
int i, r;
@@ -65,7 +66,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
}
r = radeon_bo_create(rdev, size, align, true,
- domain, flags, NULL, NULL, &sa_manager->bo);
+ domain, NULL, &sa_manager->bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
@@ -312,11 +313,11 @@ static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
int radeon_sa_bo_new(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
struct radeon_sa_bo **sa_bo,
- unsigned size, unsigned align)
+ unsigned size, unsigned align, bool block)
{
struct radeon_fence *fences[RADEON_NUM_RINGS];
unsigned tries[RADEON_NUM_RINGS];
- int i, r;
+ int i, r, error;
BUG_ON(align > sa_manager->align);
BUG_ON(size > sa_manager->size);
@@ -349,20 +350,24 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
/* see if we can skip over some allocations */
} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- radeon_fence_ref(fences[i]);
-
spin_unlock(&sa_manager->wq.lock);
r = radeon_fence_wait_any(rdev, fences, false);
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- radeon_fence_unref(&fences[i]);
spin_lock(&sa_manager->wq.lock);
 /* if we have nothing to wait for, block */
- if (r == -ENOENT) {
- r = wait_event_interruptible_locked(
- sa_manager->wq,
- radeon_sa_event(sa_manager, size, align)
- );
+ if (r == -ENOENT && block) {
+ r = 0;
+ while (r == 0) {
+ if (radeon_sa_event(sa_manager, size, align))
+ break;
+ error = msleep(&sa_manager->wq, &sa_manager->wq.lock,
+ PZERO | PCATCH, "samgr", 0);
+ if (error == ERESTART)
+ error = EINTR; /* XXX */
+ r = -error;
+ }
+
+ } else if (r == -ENOENT) {
+ r = -ENOMEM;
}
} while (!r);
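The new block parameter changes the failure mode; a usage sketch (the 8-byte, 8-aligned values mirror the semaphore allocation later in this patch):

	/* block = true: msleep on the manager's wait channel until space frees;
	 * block = false: fail with -ENOMEM when there is nothing to wait on. */
	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &sa_bo, 8, 8, true);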
@@ -404,15 +409,13 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
spin_lock(&sa_manager->wq.lock);
list_for_each_entry(i, &sa_manager->olist, olist) {
- uint64_t soffset = i->soffset + sa_manager->gpu_addr;
- uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
if (&i->olist == sa_manager->hole) {
seq_printf(m, ">");
} else {
seq_printf(m, " ");
}
- seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
- soffset, eoffset, eoffset - soffset);
+ seq_printf(m, "[0x%08x 0x%08x] size %8d",
+ i->soffset, i->eoffset, i->eoffset - i->soffset);
if (i->fence) {
seq_printf(m, " protected by 0x%016llx on ring %d",
i->fence->seq, i->fence->ring);
diff --git a/sys/dev/pci/drm/radeon/radeon_semaphore.c b/sys/dev/pci/drm/radeon/radeon_semaphore.c
index e6d2f7bdcb1..7b07f15bc8c 100644
--- a/sys/dev/pci/drm/radeon/radeon_semaphore.c
+++ b/sys/dev/pci/drm/radeon/radeon_semaphore.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_semaphore.c,v 1.5 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2011 Christian König.
* All Rights Reserved.
@@ -41,7 +42,7 @@ int radeon_semaphore_create(struct radeon_device *rdev,
return -ENOMEM;
}
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
- &(*semaphore)->sa_bo, 8, 8);
+ &(*semaphore)->sa_bo, 8, 8, true);
if (r) {
kfree(*semaphore);
*semaphore = NULL;
@@ -49,44 +50,61 @@ int radeon_semaphore_create(struct radeon_device *rdev,
}
(*semaphore)->waiters = 0;
(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
-
- *((uint64_t *)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
-
+ *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
return 0;
}
-bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx,
+void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore)
{
- struct radeon_ring *ring = &rdev->ring[ridx];
+ trace_radeon_semaphore_signale(ring, semaphore);
- trace_radeon_semaphore_signale(ridx, semaphore);
+ --semaphore->waiters;
+ radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
+}
- if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, false)) {
- --semaphore->waiters;
+void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+ struct radeon_semaphore *semaphore)
+{
+ trace_radeon_semaphore_wait(ring, semaphore);
- /* for debugging lockup only, used by sysfs debug files */
- ring->last_semaphore_signal_addr = semaphore->gpu_addr;
- return true;
- }
- return false;
+ ++semaphore->waiters;
+ radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
}
-bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
- struct radeon_semaphore *semaphore)
+/* caller must hold ring lock */
+int radeon_semaphore_sync_rings(struct radeon_device *rdev,
+ struct radeon_semaphore *semaphore,
+ int signaler, int waiter)
{
- struct radeon_ring *ring = &rdev->ring[ridx];
+ int r;
- trace_radeon_semaphore_wait(ridx, semaphore);
+ /* no need to signal and wait on the same ring */
+ if (signaler == waiter) {
+ return 0;
+ }
- if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, true)) {
- ++semaphore->waiters;
+ /* prevent GPU deadlocks */
+ if (!rdev->ring[signaler].ready) {
+ dev_err(rdev->dev, "Trying to sync to a disabled ring!\n");
+ return -EINVAL;
+ }
- /* for debugging lockup only, used by sysfs debug files */
- ring->last_semaphore_wait_addr = semaphore->gpu_addr;
- return true;
+ r = radeon_ring_alloc(rdev, &rdev->ring[signaler], 8);
+ if (r) {
+ return r;
}
- return false;
+ radeon_semaphore_emit_signal(rdev, signaler, semaphore);
+ radeon_ring_commit(rdev, &rdev->ring[signaler]);
+
+ /* we assume the caller has already allocated space on the waiter's ring */
+ radeon_semaphore_emit_wait(rdev, waiter, semaphore);
+
+ /* for debugging lockup only, used by sysfs debug files */
+ rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr;
+ rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr;
+
+ return 0;
}
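A usage sketch for the restored helper (the ring indices are illustrative): make the DMA ring wait for work already queued on the GFX ring:

	/* Caller holds the ring lock and has allocated space on the waiter ring. */
	r = radeon_semaphore_sync_rings(rdev, semaphore,
					RADEON_RING_TYPE_GFX_INDEX,	/* signaler */
					R600_RING_TYPE_DMA_INDEX);	/* waiter */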
void radeon_semaphore_free(struct radeon_device *rdev,
diff --git a/sys/dev/pci/drm/radeon/radeon_test.c b/sys/dev/pci/drm/radeon/radeon_test.c
index 4ab69640929..50a43b0d11f 100644
--- a/sys/dev/pci/drm/radeon/radeon_test.c
+++ b/sys/dev/pci/drm/radeon/radeon_test.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_test.c,v 1.6 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2009 VMware, Inc.
*
@@ -35,6 +36,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
{
struct radeon_bo *vram_obj = NULL;
struct radeon_bo **gtt_obj = NULL;
+ struct radeon_fence *fence = NULL;
uint64_t gtt_addr, vram_addr;
unsigned n, size;
int i, r, ring;
@@ -56,7 +58,13 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
/* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffers) / test size
*/
- n = rdev->mc.gtt_size - rdev->gart_pin_size;
+ n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ n -= rdev->ring[i].ring_size;
+ if (rdev->wb.wb_obj)
+ n -= RADEON_GPU_PAGE_SIZE;
+ if (rdev->ih.ring_obj)
+ n -= rdev->ih.ring_size;
n /= size;
gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
@@ -67,46 +75,44 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
}
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- 0, NULL, NULL, &vram_obj);
+ NULL, &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
}
r = radeon_bo_reserve(vram_obj, false);
if (unlikely(r != 0))
- goto out_unref;
+ goto out_cleanup;
r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
if (r) {
DRM_ERROR("Failed to pin VRAM object\n");
- goto out_unres;
+ goto out_cleanup;
}
for (i = 0; i < n; i++) {
void *gtt_map, *vram_map;
void **gtt_start, **gtt_end;
void **vram_start, **vram_end;
- struct radeon_fence *fence = NULL;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
- gtt_obj + i);
+ RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
- goto out_lclean;
+ goto out_cleanup;
}
r = radeon_bo_reserve(gtt_obj[i], false);
if (unlikely(r != 0))
- goto out_lclean_unref;
+ goto out_cleanup;
r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
if (r) {
DRM_ERROR("Failed to pin GTT object %d\n", i);
- goto out_lclean_unres;
+ goto out_cleanup;
}
r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
DRM_ERROR("Failed to map GTT object %d\n", i);
- goto out_lclean_unpin;
+ goto out_cleanup;
}
for (gtt_start = gtt_map, gtt_end = gtt_map + size;
@@ -117,23 +123,18 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
radeon_bo_kunmap(gtt_obj[i]);
if (ring == R600_RING_TYPE_DMA_INDEX)
- fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
- size / RADEON_GPU_PAGE_SIZE,
- vram_obj->tbo.resv);
+ r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
else
- fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
- size / RADEON_GPU_PAGE_SIZE,
- vram_obj->tbo.resv);
- if (IS_ERR(fence)) {
+ r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ if (r) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
- r = PTR_ERR(fence);
- goto out_lclean_unpin;
+ goto out_cleanup;
}
r = radeon_fence_wait(fence, false);
if (r) {
DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
- goto out_lclean_unpin;
+ goto out_cleanup;
}
radeon_fence_unref(&fence);
@@ -141,7 +142,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
r = radeon_bo_kmap(vram_obj, &vram_map);
if (r) {
DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
- goto out_lclean_unpin;
+ goto out_cleanup;
}
for (gtt_start = gtt_map, gtt_end = gtt_map + size,
@@ -160,7 +161,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
(vram_addr - rdev->mc.vram_start +
(void*)gtt_start - gtt_map));
radeon_bo_kunmap(vram_obj);
- goto out_lclean_unpin;
+ goto out_cleanup;
}
*vram_start = vram_start;
}
@@ -168,23 +169,18 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
radeon_bo_kunmap(vram_obj);
if (ring == R600_RING_TYPE_DMA_INDEX)
- fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
- size / RADEON_GPU_PAGE_SIZE,
- vram_obj->tbo.resv);
+ r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
else
- fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
- size / RADEON_GPU_PAGE_SIZE,
- vram_obj->tbo.resv);
- if (IS_ERR(fence)) {
+ r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
+ if (r) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
- r = PTR_ERR(fence);
- goto out_lclean_unpin;
+ goto out_cleanup;
}
r = radeon_fence_wait(fence, false);
if (r) {
DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
- goto out_lclean_unpin;
+ goto out_cleanup;
}
radeon_fence_unref(&fence);
@@ -192,7 +188,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
if (r) {
DRM_ERROR("Failed to map GTT object after copy %d\n", i);
- goto out_lclean_unpin;
+ goto out_cleanup;
}
for (gtt_start = gtt_map, gtt_end = gtt_map + size,
@@ -211,7 +207,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
(gtt_addr - rdev->mc.gtt_start +
(void*)vram_start - vram_map));
radeon_bo_kunmap(gtt_obj[i]);
- goto out_lclean_unpin;
+ goto out_cleanup;
}
}
@@ -219,32 +215,31 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
gtt_addr - rdev->mc.gtt_start);
- continue;
-
-out_lclean_unpin:
- radeon_bo_unpin(gtt_obj[i]);
-out_lclean_unres:
- radeon_bo_unreserve(gtt_obj[i]);
-out_lclean_unref:
- radeon_bo_unref(&gtt_obj[i]);
-out_lclean:
- for (--i; i >= 0; --i) {
- radeon_bo_unpin(gtt_obj[i]);
- radeon_bo_unreserve(gtt_obj[i]);
- radeon_bo_unref(&gtt_obj[i]);
- }
- if (fence && !IS_ERR(fence))
- radeon_fence_unref(&fence);
- break;
}
- radeon_bo_unpin(vram_obj);
-out_unres:
- radeon_bo_unreserve(vram_obj);
-out_unref:
- radeon_bo_unref(&vram_obj);
out_cleanup:
- kfree(gtt_obj);
+ if (vram_obj) {
+ if (radeon_bo_is_reserved(vram_obj)) {
+ radeon_bo_unpin(vram_obj);
+ radeon_bo_unreserve(vram_obj);
+ }
+ radeon_bo_unref(&vram_obj);
+ }
+ if (gtt_obj) {
+ for (i = 0; i < n; i++) {
+ if (gtt_obj[i]) {
+ if (radeon_bo_is_reserved(gtt_obj[i])) {
+ radeon_bo_unpin(gtt_obj[i]);
+ radeon_bo_unreserve(gtt_obj[i]);
+ }
+ radeon_bo_unref(&gtt_obj[i]);
+ }
+ }
+ kfree(gtt_obj);
+ }
+ if (fence) {
+ radeon_fence_unref(&fence);
+ }
if (r) {
printk(KERN_WARNING "Error while testing BO move.\n");
}
@@ -258,52 +253,6 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
}
-static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_fence **fence)
-{
- uint32_t handle = ring->idx ^ 0xdeafbeef;
- int r;
-
- if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
- r = radeon_uvd_get_create_msg(rdev, ring->idx, handle, NULL);
- if (r) {
- DRM_ERROR("Failed to get dummy create msg\n");
- return r;
- }
-
- r = radeon_uvd_get_destroy_msg(rdev, ring->idx, handle, fence);
- if (r) {
- DRM_ERROR("Failed to get dummy destroy msg\n");
- return r;
- }
-
- } else if (ring->idx == TN_RING_TYPE_VCE1_INDEX ||
- ring->idx == TN_RING_TYPE_VCE2_INDEX) {
- r = radeon_vce_get_create_msg(rdev, ring->idx, handle, NULL);
- if (r) {
- DRM_ERROR("Failed to get dummy create msg\n");
- return r;
- }
-
- r = radeon_vce_get_destroy_msg(rdev, ring->idx, handle, fence);
- if (r) {
- DRM_ERROR("Failed to get dummy destroy msg\n");
- return r;
- }
-
- } else {
- r = radeon_ring_lock(rdev, ring, 64);
- if (r) {
- DRM_ERROR("Failed to lock ring A %d\n", ring->idx);
- return r;
- }
- radeon_fence_emit(rdev, fence, ring->idx);
- radeon_ring_unlock_commit(rdev, ring, false);
- }
- return 0;
-}
-
void radeon_test_ring_sync(struct radeon_device *rdev,
struct radeon_ring *ringA,
struct radeon_ring *ringB)
@@ -324,23 +273,20 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringA, false);
-
- r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);
- if (r)
- goto out_cleanup;
-
- r = radeon_ring_lock(rdev, ringA, 64);
+ r = radeon_fence_emit(rdev, &fence1, ringA->idx);
if (r) {
- DRM_ERROR("Failed to lock ring A %d\n", ringA->idx);
+ DRM_ERROR("Failed to emit fence 1\n");
+ radeon_ring_unlock_undo(rdev, ringA);
goto out_cleanup;
}
radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringA, false);
-
- r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);
- if (r)
+ r = radeon_fence_emit(rdev, &fence2, ringA->idx);
+ if (r) {
+ DRM_ERROR("Failed to emit fence 2\n");
+ radeon_ring_unlock_undo(rdev, ringA);
goto out_cleanup;
+ }
+ radeon_ring_unlock_commit(rdev, ringA);
mdelay(1000);
@@ -355,7 +301,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringB, false);
+ radeon_ring_unlock_commit(rdev, ringB);
r = radeon_fence_wait(fence1, false);
if (r) {
@@ -376,7 +322,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringB, false);
+ radeon_ring_unlock_commit(rdev, ringB);
r = radeon_fence_wait(fence2, false);
if (r) {
@@ -419,11 +365,13 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringA, false);
-
- r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);
- if (r)
+ r = radeon_fence_emit(rdev, &fenceA, ringA->idx);
+ if (r) {
+ DRM_ERROR("Failed to emit sync fence 1\n");
+ radeon_ring_unlock_undo(rdev, ringA);
goto out_cleanup;
+ }
+ radeon_ring_unlock_commit(rdev, ringA);
r = radeon_ring_lock(rdev, ringB, 64);
if (r) {
@@ -431,10 +379,13 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringB, false);
- r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);
- if (r)
+ r = radeon_fence_emit(rdev, &fenceB, ringB->idx);
+ if (r) {
+ DRM_ERROR("Failed to create sync fence 2\n");
+ radeon_ring_unlock_undo(rdev, ringB);
goto out_cleanup;
+ }
+ radeon_ring_unlock_commit(rdev, ringB);
mdelay(1000);
@@ -443,7 +394,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
goto out_cleanup;
}
if (radeon_fence_signaled(fenceB)) {
- DRM_ERROR("Fence B signaled without waiting for semaphore.\n");
+ DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
goto out_cleanup;
}
@@ -453,7 +404,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringC, false);
+ radeon_ring_unlock_commit(rdev, ringC);
for (i = 0; i < 30; ++i) {
mdelay(100);
@@ -479,7 +430,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
goto out_cleanup;
}
radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
- radeon_ring_unlock_commit(rdev, ringC, false);
+ radeon_ring_unlock_commit(rdev, ringC);
mdelay(1000);
@@ -507,16 +458,6 @@ out_cleanup:
printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
}
-static bool radeon_test_sync_possible(struct radeon_ring *ringA,
- struct radeon_ring *ringB)
-{
- if (ringA->idx == TN_RING_TYPE_VCE2_INDEX &&
- ringB->idx == TN_RING_TYPE_VCE1_INDEX)
- return false;
-
- return true;
-}
-
void radeon_test_syncing(struct radeon_device *rdev)
{
int i, j, k;
@@ -531,9 +472,6 @@ void radeon_test_syncing(struct radeon_device *rdev)
if (!ringB->ready)
continue;
- if (!radeon_test_sync_possible(ringA, ringB))
- continue;
-
DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
radeon_test_ring_sync(rdev, ringA, ringB);
@@ -545,12 +483,6 @@ void radeon_test_syncing(struct radeon_device *rdev)
if (!ringC->ready)
continue;
- if (!radeon_test_sync_possible(ringA, ringC))
- continue;
-
- if (!radeon_test_sync_possible(ringB, ringC))
- continue;
-
DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
radeon_test_ring_sync2(rdev, ringA, ringB, ringC);
diff --git a/sys/dev/pci/drm/radeon/radeon_trace.h b/sys/dev/pci/drm/radeon/radeon_trace.h
index 5bb2cd121c4..4377842f744 100644
--- a/sys/dev/pci/drm/radeon/radeon_trace.h
+++ b/sys/dev/pci/drm/radeon/radeon_trace.h
@@ -1,3 +1,5 @@
+/* $OpenBSD: radeon_trace.h,v 1.4 2018/04/20 16:09:37 deraadt Exp $ */
+
#if !defined(_RADEON_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _RADEON_TRACE_H_
@@ -5,6 +7,7 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM radeon
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
#define TRACE_INCLUDE_FILE radeon_trace
TRACE_EVENT(radeon_bo_create,
@@ -22,142 +25,51 @@ TRACE_EVENT(radeon_bo_create,
TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
);
-TRACE_EVENT(radeon_cs,
- TP_PROTO(struct radeon_cs_parser *p),
- TP_ARGS(p),
- TP_STRUCT__entry(
- __field(u32, ring)
- __field(u32, dw)
- __field(u32, fences)
- ),
-
- TP_fast_assign(
- __entry->ring = p->ring;
- __entry->dw = p->chunk_ib->length_dw;
- __entry->fences = radeon_fence_count_emitted(
- p->rdev, p->ring);
- ),
- TP_printk("ring=%u, dw=%u, fences=%u",
- __entry->ring, __entry->dw,
- __entry->fences)
-);
-
-TRACE_EVENT(radeon_vm_grab_id,
- TP_PROTO(unsigned vmid, int ring),
- TP_ARGS(vmid, ring),
- TP_STRUCT__entry(
- __field(u32, vmid)
- __field(u32, ring)
- ),
-
- TP_fast_assign(
- __entry->vmid = vmid;
- __entry->ring = ring;
- ),
- TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
-);
-
-TRACE_EVENT(radeon_vm_bo_update,
- TP_PROTO(struct radeon_bo_va *bo_va),
- TP_ARGS(bo_va),
- TP_STRUCT__entry(
- __field(u64, soffset)
- __field(u64, eoffset)
- __field(u32, flags)
- ),
-
- TP_fast_assign(
- __entry->soffset = bo_va->it.start;
- __entry->eoffset = bo_va->it.last + 1;
- __entry->flags = bo_va->flags;
- ),
- TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
- __entry->soffset, __entry->eoffset, __entry->flags)
-);
-
-TRACE_EVENT(radeon_vm_set_page,
- TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags),
- TP_ARGS(pe, addr, count, incr, flags),
- TP_STRUCT__entry(
- __field(u64, pe)
- __field(u64, addr)
- __field(u32, count)
- __field(u32, incr)
- __field(u32, flags)
- ),
-
- TP_fast_assign(
- __entry->pe = pe;
- __entry->addr = addr;
- __entry->count = count;
- __entry->incr = incr;
- __entry->flags = flags;
- ),
- TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u",
- __entry->pe, __entry->addr, __entry->incr,
- __entry->flags, __entry->count)
-);
-
-TRACE_EVENT(radeon_vm_flush,
- TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
- TP_ARGS(pd_addr, ring, id),
- TP_STRUCT__entry(
- __field(u64, pd_addr)
- __field(u32, ring)
- __field(u32, id)
- ),
-
- TP_fast_assign(
- __entry->pd_addr = pd_addr;
- __entry->ring = ring;
- __entry->id = id;
- ),
- TP_printk("pd_addr=%010Lx, ring=%u, id=%u",
- __entry->pd_addr, __entry->ring, __entry->id)
-);
-
DECLARE_EVENT_CLASS(radeon_fence_request,
- TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
+ TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_ARGS(dev, ring, seqno),
+ TP_ARGS(dev, seqno),
TP_STRUCT__entry(
__field(u32, dev)
- __field(int, ring)
__field(u32, seqno)
),
TP_fast_assign(
__entry->dev = dev->primary->index;
- __entry->ring = ring;
__entry->seqno = seqno;
),
- TP_printk("dev=%u, ring=%d, seqno=%u",
- __entry->dev, __entry->ring, __entry->seqno)
+ TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
);
DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
- TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
+ TP_PROTO(struct drm_device *dev, u32 seqno),
+
+ TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_retire,
+
+ TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_ARGS(dev, ring, seqno)
+ TP_ARGS(dev, seqno)
);
DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
- TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
+ TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_ARGS(dev, ring, seqno)
+ TP_ARGS(dev, seqno)
);
DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
- TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
+ TP_PROTO(struct drm_device *dev, u32 seqno),
- TP_ARGS(dev, ring, seqno)
+ TP_ARGS(dev, seqno)
);
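With the ring field dropped from the event class, call sites shrink to two arguments; an assumed emit-path example:

	trace_radeon_fence_emit(rdev->ddev, fence->seq);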
DECLARE_EVENT_CLASS(radeon_semaphore_request,
diff --git a/sys/dev/pci/drm/radeon/radeon_trace_points.c b/sys/dev/pci/drm/radeon/radeon_trace_points.c
index 440e092f4a6..4bef278ba12 100644
--- a/sys/dev/pci/drm/radeon/radeon_trace_points.c
+++ b/sys/dev/pci/drm/radeon/radeon_trace_points.c
@@ -1,3 +1,5 @@
+/* $OpenBSD: radeon_trace_points.c,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
+
/* Copyright Red Hat Inc 2010.
* Author : Dave Airlie <airlied@redhat.com>
*/
diff --git a/sys/dev/pci/drm/radeon/radeon_ttm.c b/sys/dev/pci/drm/radeon/radeon_ttm.c
index 0ce9fed2931..e06d4dc256b 100644
--- a/sys/dev/pci/drm/radeon/radeon_ttm.c
+++ b/sys/dev/pci/drm/radeon/radeon_ttm.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_ttm.c,v 1.14 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2009 Jerome Glisse.
* All Rights Reserved.
@@ -29,12 +30,12 @@
* Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
* Dave Airlie
*/
-#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/ttm/ttm_bo_api.h>
#include <dev/pci/drm/ttm/ttm_bo_driver.h>
#include <dev/pci/drm/ttm/ttm_placement.h>
#include <dev/pci/drm/ttm/ttm_module.h>
#include <dev/pci/drm/ttm/ttm_page_alloc.h>
+#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
@@ -42,7 +43,6 @@
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
-static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
@@ -140,11 +140,13 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
if (rdev->flags & RADEON_IS_AGP) {
- if (!rdev->ddev->agp) {
+#ifdef notyet
+ if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
DRM_ERROR("AGP is not enabled for memory type %u\n",
(unsigned)type);
return -EINVAL;
}
+#endif
if (!rdev->ddev->agp->cant_use_aperture)
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
@@ -172,15 +174,12 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
static void radeon_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
- static struct ttm_place placements = {
- .fpfn = 0,
- .lpfn = 0,
- .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
- };
-
struct radeon_bo *rbo;
+ static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
if (!radeon_ttm_bo_is_radeon_bo(bo)) {
+ placement->fpfn = 0;
+ placement->lpfn = 0;
placement->placement = &placements;
placement->busy_placement = &placements;
placement->num_placement = 1;
@@ -190,32 +189,9 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
rbo = container_of(bo, struct radeon_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
+ if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
- else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
- bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
- unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
- int i;
-
- /* Try evicting to the CPU inaccessible part of VRAM
- * first, but only set GTT as busy placement, so this
- * BO will be evicted to GTT rather than causing other
- * BOs to be evicted from VRAM
- */
- radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
- RADEON_GEM_DOMAIN_GTT);
- rbo->placement.num_busy_placement = 0;
- for (i = 0; i < rbo->placement.num_placement; i++) {
- if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
- if (rbo->placements[i].fpfn < fpfn)
- rbo->placements[i].fpfn = fpfn;
- } else {
- rbo->placement.busy_placement =
- &rbo->placements[i];
- rbo->placement.num_busy_placement = 1;
- }
- }
- } else
+ else
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
break;
case TTM_PL_TT:
@@ -227,11 +203,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
-
- if (radeon_ttm_tt_has_userptr(bo->ttm))
- return -EPERM;
- return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
+ return 0;
}
static void radeon_move_null(struct ttm_buffer_object *bo,
@@ -252,13 +224,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
struct radeon_device *rdev;
uint64_t old_start, new_start;
struct radeon_fence *fence;
- unsigned num_pages;
int r, ridx;
rdev = radeon_get_rdev(bo->bdev);
ridx = radeon_copy_ring_index(rdev);
- old_start = (u64)old_mem->start << PAGE_SHIFT;
- new_start = (u64)new_mem->start << PAGE_SHIFT;
+ old_start = old_mem->start << PAGE_SHIFT;
+ new_start = new_mem->start << PAGE_SHIFT;
switch (old_mem->mem_type) {
case TTM_PL_VRAM:
@@ -289,12 +260,13 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
- num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
- fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv);
- if (IS_ERR(fence))
- return PTR_ERR(fence);
-
- r = ttm_bo_move_accel_cleanup(bo, &fence->base,
+ /* sync other rings */
+ fence = bo->sync_obj;
+ r = radeon_copy(rdev, old_start, new_start,
+ new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
+ &fence);
+ /* FIXME: handle copy error */
+ r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
evict, no_wait_gpu, new_mem);
radeon_fence_unref(&fence);
return r;
@@ -308,20 +280,20 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
- struct ttm_place placements;
+ u32 placements;
struct ttm_placement placement;
int r;
rdev = radeon_get_rdev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
+ placement.fpfn = 0;
+ placement.lpfn = 0;
placement.num_placement = 1;
placement.placement = &placements;
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
- placements.fpfn = 0;
- placements.lpfn = 0;
- placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
interruptible, no_wait_gpu);
if (unlikely(r)) {
@@ -356,19 +328,19 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
struct ttm_placement placement;
- struct ttm_place placements;
+ u32 placements;
int r;
rdev = radeon_get_rdev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
+ placement.fpfn = 0;
+ placement.lpfn = 0;
placement.num_placement = 1;
placement.placement = &placements;
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
- placements.fpfn = 0;
- placements.lpfn = 0;
- placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
interruptible, no_wait_gpu);
if (unlikely(r)) {
@@ -430,14 +402,8 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
if (r) {
memcpy:
r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
- if (r) {
- return r;
- }
}
-
- /* update statistics */
- atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
- return 0;
+ return r;
}
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
@@ -507,143 +473,55 @@ static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
{
}
-/*
- * TTM backend functions.
- */
-struct radeon_ttm_tt {
- struct ttm_dma_tt ttm;
- struct radeon_device *rdev;
- u64 offset;
-
- uint64_t userptr;
- struct mm_struct *usermm;
- uint32_t userflags;
-
- bus_dmamap_t map;
- bus_dma_segment_t *segs;
-};
-
-/* prepare the sg table with the user pages */
-static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
- STUB();
- return -ENOSYS;
-#ifdef notyet
- struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
- struct radeon_ttm_tt *gtt = (void *)ttm;
- unsigned pinned = 0, nents;
- int r;
-
- int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
- enum dma_data_direction direction = write ?
- DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
-
- if (current->mm != gtt->usermm)
- return -EPERM;
-
- if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
- /* check that we only pin down anonymous memory
- to prevent problems with writeback */
- unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
- struct vm_area_struct *vma;
- vma = find_vma(gtt->usermm, gtt->userptr);
- if (!vma || vma->vm_file || vma->vm_end < end)
- return -EPERM;
- }
-
- do {
- unsigned num_pages = ttm->num_pages - pinned;
- uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
- struct vm_page **pages = ttm->pages + pinned;
-
- r = get_user_pages(current, current->mm, userptr, num_pages,
- write, 0, pages, NULL);
- if (r < 0)
- goto release_pages;
-
- pinned += r;
-
- } while (pinned < ttm->num_pages);
-
- r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
- ttm->num_pages << PAGE_SHIFT,
- GFP_KERNEL);
- if (r)
- goto release_sg;
-
- r = -ENOMEM;
- nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
- if (nents != ttm->sg->nents)
- goto release_sg;
-
- drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
- gtt->ttm.dma_address, ttm->num_pages);
+ return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
+}
+static int radeon_sync_obj_flush(void *sync_obj)
+{
return 0;
-
-release_sg:
- kfree(ttm->sg);
-
-release_pages:
- release_pages(ttm->pages, pinned, 0);
- return r;
-#endif
}
-static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
+static void radeon_sync_obj_unref(void **sync_obj)
{
- STUB();
-#ifdef notyet
- struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
- struct radeon_ttm_tt *gtt = (void *)ttm;
- struct sg_page_iter sg_iter;
-
- int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
- enum dma_data_direction direction = write ?
- DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
-
- /* double check that we don't free the table twice */
- if (!ttm->sg->sgl)
- return;
-
- /* free the sg table and pages again */
- dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
-
- for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
- struct vm_page *page = sg_page_iter_page(&sg_iter);
- if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
- set_page_dirty(page);
+ radeon_fence_unref((struct radeon_fence **)sync_obj);
+}
- mark_page_accessed(page);
- page_cache_release(page);
- }
+static void *radeon_sync_obj_ref(void *sync_obj)
+{
+ return radeon_fence_ref((struct radeon_fence *)sync_obj);
+}
- sg_free_table(ttm->sg);
-#endif
+static bool radeon_sync_obj_signaled(void *sync_obj)
+{
+ return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}
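These five callbacks let TTM treat its opaque sync object as a radeon_fence; conceptually, an eviction wait reduces to something like the following sketch (a paraphrase, not literal TTM code):

	void *obj = radeon_sync_obj_ref(bo->sync_obj);
	if (!radeon_sync_obj_signaled(obj))
		radeon_sync_obj_wait(obj, false, interruptible);
	radeon_sync_obj_unref(&obj);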
+/*
+ * TTM backend functions.
+ */
+struct radeon_ttm_tt {
+ struct ttm_dma_tt ttm;
+ struct radeon_device *rdev;
+ bus_dmamap_t map;
+ bus_dma_segment_t *segs;
+ u64 offset;
+};
+
static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
struct ttm_mem_reg *bo_mem)
{
struct radeon_ttm_tt *gtt = (void*)ttm;
- uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
- RADEON_GART_PAGE_WRITE;
int r;
- if (gtt->userptr) {
- radeon_ttm_tt_pin_userptr(ttm);
- flags &= ~RADEON_GART_PAGE_WRITE;
- }
-
gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
if (!ttm->num_pages) {
WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
ttm->num_pages, bo_mem, ttm);
}
- if (ttm->caching_state == tt_cached)
- flags |= RADEON_GART_PAGE_SNOOP;
- r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
- ttm->pages, gtt->ttm.dma_address, flags);
+ r = radeon_gart_bind(gtt->rdev, gtt->offset,
+ ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
if (r) {
DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
ttm->num_pages, (unsigned)gtt->offset);
@@ -657,10 +535,6 @@ static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
struct radeon_ttm_tt *gtt = (void *)ttm;
radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
-
- if (gtt->userptr)
- radeon_ttm_tt_unpin_userptr(ttm);
-
return 0;
}
@@ -720,35 +594,17 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
return &gtt->ttm.ttm;
}
-static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
-{
- if (!ttm || ttm->func != &radeon_backend_func)
- return NULL;
- return (struct radeon_ttm_tt *)ttm;
-}
-
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
- struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
struct radeon_device *rdev;
+ struct radeon_ttm_tt *gtt = (void *)ttm;
unsigned i;
- int r;
- int seg;
+ int r, seg;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (ttm->state != tt_unpopulated)
return 0;
- if (gtt && gtt->userptr) {
- ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!ttm->sg)
- return -ENOMEM;
-
- ttm->page_flags |= TTM_PAGE_FLAG_SG;
- ttm->state = tt_unbound;
- return 0;
- }
-
if (slave && ttm->sg) {
#ifdef notyet
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
@@ -776,22 +632,6 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
return r;
}
-#ifdef __linux__
- for (i = 0; i < ttm->num_pages; i++) {
- gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
- while (i--) {
- pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- gtt->ttm.dma_address[i] = 0;
- }
- ttm_pool_unpopulate(ttm);
- return -EFAULT;
- }
- }
-#else
for (i = 0; i < ttm->num_pages; i++) {
gtt->segs[i].ds_addr = VM_PAGE_TO_PHYS(ttm->pages[i]);
gtt->segs[i].ds_len = PAGE_SIZE;
@@ -803,7 +643,6 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
ttm_pool_unpopulate(ttm);
return -EFAULT;
}
-#endif
for (seg = 0, i = 0; seg < gtt->map->dm_nsegs; seg++) {
bus_addr_t addr = gtt->map->dm_segs[seg].ds_addr;
@@ -822,16 +661,10 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
struct radeon_device *rdev;
- struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
+ struct radeon_ttm_tt *gtt = (void *)ttm;
unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
- if (gtt && gtt->userptr) {
- kfree(ttm->sg);
- ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
- return;
- }
-
if (slave)
return;
@@ -850,61 +683,13 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
}
#endif
-#ifdef __linux__
- for (i = 0; i < ttm->num_pages; i++) {
- if (gtt->ttm.dma_address[i]) {
- pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- }
- }
-#else
bus_dmamap_unload(rdev->dmat, gtt->map);
for (i = 0; i < ttm->num_pages; i++)
gtt->ttm.dma_address[i] = 0;
-#endif
-
ttm_pool_unpopulate(ttm);
}
-int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
- uint32_t flags)
-{
- STUB();
- return -ENOSYS;
-#ifdef notyet
- struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
-
- if (gtt == NULL)
- return -EINVAL;
-
- gtt->userptr = addr;
- gtt->usermm = current->mm;
- gtt->userflags = flags;
- return 0;
-#endif
-}
-
-bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
-{
- struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
-
- if (gtt == NULL)
- return false;
-
- return !!gtt->userptr;
-}
-
-bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm)
-{
- struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
-
- if (gtt == NULL)
- return false;
-
- return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
-}
-
static struct ttm_bo_driver radeon_bo_driver = {
.ttm_tt_create = &radeon_ttm_tt_create,
.ttm_tt_populate = &radeon_ttm_tt_populate,
@@ -914,6 +699,11 @@ static struct ttm_bo_driver radeon_bo_driver = {
.evict_flags = &radeon_evict_flags,
.move = &radeon_bo_move,
.verify_access = &radeon_verify_access,
+ .sync_obj_signaled = &radeon_sync_obj_signaled,
+ .sync_obj_wait = &radeon_sync_obj_wait,
+ .sync_obj_flush = &radeon_sync_obj_flush,
+ .sync_obj_unref = &radeon_sync_obj_unref,
+ .sync_obj_ref = &radeon_sync_obj_ref,
.move_notify = &radeon_bo_move_notify,
.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
.io_mem_reserve = &radeon_ttm_io_mem_reserve,
@@ -929,21 +719,10 @@ int radeon_ttm_init(struct radeon_device *rdev)
return r;
}
 /* No other users of the address space, so set it to 0 */
-#ifdef notyet
r = ttm_bo_device_init(&rdev->mman.bdev,
rdev->mman.bo_global_ref.ref.object,
- &radeon_bo_driver,
- rdev->ddev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
+ &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
rdev->need_dma32);
-#else
- r = ttm_bo_device_init(&rdev->mman.bdev,
- rdev->mman.bo_global_ref.ref.object,
- &radeon_bo_driver,
- /*rdev->ddev->anon_inode->i_mapping*/ NULL,
- DRM_FILE_PAGE_OFFSET,
- rdev->need_dma32);
-#endif
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
@@ -963,11 +742,11 @@ int radeon_ttm_init(struct radeon_device *rdev)
#ifdef __sparc64__
r = radeon_bo_create(rdev, rdev->fb_offset, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+ RADEON_GEM_DOMAIN_VRAM,
NULL, &rdev->stollen_vga_memory);
#else
r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+ RADEON_GEM_DOMAIN_VRAM,
NULL, &rdev->stollen_vga_memory);
#endif
if (r) {
@@ -992,6 +771,9 @@ int radeon_ttm_init(struct radeon_device *rdev)
}
DRM_INFO("radeon: %uM of GTT memory ready.\n",
(unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
+#ifdef notyet
+ rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
+#endif
r = radeon_ttm_debugfs_init(rdev);
if (r) {
@@ -1007,7 +789,6 @@ void radeon_ttm_fini(struct radeon_device *rdev)
if (!rdev->mman.initialized)
return;
- radeon_ttm_debugfs_fini(rdev);
if (rdev->stollen_vga_memory) {
r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
if (r == 0) {
@@ -1039,56 +820,6 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
man->size = size >> PAGE_SHIFT;
}
-#ifdef __linux__
-static struct vm_operations_struct radeon_ttm_vm_ops;
-static const struct vm_operations_struct *ttm_vm_ops = NULL;
-
-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct ttm_buffer_object *bo;
- struct radeon_device *rdev;
- int r;
-
- bo = (struct ttm_buffer_object *)vma->vm_private_data;
- if (bo == NULL) {
- return VM_FAULT_NOPAGE;
- }
- rdev = radeon_get_rdev(bo->bdev);
- down_read(&rdev->pm.mclk_lock);
- r = ttm_vm_ops->fault(vma, vmf);
- up_read(&rdev->pm.mclk_lock);
- return r;
-}
-
-int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *file_priv;
- struct radeon_device *rdev;
- int r;
-
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
- return -EINVAL;
- }
-
- file_priv = filp->private_data;
- rdev = file_priv->minor->dev->dev_private;
- if (rdev == NULL) {
- return -EINVAL;
- }
- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
- if (unlikely(r != 0)) {
- return r;
- }
- if (unlikely(ttm_vm_ops == NULL)) {
- ttm_vm_ops = vma->vm_ops;
- radeon_ttm_vm_ops = *ttm_vm_ops;
- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
- }
- vma->vm_ops = &radeon_ttm_vm_ops;
- return 0;
-}
-#else
-
static struct uvm_pagerops radeon_ttm_vm_ops;
static const struct uvm_pagerops *ttm_vm_ops = NULL;
@@ -1138,17 +869,17 @@ radeon_mmap(struct drm_device *dev, voff_t off, vsize_t size)
uobj->pgops = &radeon_ttm_vm_ops;
return uobj;
}
-#endif
-#if defined(CONFIG_DEBUG_FS)
+#define RADEON_DEBUGFS_MEM_TYPES 2
+
+#if defined(CONFIG_DEBUG_FS)
static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
- unsigned ttm_pl = *(int *)node->info_ent->data;
+ struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
- struct drm_mm *mm = (struct drm_mm *)rdev->mman.bdev.man[ttm_pl].priv;
int ret;
struct ttm_bo_global *glob = rdev->mman.bdev.glob;
@@ -1157,169 +888,46 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
spin_unlock(&glob->lru_lock);
return ret;
}
-
-static int ttm_pl_vram = TTM_PL_VRAM;
-static int ttm_pl_tt = TTM_PL_TT;
-
-static struct drm_info_list radeon_ttm_debugfs_list[] = {
- {"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
- {"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
- {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
-#ifdef CONFIG_SWIOTLB
- {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
-#endif
-};
-
-static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
-{
- struct radeon_device *rdev = inode->i_private;
- i_size_write(inode, rdev->mc.mc_vram_size);
- filep->private_data = inode->i_private;
- return 0;
-}
-
-static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct radeon_device *rdev = f->private_data;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- unsigned long flags;
- uint32_t value;
-
- if (*pos >= rdev->mc.mc_vram_size)
- return result;
-
- spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
- WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
- if (rdev->family >= CHIP_CEDAR)
- WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
- value = RREG32(RADEON_MM_DATA);
- spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
-
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
-
- return result;
-}
-
-static const struct file_operations radeon_ttm_vram_fops = {
- .owner = THIS_MODULE,
- .open = radeon_ttm_vram_open,
- .read = radeon_ttm_vram_read,
- .llseek = default_llseek
-};
-
-static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep)
-{
- struct radeon_device *rdev = inode->i_private;
- i_size_write(inode, rdev->mc.gtt_size);
- filep->private_data = inode->i_private;
- return 0;
-}
-
-static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct radeon_device *rdev = f->private_data;
- ssize_t result = 0;
- int r;
-
- while (size) {
- loff_t p = *pos / PAGE_SIZE;
- unsigned off = *pos & PAGE_MASK;
- size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
- struct vm_page *page;
- void *ptr;
-
- if (p >= rdev->gart.num_cpu_pages)
- return result;
-
- page = rdev->gart.pages[p];
- if (page) {
- ptr = kmap(page);
- ptr += off;
-
- r = copy_to_user(buf, ptr, cur_size);
- kunmap(rdev->gart.pages[p]);
- } else
- r = clear_user(buf, cur_size);
-
- if (r)
- return -EFAULT;
-
- result += cur_size;
- buf += cur_size;
- *pos += cur_size;
- size -= cur_size;
- }
-
- return result;
-}
-
-static const struct file_operations radeon_ttm_gtt_fops = {
- .owner = THIS_MODULE,
- .open = radeon_ttm_gtt_open,
- .read = radeon_ttm_gtt_read,
- .llseek = default_llseek
-};
-
#endif
static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- unsigned count;
-
- struct drm_minor *minor = rdev->ddev->primary;
- struct dentry *ent, *root = minor->debugfs_root;
-
- ent = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO, root,
- rdev, &radeon_ttm_vram_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
- rdev->mman.vram = ent;
-
- ent = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO, root,
- rdev, &radeon_ttm_gtt_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
- rdev->mman.gtt = ent;
+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
+ static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
+ unsigned i;
- count = ARRAY_SIZE(radeon_ttm_debugfs_list);
+ for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
+ if (i == 0)
+ sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
+ else
+ sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
+ radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+ radeon_mem_types_list[i].show = &radeon_mm_dump_table;
+ radeon_mem_types_list[i].driver_features = 0;
+ if (i == 0)
+ radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
+ else
+ radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
+ }
+ /* Add ttm page pool to debugfs */
+ sprintf(radeon_mem_types_names[i], "ttm_page_pool");
+ radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+ radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
+ radeon_mem_types_list[i].driver_features = 0;
+ radeon_mem_types_list[i++].data = NULL;
#ifdef CONFIG_SWIOTLB
- if (!swiotlb_nr_tbl())
- --count;
-#endif
-
- return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
-#else
-
- return 0;
+ if (swiotlb_nr_tbl()) {
+ sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
+ radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+ radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
+ radeon_mem_types_list[i].driver_features = 0;
+ radeon_mem_types_list[i++].data = NULL;
+ }
#endif
-}
+ return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
-static void radeon_ttm_debugfs_fini(struct radeon_device *rdev)
-{
-#if defined(CONFIG_DEBUG_FS)
-
- debugfs_remove(rdev->mman.vram);
- rdev->mman.vram = NULL;
-
- debugfs_remove(rdev->mman.gtt);
- rdev->mman.gtt = NULL;
#endif
+ return 0;
}
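/*
 * Sketch of the resulting debugfs layout, assuming CONFIG_DEBUG_FS
 * (and, for the last entry, CONFIG_SWIOTLB with swiotlb_nr_tbl() != 0):
 *
 *   radeon_vram_mm    - drm_mm state of the TTM_PL_VRAM manager
 *   radeon_gtt_mm     - drm_mm state of the TTM_PL_TT manager
 *   ttm_page_pool     - ttm_page_alloc_debugfs()
 *   ttm_dma_page_pool - ttm_dma_page_alloc_debugfs()
 */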
diff --git a/sys/dev/pci/drm/radeon/reg_srcs/cayman b/sys/dev/pci/drm/radeon/reg_srcs/cayman
index d46b58d078a..af7a941adf0 100644
--- a/sys/dev/pci/drm/radeon/reg_srcs/cayman
+++ b/sys/dev/pci/drm/radeon/reg_srcs/cayman
@@ -21,7 +21,7 @@ cayman 0x9400
0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
0x000089B0 VGT_HS_OFFCHIP_PARAM
0x00008A14 PA_CL_ENHANCE
-0x00008A60 PA_SU_LINE_STIPPLE_VALUE
+0x00008A60 PA_SC_LINE_STIPPLE_VALUE
0x00008B10 PA_SC_LINE_STIPPLE_STATE
0x00008BF0 PA_SC_ENHANCE
0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
diff --git a/sys/dev/pci/drm/radeon/reg_srcs/evergreen b/sys/dev/pci/drm/radeon/reg_srcs/evergreen
index 57745c8761c..e19ef0e6ff2 100644
--- a/sys/dev/pci/drm/radeon/reg_srcs/evergreen
+++ b/sys/dev/pci/drm/radeon/reg_srcs/evergreen
@@ -22,7 +22,7 @@ evergreen 0x9400
0x000089A4 VGT_COMPUTE_START_Z
0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
0x00008A14 PA_CL_ENHANCE
-0x00008A60 PA_SU_LINE_STIPPLE_VALUE
+0x00008A60 PA_SC_LINE_STIPPLE_VALUE
0x00008B10 PA_SC_LINE_STIPPLE_STATE
0x00008BF0 PA_SC_ENHANCE
0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
diff --git a/sys/dev/pci/drm/radeon/rs100d.h b/sys/dev/pci/drm/radeon/rs100d.h
index 48a913a06cf..3d3a29de44f 100644
--- a/sys/dev/pci/drm/radeon/rs100d.h
+++ b/sys/dev/pci/drm/radeon/rs100d.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: rs100d.h,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
diff --git a/sys/dev/pci/drm/radeon/rs400.c b/sys/dev/pci/drm/radeon/rs400.c
index 6e2eb342d07..1a32211497b 100644
--- a/sys/dev/pci/drm/radeon/rs400.c
+++ b/sys/dev/pci/drm/radeon/rs400.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: rs400.c,v 1.9 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -107,6 +108,7 @@ int rs400_gart_enable(struct radeon_device *rdev)
uint32_t size_reg;
uint32_t tmp;
+ radeon_gart_restore(rdev);
tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
@@ -206,30 +208,24 @@ void rs400_gart_fini(struct radeon_device *rdev)
radeon_gart_table_ram_free(rdev);
}
-#define RS400_PTE_UNSNOOPED (1 << 0)
#define RS400_PTE_WRITEABLE (1 << 2)
#define RS400_PTE_READABLE (1 << 3)
-uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags)
+int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
uint32_t entry;
+ u32 *gtt = rdev->gart.ptr;
- entry = (lower_32_bits(addr) & ~PAGE_MASK) |
- ((upper_32_bits(addr) & 0xff) << 4);
- if (flags & RADEON_GART_PAGE_READ)
- entry |= RS400_PTE_READABLE;
- if (flags & RADEON_GART_PAGE_WRITE)
- entry |= RS400_PTE_WRITEABLE;
- if (!(flags & RADEON_GART_PAGE_SNOOP))
- entry |= RS400_PTE_UNSNOOPED;
- return entry;
-}
+ if (i < 0 || i > rdev->gart.num_gpu_pages) {
+ return -EINVAL;
+ }
-void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t entry)
-{
- u32 *gtt = rdev->gart.ptr;
- gtt[i] = cpu_to_le32(lower_32_bits(entry));
+ entry = (lower_32_bits(addr) & ~PAGE_MASK) |
+ ((upper_32_bits(addr) & 0xff) << 4) |
+ RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
+ entry = cpu_to_le32(entry);
+ gtt[i] = entry;
+ return 0;
}
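/*
 * Worked example with an illustrative 40-bit address (not a value from
 * the driver), assuming the BSD convention PAGE_MASK == PAGE_SIZE - 1
 * used in this tree: for addr = 0x123456000,
 * lower_32_bits(addr) & ~PAGE_MASK = 0x23456000, the extension bits
 * 32..39 (0x01) land at bits 4..11, and the R/W flags occupy bits 2..3,
 * so entry = 0x23456000 | (0x01 << 4) | 0x4 | 0x8 = 0x2345601c before
 * the cpu_to_le32() swap.
 */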
int rs400_mc_wait_for_idle(struct radeon_device *rdev)
@@ -277,26 +273,19 @@ static void rs400_mc_init(struct radeon_device *rdev)
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
- unsigned long flags;
uint32_t r;
- spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(RS480_NB_MC_INDEX, reg & 0xff);
r = RREG32(RS480_NB_MC_DATA);
WREG32(RS480_NB_MC_INDEX, 0xff);
- spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r;
}
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
- unsigned long flags;
-
- spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
WREG32(RS480_NB_MC_DATA, (v));
WREG32(RS480_NB_MC_INDEX, 0xff);
- spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
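/*
 * Usage sketch with a hypothetical register number: a read of NB MC
 * register 0x42 boils down to
 *
 *   WREG32(RS480_NB_MC_INDEX, 0x42);
 *   r = RREG32(RS480_NB_MC_DATA);
 *   WREG32(RS480_NB_MC_INDEX, 0xff);    (park the index again)
 *
 * a write additionally sets RS480_NB_MC_IND_WR_EN in the index word.
 */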
#if defined(CONFIG_DEBUG_FS)
@@ -487,7 +476,6 @@ int rs400_resume(struct radeon_device *rdev)
int rs400_suspend(struct radeon_device *rdev)
{
- radeon_pm_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -497,7 +485,6 @@ int rs400_suspend(struct radeon_device *rdev)
void rs400_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -565,9 +552,6 @@ int rs400_init(struct radeon_device *rdev)
return r;
r300_set_reg_safe(rdev);
- /* Initialize power management */
- radeon_pm_init(rdev);
-
rdev->accel_working = true;
r = rs400_startup(rdev);
if (r) {
diff --git a/sys/dev/pci/drm/radeon/rs400d.h b/sys/dev/pci/drm/radeon/rs400d.h
index 6d8bac58ced..d2f0156120e 100644
--- a/sys/dev/pci/drm/radeon/rs400d.h
+++ b/sys/dev/pci/drm/radeon/rs400d.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: rs400d.h,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
diff --git a/sys/dev/pci/drm/radeon/rs600.c b/sys/dev/pci/drm/radeon/rs600.c
index c6c7e2213a8..f531bd17c56 100644
--- a/sys/dev/pci/drm/radeon/rs600.c
+++ b/sys/dev/pci/drm/radeon/rs600.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: rs600.c,v 1.10 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -38,7 +39,6 @@
#include <dev/pci/drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
-#include "radeon_audio.h"
#include "atom.h"
#include "rs600d.h"
@@ -110,7 +110,19 @@ void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
}
}
-void rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+ /* enable the pflip int */
+ radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+ /* disable the pflip int */
+ radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
@@ -137,79 +149,9 @@ void rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
/* Unlock the lock, so double-buffering can take place inside vblank */
tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
-}
-
-bool rs600_page_flip_pending(struct radeon_device *rdev, int crtc_id)
-{
- struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
/* Return current update_pending status: */
- return !!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
- AVIVO_D1GRPH_SURFACE_UPDATE_PENDING);
-}
-
-void avivo_program_fmt(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- int bpc = 0;
- u32 tmp = 0;
- enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
-
- if (connector) {
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- bpc = radeon_get_monitor_bpc(connector);
- dither = radeon_connector->dither;
- }
-
- /* LVDS FMT is set up by atom */
- if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
- return;
-
- if (bpc == 0)
- return;
-
- switch (bpc) {
- case 6:
- if (dither == RADEON_FMT_DITHER_ENABLE)
- /* XXX sort out optimal dither settings */
- tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
- else
- tmp |= AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN;
- break;
- case 8:
- if (dither == RADEON_FMT_DITHER_ENABLE)
- /* XXX sort out optimal dither settings */
- tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN |
- AVIVO_TMDS_BIT_DEPTH_CONTROL_SPATIAL_DITHER_DEPTH);
- else
- tmp |= (AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_EN |
- AVIVO_TMDS_BIT_DEPTH_CONTROL_TRUNCATE_DEPTH);
- break;
- case 10:
- default:
- /* not needed */
- break;
- }
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- WREG32(AVIVO_TMDSA_BIT_DEPTH_CONTROL, tmp);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, tmp);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- WREG32(AVIVO_DVOA_BIT_DEPTH_CONTROL, tmp);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_DDI:
- WREG32(AVIVO_DDIA_BIT_DEPTH_CONTROL, tmp);
- break;
- default:
- break;
- }
+ return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}
void rs600_pm_misc(struct radeon_device *rdev)
@@ -465,9 +407,11 @@ int rs600_asic_reset(struct radeon_device *rdev)
WREG32(RADEON_CP_RB_RPTR_WR, 0);
WREG32(RADEON_CP_RB_WPTR, 0);
WREG32(RADEON_CP_RB_CNTL, tmp);
+#ifdef notyet
pci_save_state(rdev->pdev);
/* disable bus mastering */
pci_clear_master(rdev->pdev);
+#endif
mdelay(1);
/* reset GA+VAP */
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
@@ -495,7 +439,9 @@ int rs600_asic_reset(struct radeon_device *rdev)
status = RREG32(R_000E40_RBBM_STATUS);
dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
/* restore PCI & busmastering */
+#ifdef notyet
pci_restore_state(rdev->pdev);
+#endif
/* Check if GPU is idle */
if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
@@ -556,6 +502,7 @@ static int rs600_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
+ radeon_gart_restore(rdev);
/* Enable bus master */
tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
WREG32(RADEON_BUS_CNTL, tmp);
@@ -626,26 +573,25 @@ static void rs600_gart_fini(struct radeon_device *rdev)
radeon_gart_table_vram_free(rdev);
}
-uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags)
-{
- addr = addr & 0xFFFFFFFFFFFFF000ULL;
- addr |= R600_PTE_SYSTEM;
- if (flags & RADEON_GART_PAGE_VALID)
- addr |= R600_PTE_VALID;
- if (flags & RADEON_GART_PAGE_READ)
- addr |= R600_PTE_READABLE;
- if (flags & RADEON_GART_PAGE_WRITE)
- addr |= R600_PTE_WRITEABLE;
- if (flags & RADEON_GART_PAGE_SNOOP)
- addr |= R600_PTE_SNOOPED;
- return addr;
-}
+#define R600_PTE_VALID (1 << 0)
+#define R600_PTE_SYSTEM (1 << 1)
+#define R600_PTE_SNOOPED (1 << 2)
+#define R600_PTE_READABLE (1 << 5)
+#define R600_PTE_WRITEABLE (1 << 6)
-void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t entry)
+int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
- void __iomem *ptr = (void *)rdev->gart.ptr;
- writeq(entry, ptr + (i * 8));
+ volatile uint64_t *ptr = rdev->gart.ptr;
+
+ if (i < 0 || i > rdev->gart.num_gpu_pages) {
+ return -EINVAL;
+ }
+ addr = addr & 0xFFFFFFFFFFFFF000ULL;
+ addr |= R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED;
+ addr |= R600_PTE_READABLE | R600_PTE_WRITEABLE;
+ ptr += i;
+ *ptr = addr;
+ return 0;
}
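/*
 * Worked example with an illustrative address (not a value from the
 * driver): addr = 0x123456000 is already 4K-aligned, so the mask is a
 * no-op; ORing in VALID|SYSTEM|SNOOPED (bits 0..2) and
 * READABLE|WRITEABLE (bits 5..6) gives *ptr = 0x123456067.
 */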
int rs600_irq_set(struct radeon_device *rdev)
@@ -694,10 +640,6 @@ int rs600_irq_set(struct radeon_device *rdev)
WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
if (ASIC_IS_DCE2(rdev))
WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
-
- /* posting read */
- RREG32(R_000040_GEN_INT_CNTL);
-
return 0;
}
@@ -787,7 +729,7 @@ int rs600_irq_process(struct radeon_device *rdev)
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_vblank(rdev, 0);
+ radeon_crtc_handle_flip(rdev, 0);
}
if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
if (rdev->irq.crtc_vblank_int[1]) {
@@ -796,7 +738,7 @@ int rs600_irq_process(struct radeon_device *rdev)
wake_up(&rdev->irq.vblank_queue);
}
if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_vblank(rdev, 1);
+ radeon_crtc_handle_flip(rdev, 1);
}
if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
queue_hotplug = true;
@@ -813,9 +755,9 @@ int rs600_irq_process(struct radeon_device *rdev)
status = rs600_irq_ack(rdev);
}
if (queue_hotplug)
- schedule_delayed_work(&rdev->hotplug_work, 0);
+ task_add(systq, &rdev->hotplug_task);
if (queue_hdmi)
- schedule_work(&rdev->audio_work);
+ task_add(systq, &rdev->audio_task);
if (rdev->msi_enabled) {
switch (rdev->family) {
case CHIP_RS600:
@@ -888,9 +830,6 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
/* FIXME: implement full support */
- if (!rdev->mode_info.mode_config_initialized)
- return;
-
radeon_update_display_priority(rdev);
if (rdev->mode_info.crtcs[0]->base.enabled)
@@ -914,26 +853,16 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
- unsigned long flags;
- u32 r;
-
- spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
S_000070_MC_IND_CITF_ARB0(1));
- r = RREG32(R_000074_MC_IND_DATA);
- spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
- return r;
+ return RREG32(R_000074_MC_IND_DATA);
}
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
- unsigned long flags;
-
- spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) |
S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1));
WREG32(R_000074_MC_IND_DATA, v);
- spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
static void rs600_debugfs(struct radeon_device *rdev)
@@ -1021,7 +950,7 @@ static int rs600_startup(struct radeon_device *rdev)
return r;
}
- r = radeon_audio_init(rdev);
+ r = r600_audio_init(rdev);
if (r) {
dev_err(rdev->dev, "failed initializing audio\n");
return r;
@@ -1061,8 +990,7 @@ int rs600_resume(struct radeon_device *rdev)
int rs600_suspend(struct radeon_device *rdev)
{
- radeon_pm_suspend(rdev);
- radeon_audio_fini(rdev);
+ r600_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
rs600_irq_disable(rdev);
@@ -1072,8 +1000,7 @@ int rs600_suspend(struct radeon_device *rdev)
void rs600_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
- radeon_audio_fini(rdev);
+ r600_audio_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -1141,9 +1068,6 @@ int rs600_init(struct radeon_device *rdev)
return r;
rs600_set_safe_registers(rdev);
- /* Initialize power management */
- radeon_pm_init(rdev);
-
rdev->accel_working = true;
r = rs600_startup(rdev);
if (r) {
diff --git a/sys/dev/pci/drm/radeon/rs600d.h b/sys/dev/pci/drm/radeon/rs600d.h
index f1f89414dc6..20e76044f83 100644
--- a/sys/dev/pci/drm/radeon/rs600d.h
+++ b/sys/dev/pci/drm/radeon/rs600d.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: rs600d.h,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
diff --git a/sys/dev/pci/drm/radeon/rs690.c b/sys/dev/pci/drm/radeon/rs690.c
index 488341c2021..5ff501a2ac2 100644
--- a/sys/dev/pci/drm/radeon/rs690.c
+++ b/sys/dev/pci/drm/radeon/rs690.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: rs690.c,v 1.8 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -28,7 +29,6 @@
#include <dev/pci/drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
-#include "radeon_audio.h"
#include "atom.h"
#include "rs690d.h"
@@ -149,8 +149,6 @@ void rs690_pm_info(struct radeon_device *rdev)
static void rs690_mc_init(struct radeon_device *rdev)
{
u64 base;
- uint32_t h_addr, l_addr;
- unsigned long long k8_addr;
rs400_gart_adjust_size(rdev);
rdev->mc.vram_is_ddr = true;
@@ -174,26 +172,6 @@ static void rs690_mc_init(struct radeon_device *rdev)
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
}
- /* Use K8 direct mapping for fast fb access. */
- rdev->fastfb_working = false;
- h_addr = G_00005F_K8_ADDR_EXT(RREG32_MC(R_00005F_MC_MISC_UMA_CNTL));
- l_addr = RREG32_MC(R_00001E_K8_FB_LOCATION);
- k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
- if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
-#endif
- {
- /* FastFB shall be used with UMA memory. Here it is simply disabled when sideport
- * memory is present.
- */
- if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
- DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
- (unsigned long long)rdev->mc.aper_base, k8_addr);
- rdev->mc.aper_base = (resource_size_t)k8_addr;
- rdev->fastfb_working = true;
- }
- }
-
rs690_pm_info(rdev);
radeon_vram_location(rdev, &rdev->mc, base);
rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
@@ -207,9 +185,6 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev,
{
u32 tmp;
- /* Guess line buffer size to be 8192 pixels */
- u32 lb_size = 8192;
-
/*
* Line Buffer Setup
* There is a single line buffer shared by both display controllers.
@@ -246,13 +221,6 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev,
tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
}
WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
-
- /* Save number of lines the linebuffer leads before the scanout */
- if (mode1)
- rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay);
-
- if (mode2)
- rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay);
}
struct rs690_watermark {
@@ -269,16 +237,13 @@ struct rs690_watermark {
};
static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
- struct radeon_crtc *crtc,
- struct rs690_watermark *wm,
- bool low)
+ struct radeon_crtc *crtc,
+ struct rs690_watermark *wm)
{
struct drm_display_mode *mode = &crtc->base.mode;
fixed20_12 a, b, c;
fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
- fixed20_12 sclk, core_bandwidth, max_bandwidth;
- u32 selected_sclk;
if (!crtc->base.enabled) {
/* FIXME: wouldn't it be better to set priority mark to maximum */
@@ -286,21 +251,6 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
return;
}
- if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) &&
- (rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
- selected_sclk = radeon_dpm_get_sclk(rdev, low);
- else
- selected_sclk = rdev->pm.current_sclk;
-
- /* sclk in Mhz */
- a.full = dfixed_const(100);
- sclk.full = dfixed_const(selected_sclk);
- sclk.full = dfixed_div(sclk, a);
-
- /* core_bandwidth = sclk(Mhz) * 16 */
- a.full = dfixed_const(16);
- core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
-
if (crtc->vsc.full > dfixed_const(2))
wm->num_line_pair.full = dfixed_const(2);
else
@@ -361,38 +311,38 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
wm->active_time.full = dfixed_div(wm->active_time, a);
/* Maximum bandwidth is the minimum bandwidth of all components */
- max_bandwidth = core_bandwidth;
+ rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
if (rdev->mc.igp_sideport_enabled) {
- if (max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
+ if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
rdev->pm.sideport_bandwidth.full)
- max_bandwidth = rdev->pm.sideport_bandwidth;
+ rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
read_delay_latency.full = dfixed_const(370 * 800);
a.full = dfixed_const(1000);
b.full = dfixed_div(rdev->pm.igp_sideport_mclk, a);
read_delay_latency.full = dfixed_div(read_delay_latency, b);
read_delay_latency.full = dfixed_mul(read_delay_latency, a);
} else {
- if (max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
+ if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
rdev->pm.k8_bandwidth.full)
- max_bandwidth = rdev->pm.k8_bandwidth;
- if (max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
+ rdev->pm.max_bandwidth = rdev->pm.k8_bandwidth;
+ if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
rdev->pm.ht_bandwidth.full)
- max_bandwidth = rdev->pm.ht_bandwidth;
+ rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth;
read_delay_latency.full = dfixed_const(5000);
}
/* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */
a.full = dfixed_const(16);
- sclk.full = dfixed_mul(max_bandwidth, a);
+ rdev->pm.sclk.full = dfixed_mul(rdev->pm.max_bandwidth, a);
a.full = dfixed_const(1000);
- sclk.full = dfixed_div(a, sclk);
+ rdev->pm.sclk.full = dfixed_div(a, rdev->pm.sclk);
/* Determine chunk time
* ChunkTime = the time it takes the DCP to send one chunk of data
* to the LB which consists of pipeline delay and inter chunk gap
* sclk = system clock(ns)
*/
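/*
 * Worked numbers (made up for illustration): if max_bandwidth works
 * out to 25, then sclk = 1000 / 25 / 16 = 2.5 ns per system clock,
 * and chunk_time = sclk * 256 * 13 / 10 = 2.5 * 332.8 = 832 ns.
 */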
a.full = dfixed_const(256 * 13);
- chunk_time.full = dfixed_mul(sclk, a);
+ chunk_time.full = dfixed_mul(rdev->pm.sclk, a);
a.full = dfixed_const(10);
chunk_time.full = dfixed_div(chunk_time, a);
@@ -456,220 +406,185 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
}
}
-static void rs690_compute_mode_priority(struct radeon_device *rdev,
- struct rs690_watermark *wm0,
- struct rs690_watermark *wm1,
- struct drm_display_mode *mode0,
- struct drm_display_mode *mode1,
- u32 *d1mode_priority_a_cnt,
- u32 *d2mode_priority_a_cnt)
+void rs690_bandwidth_update(struct radeon_device *rdev)
{
+ struct drm_display_mode *mode0 = NULL;
+ struct drm_display_mode *mode1 = NULL;
+ struct rs690_watermark wm0;
+ struct rs690_watermark wm1;
+ u32 tmp;
+ u32 d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
+ u32 d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
fixed20_12 priority_mark02, priority_mark12, fill_rate;
fixed20_12 a, b;
- *d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
- *d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
+ radeon_update_display_priority(rdev);
+
+ if (rdev->mode_info.crtcs[0]->base.enabled)
+ mode0 = &rdev->mode_info.crtcs[0]->base.mode;
+ if (rdev->mode_info.crtcs[1]->base.enabled)
+ mode1 = &rdev->mode_info.crtcs[1]->base.mode;
+ /*
+ * Set display0/1 priority up in the memory controller for
+ * modes if the user specifies HIGH for displaypriority
+ * option.
+ */
+ if ((rdev->disp_priority == 2) &&
+ ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
+ tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
+ tmp &= C_000104_MC_DISP0R_INIT_LAT;
+ tmp &= C_000104_MC_DISP1R_INIT_LAT;
+ if (mode0)
+ tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
+ if (mode1)
+ tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
+ WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
+ }
+ rs690_line_buffer_adjust(rdev, mode0, mode1);
+
+ if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
+ WREG32(R_006C9C_DCP_CONTROL, 0);
+ if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+ WREG32(R_006C9C_DCP_CONTROL, 2);
+
+ rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
+ rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
+
+ tmp = (wm0.lb_request_fifo_depth - 1);
+ tmp |= (wm1.lb_request_fifo_depth - 1) << 16;
+ WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
if (mode0 && mode1) {
- if (dfixed_trunc(wm0->dbpp) > 64)
- a.full = dfixed_mul(wm0->dbpp, wm0->num_line_pair);
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
else
- a.full = wm0->num_line_pair.full;
- if (dfixed_trunc(wm1->dbpp) > 64)
- b.full = dfixed_mul(wm1->dbpp, wm1->num_line_pair);
+ a.full = wm0.num_line_pair.full;
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ b.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
else
- b.full = wm1->num_line_pair.full;
+ b.full = wm1.num_line_pair.full;
a.full += b.full;
- fill_rate.full = dfixed_div(wm0->sclk, a);
- if (wm0->consumption_rate.full > fill_rate.full) {
- b.full = wm0->consumption_rate.full - fill_rate.full;
- b.full = dfixed_mul(b, wm0->active_time);
- a.full = dfixed_mul(wm0->worst_case_latency,
- wm0->consumption_rate);
+ fill_rate.full = dfixed_div(wm0.sclk, a);
+ if (wm0.consumption_rate.full > fill_rate.full) {
+ b.full = wm0.consumption_rate.full - fill_rate.full;
+ b.full = dfixed_mul(b, wm0.active_time);
+ a.full = dfixed_mul(wm0.worst_case_latency,
+ wm0.consumption_rate);
a.full = a.full + b.full;
b.full = dfixed_const(16 * 1000);
priority_mark02.full = dfixed_div(a, b);
} else {
- a.full = dfixed_mul(wm0->worst_case_latency,
- wm0->consumption_rate);
+ a.full = dfixed_mul(wm0.worst_case_latency,
+ wm0.consumption_rate);
b.full = dfixed_const(16 * 1000);
priority_mark02.full = dfixed_div(a, b);
}
- if (wm1->consumption_rate.full > fill_rate.full) {
- b.full = wm1->consumption_rate.full - fill_rate.full;
- b.full = dfixed_mul(b, wm1->active_time);
- a.full = dfixed_mul(wm1->worst_case_latency,
- wm1->consumption_rate);
+ if (wm1.consumption_rate.full > fill_rate.full) {
+ b.full = wm1.consumption_rate.full - fill_rate.full;
+ b.full = dfixed_mul(b, wm1.active_time);
+ a.full = dfixed_mul(wm1.worst_case_latency,
+ wm1.consumption_rate);
a.full = a.full + b.full;
b.full = dfixed_const(16 * 1000);
priority_mark12.full = dfixed_div(a, b);
} else {
- a.full = dfixed_mul(wm1->worst_case_latency,
- wm1->consumption_rate);
+ a.full = dfixed_mul(wm1.worst_case_latency,
+ wm1.consumption_rate);
b.full = dfixed_const(16 * 1000);
priority_mark12.full = dfixed_div(a, b);
}
- if (wm0->priority_mark.full > priority_mark02.full)
- priority_mark02.full = wm0->priority_mark.full;
- if (wm0->priority_mark_max.full > priority_mark02.full)
- priority_mark02.full = wm0->priority_mark_max.full;
- if (wm1->priority_mark.full > priority_mark12.full)
- priority_mark12.full = wm1->priority_mark.full;
- if (wm1->priority_mark_max.full > priority_mark12.full)
- priority_mark12.full = wm1->priority_mark_max.full;
- *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
- *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+ if (wm0.priority_mark.full > priority_mark02.full)
+ priority_mark02.full = wm0.priority_mark.full;
+ if (wm0.priority_mark_max.full > priority_mark02.full)
+ priority_mark02.full = wm0.priority_mark_max.full;
+ if (wm1.priority_mark.full > priority_mark12.full)
+ priority_mark12.full = wm1.priority_mark.full;
+ if (wm1.priority_mark_max.full > priority_mark12.full)
+ priority_mark12.full = wm1.priority_mark_max.full;
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2) {
- *d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
- *d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+ d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+ d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
}
} else if (mode0) {
- if (dfixed_trunc(wm0->dbpp) > 64)
- a.full = dfixed_mul(wm0->dbpp, wm0->num_line_pair);
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
else
- a.full = wm0->num_line_pair.full;
- fill_rate.full = dfixed_div(wm0->sclk, a);
- if (wm0->consumption_rate.full > fill_rate.full) {
- b.full = wm0->consumption_rate.full - fill_rate.full;
- b.full = dfixed_mul(b, wm0->active_time);
- a.full = dfixed_mul(wm0->worst_case_latency,
- wm0->consumption_rate);
+ a.full = wm0.num_line_pair.full;
+ fill_rate.full = dfixed_div(wm0.sclk, a);
+ if (wm0.consumption_rate.full > fill_rate.full) {
+ b.full = wm0.consumption_rate.full - fill_rate.full;
+ b.full = dfixed_mul(b, wm0.active_time);
+ a.full = dfixed_mul(wm0.worst_case_latency,
+ wm0.consumption_rate);
a.full = a.full + b.full;
b.full = dfixed_const(16 * 1000);
priority_mark02.full = dfixed_div(a, b);
} else {
- a.full = dfixed_mul(wm0->worst_case_latency,
- wm0->consumption_rate);
+ a.full = dfixed_mul(wm0.worst_case_latency,
+ wm0.consumption_rate);
b.full = dfixed_const(16 * 1000);
priority_mark02.full = dfixed_div(a, b);
}
- if (wm0->priority_mark.full > priority_mark02.full)
- priority_mark02.full = wm0->priority_mark.full;
- if (wm0->priority_mark_max.full > priority_mark02.full)
- priority_mark02.full = wm0->priority_mark_max.full;
- *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+ if (wm0.priority_mark.full > priority_mark02.full)
+ priority_mark02.full = wm0.priority_mark.full;
+ if (wm0.priority_mark_max.full > priority_mark02.full)
+ priority_mark02.full = wm0.priority_mark_max.full;
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
if (rdev->disp_priority == 2)
- *d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+ d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
} else if (mode1) {
- if (dfixed_trunc(wm1->dbpp) > 64)
- a.full = dfixed_mul(wm1->dbpp, wm1->num_line_pair);
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
else
- a.full = wm1->num_line_pair.full;
- fill_rate.full = dfixed_div(wm1->sclk, a);
- if (wm1->consumption_rate.full > fill_rate.full) {
- b.full = wm1->consumption_rate.full - fill_rate.full;
- b.full = dfixed_mul(b, wm1->active_time);
- a.full = dfixed_mul(wm1->worst_case_latency,
- wm1->consumption_rate);
+ a.full = wm1.num_line_pair.full;
+ fill_rate.full = dfixed_div(wm1.sclk, a);
+ if (wm1.consumption_rate.full > fill_rate.full) {
+ b.full = wm1.consumption_rate.full - fill_rate.full;
+ b.full = dfixed_mul(b, wm1.active_time);
+ a.full = dfixed_mul(wm1.worst_case_latency,
+ wm1.consumption_rate);
a.full = a.full + b.full;
b.full = dfixed_const(16 * 1000);
priority_mark12.full = dfixed_div(a, b);
} else {
- a.full = dfixed_mul(wm1->worst_case_latency,
- wm1->consumption_rate);
+ a.full = dfixed_mul(wm1.worst_case_latency,
+ wm1.consumption_rate);
b.full = dfixed_const(16 * 1000);
priority_mark12.full = dfixed_div(a, b);
}
- if (wm1->priority_mark.full > priority_mark12.full)
- priority_mark12.full = wm1->priority_mark.full;
- if (wm1->priority_mark_max.full > priority_mark12.full)
- priority_mark12.full = wm1->priority_mark_max.full;
- *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+ if (wm1.priority_mark.full > priority_mark12.full)
+ priority_mark12.full = wm1.priority_mark.full;
+ if (wm1.priority_mark_max.full > priority_mark12.full)
+ priority_mark12.full = wm1.priority_mark_max.full;
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2)
- *d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+ d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
}
-}
-
-void rs690_bandwidth_update(struct radeon_device *rdev)
-{
- struct drm_display_mode *mode0 = NULL;
- struct drm_display_mode *mode1 = NULL;
- struct rs690_watermark wm0_high, wm0_low;
- struct rs690_watermark wm1_high, wm1_low;
- u32 tmp;
- u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
- u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
-
- if (!rdev->mode_info.mode_config_initialized)
- return;
-
- radeon_update_display_priority(rdev);
-
- if (rdev->mode_info.crtcs[0]->base.enabled)
- mode0 = &rdev->mode_info.crtcs[0]->base.mode;
- if (rdev->mode_info.crtcs[1]->base.enabled)
- mode1 = &rdev->mode_info.crtcs[1]->base.mode;
- /*
- * Set display0/1 priority up in the memory controller for
- * modes if the user specifies HIGH for displaypriority
- * option.
- */
- if ((rdev->disp_priority == 2) &&
- ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
- tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
- tmp &= C_000104_MC_DISP0R_INIT_LAT;
- tmp &= C_000104_MC_DISP1R_INIT_LAT;
- if (mode0)
- tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
- if (mode1)
- tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
- WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
- }
- rs690_line_buffer_adjust(rdev, mode0, mode1);
-
- if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
- WREG32(R_006C9C_DCP_CONTROL, 0);
- if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
- WREG32(R_006C9C_DCP_CONTROL, 2);
-
- rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_high, false);
- rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_high, false);
-
- rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_low, true);
- rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_low, true);
-
- tmp = (wm0_high.lb_request_fifo_depth - 1);
- tmp |= (wm1_high.lb_request_fifo_depth - 1) << 16;
- WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);
-
- rs690_compute_mode_priority(rdev,
- &wm0_high, &wm1_high,
- mode0, mode1,
- &d1mode_priority_a_cnt, &d2mode_priority_a_cnt);
- rs690_compute_mode_priority(rdev,
- &wm0_low, &wm1_low,
- mode0, mode1,
- &d1mode_priority_b_cnt, &d2mode_priority_b_cnt);
WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
- WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_b_cnt);
+ WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
- WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_b_cnt);
+ WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
}
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
- unsigned long flags;
uint32_t r;
- spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
r = RREG32(R_00007C_MC_DATA);
WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
- spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
return r;
}
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
- unsigned long flags;
-
- spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
S_000078_MC_IND_WR_EN(1));
WREG32(R_00007C_MC_DATA, v);
WREG32(R_000078_MC_INDEX, 0x7F);
- spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
static void rs690_mc_program(struct radeon_device *rdev)
@@ -740,7 +655,7 @@ static int rs690_startup(struct radeon_device *rdev)
return r;
}
- r = radeon_audio_init(rdev);
+ r = r600_audio_init(rdev);
if (r) {
dev_err(rdev->dev, "failed initializing audio\n");
return r;
@@ -780,8 +695,7 @@ int rs690_resume(struct radeon_device *rdev)
int rs690_suspend(struct radeon_device *rdev)
{
- radeon_pm_suspend(rdev);
- radeon_audio_fini(rdev);
+ r600_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
rs600_irq_disable(rdev);
@@ -791,8 +705,7 @@ int rs690_suspend(struct radeon_device *rdev)
void rs690_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
- radeon_audio_fini(rdev);
+ r600_audio_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -861,9 +774,6 @@ int rs690_init(struct radeon_device *rdev)
return r;
rs600_set_safe_registers(rdev);
- /* Initialize power management */
- radeon_pm_init(rdev);
-
rdev->accel_working = true;
r = rs690_startup(rdev);
if (r) {
diff --git a/sys/dev/pci/drm/radeon/rs690d.h b/sys/dev/pci/drm/radeon/rs690d.h
index 8af3ccf20cc..5b2b38efd4a 100644
--- a/sys/dev/pci/drm/radeon/rs690d.h
+++ b/sys/dev/pci/drm/radeon/rs690d.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: rs690d.h,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -29,9 +30,6 @@
#define __RS690D_H__
/* Registers */
-#define R_00001E_K8_FB_LOCATION 0x00001E
-#define R_00005F_MC_MISC_UMA_CNTL 0x00005F
-#define G_00005F_K8_ADDR_EXT(x) (((x) >> 0) & 0xFF)
#define R_000078_MC_INDEX 0x000078
#define S_000078_MC_IND_ADDR(x) (((x) & 0x1FF) << 0)
#define G_000078_MC_IND_ADDR(x) (((x) >> 0) & 0x1FF)
diff --git a/sys/dev/pci/drm/radeon/rv200d.h b/sys/dev/pci/drm/radeon/rv200d.h
index c5b398330c2..384502a5beb 100644
--- a/sys/dev/pci/drm/radeon/rv200d.h
+++ b/sys/dev/pci/drm/radeon/rv200d.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: rv200d.h,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
diff --git a/sys/dev/pci/drm/radeon/rv250d.h b/sys/dev/pci/drm/radeon/rv250d.h
index e5a70b06fe1..f20d17a3cb0 100644
--- a/sys/dev/pci/drm/radeon/rv250d.h
+++ b/sys/dev/pci/drm/radeon/rv250d.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: rv250d.h,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
diff --git a/sys/dev/pci/drm/radeon/rv350d.h b/sys/dev/pci/drm/radeon/rv350d.h
index c75c5ed9e65..09dcb1b7d70 100644
--- a/sys/dev/pci/drm/radeon/rv350d.h
+++ b/sys/dev/pci/drm/radeon/rv350d.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: rv350d.h,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
diff --git a/sys/dev/pci/drm/radeon/rv515.c b/sys/dev/pci/drm/radeon/rv515.c
index 995e01221db..225b7245577 100644
--- a/sys/dev/pci/drm/radeon/rv515.c
+++ b/sys/dev/pci/drm/radeon/rv515.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: rv515.c,v 1.8 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -122,7 +123,7 @@ void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
radeon_ring_write(ring, PACKET0(0x20C8, 0));
radeon_ring_write(ring, 0);
- radeon_ring_unlock_commit(rdev, ring, false);
+ radeon_ring_unlock_commit(rdev, ring);
}
int rv515_mc_wait_for_idle(struct radeon_device *rdev)
@@ -207,27 +208,19 @@ static void rv515_mc_init(struct radeon_device *rdev)
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
- unsigned long flags;
uint32_t r;
- spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff));
r = RREG32(MC_IND_DATA);
WREG32(MC_IND_INDEX, 0);
- spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
-
return r;
}
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
- unsigned long flags;
-
- spin_lock_irqsave(&rdev->mc_idx_lock, flags);
WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff));
WREG32(MC_IND_DATA, (v));
WREG32(MC_IND_INDEX, 0);
- spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}
#if defined(CONFIG_DEBUG_FS)
@@ -404,9 +397,8 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
for (i = 0; i < rdev->num_crtc; i++) {
if (save->crtc_enabled[i]) {
tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
- if ((tmp & 0x7) != 3) {
- tmp &= ~0x7;
- tmp |= 0x3;
+ if ((tmp & 0x3) != 0) {
+ tmp &= ~0x3;
WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
}
tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
@@ -595,7 +587,6 @@ int rv515_resume(struct radeon_device *rdev)
int rv515_suspend(struct radeon_device *rdev)
{
- radeon_pm_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
rs600_irq_disable(rdev);
@@ -612,7 +603,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
void rv515_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -686,9 +676,6 @@ int rv515_init(struct radeon_device *rdev)
return r;
rv515_set_safe_registers(rdev);
- /* Initialize power management */
- radeon_pm_init(rdev);
-
rdev->accel_working = true;
r = rv515_startup(rdev);
if (r) {
@@ -949,16 +936,13 @@ struct rv515_watermark {
};
static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
- struct radeon_crtc *crtc,
- struct rv515_watermark *wm,
- bool low)
+ struct radeon_crtc *crtc,
+ struct rv515_watermark *wm)
{
struct drm_display_mode *mode = &crtc->base.mode;
fixed20_12 a, b, c;
fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
- fixed20_12 sclk;
- u32 selected_sclk;
if (!crtc->base.enabled) {
/* FIXME: wouldn't it be better to set priority mark to maximum */
@@ -966,18 +950,6 @@ static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
return;
}
- /* rv6xx, rv7xx */
- if ((rdev->family >= CHIP_RV610) &&
- (rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
- selected_sclk = radeon_dpm_get_sclk(rdev, low);
- else
- selected_sclk = rdev->pm.current_sclk;
-
- /* sclk in Mhz */
- a.full = dfixed_const(100);
- sclk.full = dfixed_const(selected_sclk);
- sclk.full = dfixed_div(sclk, a);
-
if (crtc->vsc.full > dfixed_const(2))
wm->num_line_pair.full = dfixed_const(2);
else
@@ -1043,7 +1015,7 @@ static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
* sclk = system clock(Mhz)
*/
a.full = dfixed_const(600 * 1000);
- chunk_time.full = dfixed_div(a, sclk);
+ chunk_time.full = dfixed_div(a, rdev->pm.sclk);
read_delay_latency.full = dfixed_const(1000);
/* Determine the worst case latency
@@ -1104,169 +1076,144 @@ static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
}
}
-static void rv515_compute_mode_priority(struct radeon_device *rdev,
- struct rv515_watermark *wm0,
- struct rv515_watermark *wm1,
- struct drm_display_mode *mode0,
- struct drm_display_mode *mode1,
- u32 *d1mode_priority_a_cnt,
- u32 *d2mode_priority_a_cnt)
+void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
{
+ struct drm_display_mode *mode0 = NULL;
+ struct drm_display_mode *mode1 = NULL;
+ struct rv515_watermark wm0;
+ struct rv515_watermark wm1;
+ u32 tmp;
+ u32 d1mode_priority_a_cnt = MODE_PRIORITY_OFF;
+ u32 d2mode_priority_a_cnt = MODE_PRIORITY_OFF;
fixed20_12 priority_mark02, priority_mark12, fill_rate;
fixed20_12 a, b;
- *d1mode_priority_a_cnt = MODE_PRIORITY_OFF;
- *d2mode_priority_a_cnt = MODE_PRIORITY_OFF;
+ if (rdev->mode_info.crtcs[0]->base.enabled)
+ mode0 = &rdev->mode_info.crtcs[0]->base.mode;
+ if (rdev->mode_info.crtcs[1]->base.enabled)
+ mode1 = &rdev->mode_info.crtcs[1]->base.mode;
+ rs690_line_buffer_adjust(rdev, mode0, mode1);
+
+ rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
+ rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
+
+ tmp = wm0.lb_request_fifo_depth;
+ tmp |= wm1.lb_request_fifo_depth << 16;
+ WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
if (mode0 && mode1) {
- if (dfixed_trunc(wm0->dbpp) > 64)
- a.full = dfixed_div(wm0->dbpp, wm0->num_line_pair);
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
else
- a.full = wm0->num_line_pair.full;
- if (dfixed_trunc(wm1->dbpp) > 64)
- b.full = dfixed_div(wm1->dbpp, wm1->num_line_pair);
+ a.full = wm0.num_line_pair.full;
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ b.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
else
- b.full = wm1->num_line_pair.full;
+ b.full = wm1.num_line_pair.full;
a.full += b.full;
- fill_rate.full = dfixed_div(wm0->sclk, a);
- if (wm0->consumption_rate.full > fill_rate.full) {
- b.full = wm0->consumption_rate.full - fill_rate.full;
- b.full = dfixed_mul(b, wm0->active_time);
+ fill_rate.full = dfixed_div(wm0.sclk, a);
+ if (wm0.consumption_rate.full > fill_rate.full) {
+ b.full = wm0.consumption_rate.full - fill_rate.full;
+ b.full = dfixed_mul(b, wm0.active_time);
a.full = dfixed_const(16);
b.full = dfixed_div(b, a);
- a.full = dfixed_mul(wm0->worst_case_latency,
- wm0->consumption_rate);
+ a.full = dfixed_mul(wm0.worst_case_latency,
+ wm0.consumption_rate);
priority_mark02.full = a.full + b.full;
} else {
- a.full = dfixed_mul(wm0->worst_case_latency,
- wm0->consumption_rate);
+ a.full = dfixed_mul(wm0.worst_case_latency,
+ wm0.consumption_rate);
b.full = dfixed_const(16 * 1000);
priority_mark02.full = dfixed_div(a, b);
}
- if (wm1->consumption_rate.full > fill_rate.full) {
- b.full = wm1->consumption_rate.full - fill_rate.full;
- b.full = dfixed_mul(b, wm1->active_time);
+ if (wm1.consumption_rate.full > fill_rate.full) {
+ b.full = wm1.consumption_rate.full - fill_rate.full;
+ b.full = dfixed_mul(b, wm1.active_time);
a.full = dfixed_const(16);
b.full = dfixed_div(b, a);
- a.full = dfixed_mul(wm1->worst_case_latency,
- wm1->consumption_rate);
+ a.full = dfixed_mul(wm1.worst_case_latency,
+ wm1.consumption_rate);
priority_mark12.full = a.full + b.full;
} else {
- a.full = dfixed_mul(wm1->worst_case_latency,
- wm1->consumption_rate);
+ a.full = dfixed_mul(wm1.worst_case_latency,
+ wm1.consumption_rate);
b.full = dfixed_const(16 * 1000);
priority_mark12.full = dfixed_div(a, b);
}
- if (wm0->priority_mark.full > priority_mark02.full)
- priority_mark02.full = wm0->priority_mark.full;
- if (wm0->priority_mark_max.full > priority_mark02.full)
- priority_mark02.full = wm0->priority_mark_max.full;
- if (wm1->priority_mark.full > priority_mark12.full)
- priority_mark12.full = wm1->priority_mark.full;
- if (wm1->priority_mark_max.full > priority_mark12.full)
- priority_mark12.full = wm1->priority_mark_max.full;
- *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
- *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+ if (wm0.priority_mark.full > priority_mark02.full)
+ priority_mark02.full = wm0.priority_mark.full;
+ if (wm0.priority_mark_max.full > priority_mark02.full)
+ priority_mark02.full = wm0.priority_mark_max.full;
+ if (wm1.priority_mark.full > priority_mark12.full)
+ priority_mark12.full = wm1.priority_mark.full;
+ if (wm1.priority_mark_max.full > priority_mark12.full)
+ priority_mark12.full = wm1.priority_mark_max.full;
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2) {
- *d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
- *d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+ d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+ d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
}
} else if (mode0) {
- if (dfixed_trunc(wm0->dbpp) > 64)
- a.full = dfixed_div(wm0->dbpp, wm0->num_line_pair);
+ if (dfixed_trunc(wm0.dbpp) > 64)
+ a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
else
- a.full = wm0->num_line_pair.full;
- fill_rate.full = dfixed_div(wm0->sclk, a);
- if (wm0->consumption_rate.full > fill_rate.full) {
- b.full = wm0->consumption_rate.full - fill_rate.full;
- b.full = dfixed_mul(b, wm0->active_time);
+ a.full = wm0.num_line_pair.full;
+ fill_rate.full = dfixed_div(wm0.sclk, a);
+ if (wm0.consumption_rate.full > fill_rate.full) {
+ b.full = wm0.consumption_rate.full - fill_rate.full;
+ b.full = dfixed_mul(b, wm0.active_time);
a.full = dfixed_const(16);
b.full = dfixed_div(b, a);
- a.full = dfixed_mul(wm0->worst_case_latency,
- wm0->consumption_rate);
+ a.full = dfixed_mul(wm0.worst_case_latency,
+ wm0.consumption_rate);
priority_mark02.full = a.full + b.full;
} else {
- a.full = dfixed_mul(wm0->worst_case_latency,
- wm0->consumption_rate);
+ a.full = dfixed_mul(wm0.worst_case_latency,
+ wm0.consumption_rate);
b.full = dfixed_const(16);
priority_mark02.full = dfixed_div(a, b);
}
- if (wm0->priority_mark.full > priority_mark02.full)
- priority_mark02.full = wm0->priority_mark.full;
- if (wm0->priority_mark_max.full > priority_mark02.full)
- priority_mark02.full = wm0->priority_mark_max.full;
- *d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
+ if (wm0.priority_mark.full > priority_mark02.full)
+ priority_mark02.full = wm0.priority_mark.full;
+ if (wm0.priority_mark_max.full > priority_mark02.full)
+ priority_mark02.full = wm0.priority_mark_max.full;
+ d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
if (rdev->disp_priority == 2)
- *d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+ d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
} else if (mode1) {
- if (dfixed_trunc(wm1->dbpp) > 64)
- a.full = dfixed_div(wm1->dbpp, wm1->num_line_pair);
+ if (dfixed_trunc(wm1.dbpp) > 64)
+ a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
else
- a.full = wm1->num_line_pair.full;
- fill_rate.full = dfixed_div(wm1->sclk, a);
- if (wm1->consumption_rate.full > fill_rate.full) {
- b.full = wm1->consumption_rate.full - fill_rate.full;
- b.full = dfixed_mul(b, wm1->active_time);
+ a.full = wm1.num_line_pair.full;
+ fill_rate.full = dfixed_div(wm1.sclk, a);
+ if (wm1.consumption_rate.full > fill_rate.full) {
+ b.full = wm1.consumption_rate.full - fill_rate.full;
+ b.full = dfixed_mul(b, wm1.active_time);
a.full = dfixed_const(16);
b.full = dfixed_div(b, a);
- a.full = dfixed_mul(wm1->worst_case_latency,
- wm1->consumption_rate);
+ a.full = dfixed_mul(wm1.worst_case_latency,
+ wm1.consumption_rate);
priority_mark12.full = a.full + b.full;
} else {
- a.full = dfixed_mul(wm1->worst_case_latency,
- wm1->consumption_rate);
+ a.full = dfixed_mul(wm1.worst_case_latency,
+ wm1.consumption_rate);
b.full = dfixed_const(16 * 1000);
priority_mark12.full = dfixed_div(a, b);
}
- if (wm1->priority_mark.full > priority_mark12.full)
- priority_mark12.full = wm1->priority_mark.full;
- if (wm1->priority_mark_max.full > priority_mark12.full)
- priority_mark12.full = wm1->priority_mark_max.full;
- *d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
+ if (wm1.priority_mark.full > priority_mark12.full)
+ priority_mark12.full = wm1.priority_mark.full;
+ if (wm1.priority_mark_max.full > priority_mark12.full)
+ priority_mark12.full = wm1.priority_mark_max.full;
+ d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2)
- *d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+ d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
}
-}
-
-void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
-{
- struct drm_display_mode *mode0 = NULL;
- struct drm_display_mode *mode1 = NULL;
- struct rv515_watermark wm0_high, wm0_low;
- struct rv515_watermark wm1_high, wm1_low;
- u32 tmp;
- u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
- u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
-
- if (rdev->mode_info.crtcs[0]->base.enabled)
- mode0 = &rdev->mode_info.crtcs[0]->base.mode;
- if (rdev->mode_info.crtcs[1]->base.enabled)
- mode1 = &rdev->mode_info.crtcs[1]->base.mode;
- rs690_line_buffer_adjust(rdev, mode0, mode1);
-
- rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_high, false);
- rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_high, false);
-
- rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_low, false);
- rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_low, false);
-
- tmp = wm0_high.lb_request_fifo_depth;
- tmp |= wm1_high.lb_request_fifo_depth << 16;
- WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
-
- rv515_compute_mode_priority(rdev,
- &wm0_high, &wm1_high,
- mode0, mode1,
- &d1mode_priority_a_cnt, &d2mode_priority_a_cnt);
- rv515_compute_mode_priority(rdev,
- &wm0_low, &wm1_low,
- mode0, mode1,
- &d1mode_priority_b_cnt, &d2mode_priority_b_cnt);
WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
- WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_b_cnt);
+ WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
- WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_b_cnt);
+ WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
}
void rv515_bandwidth_update(struct radeon_device *rdev)
@@ -1275,9 +1222,6 @@ void rv515_bandwidth_update(struct radeon_device *rdev)
struct drm_display_mode *mode0 = NULL;
struct drm_display_mode *mode1 = NULL;
- if (!rdev->mode_info.mode_config_initialized)
- return;
-
radeon_update_display_priority(rdev);
if (rdev->mode_info.crtcs[0]->base.enabled)
diff --git a/sys/dev/pci/drm/radeon/rv515d.h b/sys/dev/pci/drm/radeon/rv515d.h
index 6927a200daf..707c6f33d90 100644
--- a/sys/dev/pci/drm/radeon/rv515d.h
+++ b/sys/dev/pci/drm/radeon/rv515d.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: rv515d.h,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -205,6 +206,17 @@
REG_SET(PACKET3_IT_OPCODE, (op)) | \
REG_SET(PACKET3_COUNT, (n)))
+#define PACKET_TYPE0 0
+#define PACKET_TYPE1 1
+#define PACKET_TYPE2 2
+#define PACKET_TYPE3 3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
+#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
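+/*
+ * Example with a hypothetical header word: h = 0xc0012300 decodes as
+ * CP_PACKET_GET_TYPE(h) = 3 (a type-3 packet), CP_PACKET_GET_COUNT(h) = 1
+ * and CP_PACKET3_GET_OPCODE(h) = 0x23.
+ */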
+
/* Registers */
#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
diff --git a/sys/dev/pci/drm/radeon/rv770.c b/sys/dev/pci/drm/radeon/rv770.c
index f9abd846a25..9d10d783a39 100644
--- a/sys/dev/pci/drm/radeon/rv770.c
+++ b/sys/dev/pci/drm/radeon/rv770.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: rv770.c,v 1.9 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
@@ -28,7 +29,6 @@
#include <dev/pci/drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
-#include "radeon_audio.h"
#include <dev/pci/drm/radeon_drm.h>
#include "rv770d.h"
#include "atom.h"
@@ -40,766 +40,8 @@
static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
-int evergreen_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
-int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
-{
- unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
- int r;
-
- /* RV740 uses evergreen uvd clk programming */
- if (rdev->family == CHIP_RV740)
- return evergreen_set_uvd_clocks(rdev, vclk, dclk);
-
- /* bypass vclk and dclk with bclk */
- WREG32_P(CG_UPLL_FUNC_CNTL_2,
- VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
- ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
-
- if (!vclk || !dclk) {
- /* keep the Bypass mode, put PLL to sleep */
- WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
- return 0;
- }
-
- r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
- 43663, 0x03FFFFFE, 1, 30, ~0,
- &fb_div, &vclk_div, &dclk_div);
- if (r)
- return r;
-
- fb_div |= 1;
- vclk_div -= 1;
- dclk_div -= 1;
-
- /* set UPLL_FB_DIV to 0x50000 */
- WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(0x50000), ~UPLL_FB_DIV_MASK);
-
- /* deassert UPLL_RESET and UPLL_SLEEP */
- WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~(UPLL_RESET_MASK | UPLL_SLEEP_MASK));
-
- /* assert BYPASS EN and FB_DIV[0] <- ??? why? */
- WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
- WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(1), ~UPLL_FB_DIV(1));
-
- r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
- if (r)
- return r;
-
- /* assert PLL_RESET */
- WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
-
-	/* set the required FB_DIV, REF_DIV, Post divider values */
- WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REF_DIV(1), ~UPLL_REF_DIV_MASK);
- WREG32_P(CG_UPLL_FUNC_CNTL_2,
- UPLL_SW_HILEN(vclk_div >> 1) |
- UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
- UPLL_SW_HILEN2(dclk_div >> 1) |
- UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)),
- ~UPLL_SW_MASK);
-
- WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div),
- ~UPLL_FB_DIV_MASK);
-
- /* give the PLL some time to settle */
- mdelay(15);
-
- /* deassert PLL_RESET */
- WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
-
- mdelay(15);
-
- /* deassert BYPASS EN and FB_DIV[0] <- ??? why? */
- WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
- WREG32_P(CG_UPLL_FUNC_CNTL_3, 0, ~UPLL_FB_DIV(1));
-
- r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
- if (r)
- return r;
-
- /* switch VCLK and DCLK selection */
- WREG32_P(CG_UPLL_FUNC_CNTL_2,
- VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
- ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
-
- mdelay(100);
-
- return 0;
-}
-
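A worked example of the HILEN/LOLEN split in the removed function above: the high and low phase lengths must sum back to the divider, so for an odd vclk_div of 7 the code programs UPLL_SW_HILEN = 7 >> 1 = 3 and UPLL_SW_LOLEN = 3 + (7 & 1) = 4, giving the full 7-cycle period. The same split is applied to dclk_div via the HILEN2/LOLEN2 fields.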
-static const u32 r7xx_golden_registers[] =
-{
- 0x8d00, 0xffffffff, 0x0e0e0074,
- 0x8d04, 0xffffffff, 0x013a2b34,
- 0x9508, 0xffffffff, 0x00000002,
- 0x8b20, 0xffffffff, 0,
- 0x88c4, 0xffffffff, 0x000000c2,
- 0x28350, 0xffffffff, 0,
- 0x9058, 0xffffffff, 0x0fffc40f,
- 0x240c, 0xffffffff, 0x00000380,
- 0x733c, 0xffffffff, 0x00000002,
- 0x2650, 0x00040000, 0,
- 0x20bc, 0x00040000, 0,
- 0x7300, 0xffffffff, 0x001000f0
-};
-
-static const u32 r7xx_golden_dyn_gpr_registers[] =
-{
- 0x8db0, 0xffffffff, 0x98989898,
- 0x8db4, 0xffffffff, 0x98989898,
- 0x8db8, 0xffffffff, 0x98989898,
- 0x8dbc, 0xffffffff, 0x98989898,
- 0x8dc0, 0xffffffff, 0x98989898,
- 0x8dc4, 0xffffffff, 0x98989898,
- 0x8dc8, 0xffffffff, 0x98989898,
- 0x8dcc, 0xffffffff, 0x98989898,
- 0x88c4, 0xffffffff, 0x00000082
-};
-
-static const u32 rv770_golden_registers[] =
-{
- 0x562c, 0xffffffff, 0,
- 0x3f90, 0xffffffff, 0,
- 0x9148, 0xffffffff, 0,
- 0x3f94, 0xffffffff, 0,
- 0x914c, 0xffffffff, 0,
- 0x9698, 0x18000000, 0x18000000
-};
-
-static const u32 rv770ce_golden_registers[] =
-{
- 0x562c, 0xffffffff, 0,
- 0x3f90, 0xffffffff, 0x00cc0000,
- 0x9148, 0xffffffff, 0x00cc0000,
- 0x3f94, 0xffffffff, 0x00cc0000,
- 0x914c, 0xffffffff, 0x00cc0000,
- 0x9b7c, 0xffffffff, 0x00fa0000,
- 0x3f8c, 0xffffffff, 0x00fa0000,
- 0x9698, 0x18000000, 0x18000000
-};
-
-static const u32 rv770_mgcg_init[] =
-{
- 0x8bcc, 0xffffffff, 0x130300f9,
- 0x5448, 0xffffffff, 0x100,
- 0x55e4, 0xffffffff, 0x100,
- 0x160c, 0xffffffff, 0x100,
- 0x5644, 0xffffffff, 0x100,
- 0xc164, 0xffffffff, 0x100,
- 0x8a18, 0xffffffff, 0x100,
- 0x897c, 0xffffffff, 0x8000100,
- 0x8b28, 0xffffffff, 0x3c000100,
- 0x9144, 0xffffffff, 0x100,
- 0x9a1c, 0xffffffff, 0x10000,
- 0x9a50, 0xffffffff, 0x100,
- 0x9a1c, 0xffffffff, 0x10001,
- 0x9a50, 0xffffffff, 0x100,
- 0x9a1c, 0xffffffff, 0x10002,
- 0x9a50, 0xffffffff, 0x100,
- 0x9a1c, 0xffffffff, 0x10003,
- 0x9a50, 0xffffffff, 0x100,
- 0x9a1c, 0xffffffff, 0x0,
- 0x9870, 0xffffffff, 0x100,
- 0x8d58, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x0,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x1,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x2,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x3,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x4,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x5,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x6,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x7,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x8,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x9,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x8000,
- 0x9490, 0xffffffff, 0x0,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x1,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x2,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x3,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x4,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x5,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x6,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x7,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x8,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x9,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x8000,
- 0x9604, 0xffffffff, 0x0,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x1,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x2,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x3,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x4,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x5,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x6,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x7,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x8,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x9,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x80000000,
- 0x9030, 0xffffffff, 0x100,
- 0x9034, 0xffffffff, 0x100,
- 0x9038, 0xffffffff, 0x100,
- 0x903c, 0xffffffff, 0x100,
- 0x9040, 0xffffffff, 0x100,
- 0xa200, 0xffffffff, 0x100,
- 0xa204, 0xffffffff, 0x100,
- 0xa208, 0xffffffff, 0x100,
- 0xa20c, 0xffffffff, 0x100,
- 0x971c, 0xffffffff, 0x100,
- 0x915c, 0xffffffff, 0x00020001,
- 0x9160, 0xffffffff, 0x00040003,
- 0x916c, 0xffffffff, 0x00060005,
- 0x9170, 0xffffffff, 0x00080007,
- 0x9174, 0xffffffff, 0x000a0009,
- 0x9178, 0xffffffff, 0x000c000b,
- 0x917c, 0xffffffff, 0x000e000d,
- 0x9180, 0xffffffff, 0x0010000f,
- 0x918c, 0xffffffff, 0x00120011,
- 0x9190, 0xffffffff, 0x00140013,
- 0x9194, 0xffffffff, 0x00020001,
- 0x9198, 0xffffffff, 0x00040003,
- 0x919c, 0xffffffff, 0x00060005,
- 0x91a8, 0xffffffff, 0x00080007,
- 0x91ac, 0xffffffff, 0x000a0009,
- 0x91b0, 0xffffffff, 0x000c000b,
- 0x91b4, 0xffffffff, 0x000e000d,
- 0x91b8, 0xffffffff, 0x0010000f,
- 0x91c4, 0xffffffff, 0x00120011,
- 0x91c8, 0xffffffff, 0x00140013,
- 0x91cc, 0xffffffff, 0x00020001,
- 0x91d0, 0xffffffff, 0x00040003,
- 0x91d4, 0xffffffff, 0x00060005,
- 0x91e0, 0xffffffff, 0x00080007,
- 0x91e4, 0xffffffff, 0x000a0009,
- 0x91e8, 0xffffffff, 0x000c000b,
- 0x91ec, 0xffffffff, 0x00020001,
- 0x91f0, 0xffffffff, 0x00040003,
- 0x91f4, 0xffffffff, 0x00060005,
- 0x9200, 0xffffffff, 0x00080007,
- 0x9204, 0xffffffff, 0x000a0009,
- 0x9208, 0xffffffff, 0x000c000b,
- 0x920c, 0xffffffff, 0x000e000d,
- 0x9210, 0xffffffff, 0x0010000f,
- 0x921c, 0xffffffff, 0x00120011,
- 0x9220, 0xffffffff, 0x00140013,
- 0x9224, 0xffffffff, 0x00020001,
- 0x9228, 0xffffffff, 0x00040003,
- 0x922c, 0xffffffff, 0x00060005,
- 0x9238, 0xffffffff, 0x00080007,
- 0x923c, 0xffffffff, 0x000a0009,
- 0x9240, 0xffffffff, 0x000c000b,
- 0x9244, 0xffffffff, 0x000e000d,
- 0x9248, 0xffffffff, 0x0010000f,
- 0x9254, 0xffffffff, 0x00120011,
- 0x9258, 0xffffffff, 0x00140013,
- 0x925c, 0xffffffff, 0x00020001,
- 0x9260, 0xffffffff, 0x00040003,
- 0x9264, 0xffffffff, 0x00060005,
- 0x9270, 0xffffffff, 0x00080007,
- 0x9274, 0xffffffff, 0x000a0009,
- 0x9278, 0xffffffff, 0x000c000b,
- 0x927c, 0xffffffff, 0x000e000d,
- 0x9280, 0xffffffff, 0x0010000f,
- 0x928c, 0xffffffff, 0x00120011,
- 0x9290, 0xffffffff, 0x00140013,
- 0x9294, 0xffffffff, 0x00020001,
- 0x929c, 0xffffffff, 0x00040003,
- 0x92a0, 0xffffffff, 0x00060005,
- 0x92a4, 0xffffffff, 0x00080007
-};
-
-static const u32 rv710_golden_registers[] =
-{
- 0x3f90, 0x00ff0000, 0x00fc0000,
- 0x9148, 0x00ff0000, 0x00fc0000,
- 0x3f94, 0x00ff0000, 0x00fc0000,
- 0x914c, 0x00ff0000, 0x00fc0000,
- 0xb4c, 0x00000020, 0x00000020,
- 0xa180, 0xffffffff, 0x00003f3f
-};
-
-static const u32 rv710_mgcg_init[] =
-{
- 0x8bcc, 0xffffffff, 0x13030040,
- 0x5448, 0xffffffff, 0x100,
- 0x55e4, 0xffffffff, 0x100,
- 0x160c, 0xffffffff, 0x100,
- 0x5644, 0xffffffff, 0x100,
- 0xc164, 0xffffffff, 0x100,
- 0x8a18, 0xffffffff, 0x100,
- 0x897c, 0xffffffff, 0x8000100,
- 0x8b28, 0xffffffff, 0x3c000100,
- 0x9144, 0xffffffff, 0x100,
- 0x9a1c, 0xffffffff, 0x10000,
- 0x9a50, 0xffffffff, 0x100,
- 0x9a1c, 0xffffffff, 0x0,
- 0x9870, 0xffffffff, 0x100,
- 0x8d58, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x0,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x1,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x8000,
- 0x9490, 0xffffffff, 0x0,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x1,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x8000,
- 0x9604, 0xffffffff, 0x0,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x1,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x80000000,
- 0x9030, 0xffffffff, 0x100,
- 0x9034, 0xffffffff, 0x100,
- 0x9038, 0xffffffff, 0x100,
- 0x903c, 0xffffffff, 0x100,
- 0x9040, 0xffffffff, 0x100,
- 0xa200, 0xffffffff, 0x100,
- 0xa204, 0xffffffff, 0x100,
- 0xa208, 0xffffffff, 0x100,
- 0xa20c, 0xffffffff, 0x100,
- 0x971c, 0xffffffff, 0x100,
- 0x915c, 0xffffffff, 0x00020001,
- 0x9174, 0xffffffff, 0x00000003,
- 0x9178, 0xffffffff, 0x00050001,
- 0x917c, 0xffffffff, 0x00030002,
- 0x918c, 0xffffffff, 0x00000004,
- 0x9190, 0xffffffff, 0x00070006,
- 0x9194, 0xffffffff, 0x00050001,
- 0x9198, 0xffffffff, 0x00030002,
- 0x91a8, 0xffffffff, 0x00000004,
- 0x91ac, 0xffffffff, 0x00070006,
- 0x91e8, 0xffffffff, 0x00000001,
- 0x9294, 0xffffffff, 0x00000001,
- 0x929c, 0xffffffff, 0x00000002,
- 0x92a0, 0xffffffff, 0x00040003,
- 0x9150, 0xffffffff, 0x4d940000
-};
-
-static const u32 rv730_golden_registers[] =
-{
- 0x3f90, 0x00ff0000, 0x00f00000,
- 0x9148, 0x00ff0000, 0x00f00000,
- 0x3f94, 0x00ff0000, 0x00f00000,
- 0x914c, 0x00ff0000, 0x00f00000,
- 0x900c, 0xffffffff, 0x003b033f,
- 0xb4c, 0x00000020, 0x00000020,
- 0xa180, 0xffffffff, 0x00003f3f
-};
-
-static const u32 rv730_mgcg_init[] =
-{
- 0x8bcc, 0xffffffff, 0x130300f9,
- 0x5448, 0xffffffff, 0x100,
- 0x55e4, 0xffffffff, 0x100,
- 0x160c, 0xffffffff, 0x100,
- 0x5644, 0xffffffff, 0x100,
- 0xc164, 0xffffffff, 0x100,
- 0x8a18, 0xffffffff, 0x100,
- 0x897c, 0xffffffff, 0x8000100,
- 0x8b28, 0xffffffff, 0x3c000100,
- 0x9144, 0xffffffff, 0x100,
- 0x9a1c, 0xffffffff, 0x10000,
- 0x9a50, 0xffffffff, 0x100,
- 0x9a1c, 0xffffffff, 0x10001,
- 0x9a50, 0xffffffff, 0x100,
- 0x9a1c, 0xffffffff, 0x0,
- 0x9870, 0xffffffff, 0x100,
- 0x8d58, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x0,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x1,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x2,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x3,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x4,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x5,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x6,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x7,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x8000,
- 0x9490, 0xffffffff, 0x0,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x1,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x2,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x3,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x4,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x5,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x6,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x7,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x8000,
- 0x9604, 0xffffffff, 0x0,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x1,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x2,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x3,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x4,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x5,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x6,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x7,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x80000000,
- 0x9030, 0xffffffff, 0x100,
- 0x9034, 0xffffffff, 0x100,
- 0x9038, 0xffffffff, 0x100,
- 0x903c, 0xffffffff, 0x100,
- 0x9040, 0xffffffff, 0x100,
- 0xa200, 0xffffffff, 0x100,
- 0xa204, 0xffffffff, 0x100,
- 0xa208, 0xffffffff, 0x100,
- 0xa20c, 0xffffffff, 0x100,
- 0x971c, 0xffffffff, 0x100,
- 0x915c, 0xffffffff, 0x00020001,
- 0x916c, 0xffffffff, 0x00040003,
- 0x9170, 0xffffffff, 0x00000005,
- 0x9178, 0xffffffff, 0x00050001,
- 0x917c, 0xffffffff, 0x00030002,
- 0x918c, 0xffffffff, 0x00000004,
- 0x9190, 0xffffffff, 0x00070006,
- 0x9194, 0xffffffff, 0x00050001,
- 0x9198, 0xffffffff, 0x00030002,
- 0x91a8, 0xffffffff, 0x00000004,
- 0x91ac, 0xffffffff, 0x00070006,
- 0x91b0, 0xffffffff, 0x00050001,
- 0x91b4, 0xffffffff, 0x00030002,
- 0x91c4, 0xffffffff, 0x00000004,
- 0x91c8, 0xffffffff, 0x00070006,
- 0x91cc, 0xffffffff, 0x00050001,
- 0x91d0, 0xffffffff, 0x00030002,
- 0x91e0, 0xffffffff, 0x00000004,
- 0x91e4, 0xffffffff, 0x00070006,
- 0x91e8, 0xffffffff, 0x00000001,
- 0x91ec, 0xffffffff, 0x00050001,
- 0x91f0, 0xffffffff, 0x00030002,
- 0x9200, 0xffffffff, 0x00000004,
- 0x9204, 0xffffffff, 0x00070006,
- 0x9208, 0xffffffff, 0x00050001,
- 0x920c, 0xffffffff, 0x00030002,
- 0x921c, 0xffffffff, 0x00000004,
- 0x9220, 0xffffffff, 0x00070006,
- 0x9224, 0xffffffff, 0x00050001,
- 0x9228, 0xffffffff, 0x00030002,
- 0x9238, 0xffffffff, 0x00000004,
- 0x923c, 0xffffffff, 0x00070006,
- 0x9240, 0xffffffff, 0x00050001,
- 0x9244, 0xffffffff, 0x00030002,
- 0x9254, 0xffffffff, 0x00000004,
- 0x9258, 0xffffffff, 0x00070006,
- 0x9294, 0xffffffff, 0x00000001,
- 0x929c, 0xffffffff, 0x00000002,
- 0x92a0, 0xffffffff, 0x00040003,
- 0x92a4, 0xffffffff, 0x00000005
-};
-
-static const u32 rv740_golden_registers[] =
-{
- 0x88c4, 0xffffffff, 0x00000082,
- 0x28a50, 0xfffffffc, 0x00000004,
- 0x2650, 0x00040000, 0,
- 0x20bc, 0x00040000, 0,
- 0x733c, 0xffffffff, 0x00000002,
- 0x7300, 0xffffffff, 0x001000f0,
- 0x3f90, 0x00ff0000, 0,
- 0x9148, 0x00ff0000, 0,
- 0x3f94, 0x00ff0000, 0,
- 0x914c, 0x00ff0000, 0,
- 0x240c, 0xffffffff, 0x00000380,
- 0x8a14, 0x00000007, 0x00000007,
- 0x8b24, 0xffffffff, 0x00ff0fff,
- 0x28a4c, 0xffffffff, 0x00004000,
- 0xa180, 0xffffffff, 0x00003f3f,
- 0x8d00, 0xffffffff, 0x0e0e003a,
- 0x8d04, 0xffffffff, 0x013a0e2a,
- 0x8c00, 0xffffffff, 0xe400000f,
- 0x8db0, 0xffffffff, 0x98989898,
- 0x8db4, 0xffffffff, 0x98989898,
- 0x8db8, 0xffffffff, 0x98989898,
- 0x8dbc, 0xffffffff, 0x98989898,
- 0x8dc0, 0xffffffff, 0x98989898,
- 0x8dc4, 0xffffffff, 0x98989898,
- 0x8dc8, 0xffffffff, 0x98989898,
- 0x8dcc, 0xffffffff, 0x98989898,
- 0x9058, 0xffffffff, 0x0fffc40f,
- 0x900c, 0xffffffff, 0x003b033f,
- 0x28350, 0xffffffff, 0,
- 0x8cf0, 0x1fffffff, 0x08e00420,
- 0x9508, 0xffffffff, 0x00000002,
- 0x88c4, 0xffffffff, 0x000000c2,
- 0x9698, 0x18000000, 0x18000000
-};
-
-static const u32 rv740_mgcg_init[] =
-{
- 0x8bcc, 0xffffffff, 0x13030100,
- 0x5448, 0xffffffff, 0x100,
- 0x55e4, 0xffffffff, 0x100,
- 0x160c, 0xffffffff, 0x100,
- 0x5644, 0xffffffff, 0x100,
- 0xc164, 0xffffffff, 0x100,
- 0x8a18, 0xffffffff, 0x100,
- 0x897c, 0xffffffff, 0x100,
- 0x8b28, 0xffffffff, 0x100,
- 0x9144, 0xffffffff, 0x100,
- 0x9a1c, 0xffffffff, 0x10000,
- 0x9a50, 0xffffffff, 0x100,
- 0x9a1c, 0xffffffff, 0x10001,
- 0x9a50, 0xffffffff, 0x100,
- 0x9a1c, 0xffffffff, 0x10002,
- 0x9a50, 0xffffffff, 0x100,
- 0x9a1c, 0xffffffff, 0x10003,
- 0x9a50, 0xffffffff, 0x100,
- 0x9a1c, 0xffffffff, 0x0,
- 0x9870, 0xffffffff, 0x100,
- 0x8d58, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x0,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x1,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x2,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x3,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x4,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x5,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x6,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x7,
- 0x9510, 0xffffffff, 0x100,
- 0x9500, 0xffffffff, 0x8000,
- 0x9490, 0xffffffff, 0x0,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x1,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x2,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x3,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x4,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x5,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x6,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x7,
- 0x949c, 0xffffffff, 0x100,
- 0x9490, 0xffffffff, 0x8000,
- 0x9604, 0xffffffff, 0x0,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x1,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x2,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x3,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x4,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x5,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x6,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x7,
- 0x9654, 0xffffffff, 0x100,
- 0x9604, 0xffffffff, 0x80000000,
- 0x9030, 0xffffffff, 0x100,
- 0x9034, 0xffffffff, 0x100,
- 0x9038, 0xffffffff, 0x100,
- 0x903c, 0xffffffff, 0x100,
- 0x9040, 0xffffffff, 0x100,
- 0xa200, 0xffffffff, 0x100,
- 0xa204, 0xffffffff, 0x100,
- 0xa208, 0xffffffff, 0x100,
- 0xa20c, 0xffffffff, 0x100,
- 0x971c, 0xffffffff, 0x100,
- 0x915c, 0xffffffff, 0x00020001,
- 0x9160, 0xffffffff, 0x00040003,
- 0x916c, 0xffffffff, 0x00060005,
- 0x9170, 0xffffffff, 0x00080007,
- 0x9174, 0xffffffff, 0x000a0009,
- 0x9178, 0xffffffff, 0x000c000b,
- 0x917c, 0xffffffff, 0x000e000d,
- 0x9180, 0xffffffff, 0x0010000f,
- 0x918c, 0xffffffff, 0x00120011,
- 0x9190, 0xffffffff, 0x00140013,
- 0x9194, 0xffffffff, 0x00020001,
- 0x9198, 0xffffffff, 0x00040003,
- 0x919c, 0xffffffff, 0x00060005,
- 0x91a8, 0xffffffff, 0x00080007,
- 0x91ac, 0xffffffff, 0x000a0009,
- 0x91b0, 0xffffffff, 0x000c000b,
- 0x91b4, 0xffffffff, 0x000e000d,
- 0x91b8, 0xffffffff, 0x0010000f,
- 0x91c4, 0xffffffff, 0x00120011,
- 0x91c8, 0xffffffff, 0x00140013,
- 0x91cc, 0xffffffff, 0x00020001,
- 0x91d0, 0xffffffff, 0x00040003,
- 0x91d4, 0xffffffff, 0x00060005,
- 0x91e0, 0xffffffff, 0x00080007,
- 0x91e4, 0xffffffff, 0x000a0009,
- 0x91e8, 0xffffffff, 0x000c000b,
- 0x91ec, 0xffffffff, 0x00020001,
- 0x91f0, 0xffffffff, 0x00040003,
- 0x91f4, 0xffffffff, 0x00060005,
- 0x9200, 0xffffffff, 0x00080007,
- 0x9204, 0xffffffff, 0x000a0009,
- 0x9208, 0xffffffff, 0x000c000b,
- 0x920c, 0xffffffff, 0x000e000d,
- 0x9210, 0xffffffff, 0x0010000f,
- 0x921c, 0xffffffff, 0x00120011,
- 0x9220, 0xffffffff, 0x00140013,
- 0x9224, 0xffffffff, 0x00020001,
- 0x9228, 0xffffffff, 0x00040003,
- 0x922c, 0xffffffff, 0x00060005,
- 0x9238, 0xffffffff, 0x00080007,
- 0x923c, 0xffffffff, 0x000a0009,
- 0x9240, 0xffffffff, 0x000c000b,
- 0x9244, 0xffffffff, 0x000e000d,
- 0x9248, 0xffffffff, 0x0010000f,
- 0x9254, 0xffffffff, 0x00120011,
- 0x9258, 0xffffffff, 0x00140013,
- 0x9294, 0xffffffff, 0x00020001,
- 0x929c, 0xffffffff, 0x00040003,
- 0x92a0, 0xffffffff, 0x00060005,
- 0x92a4, 0xffffffff, 0x00080007
-};
-
-static void rv770_init_golden_registers(struct radeon_device *rdev)
-{
- switch (rdev->family) {
- case CHIP_RV770:
- radeon_program_register_sequence(rdev,
- r7xx_golden_registers,
- (const u32)ARRAY_SIZE(r7xx_golden_registers));
- radeon_program_register_sequence(rdev,
- r7xx_golden_dyn_gpr_registers,
- (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
- if (rdev->pdev->device == 0x994e)
- radeon_program_register_sequence(rdev,
- rv770ce_golden_registers,
- (const u32)ARRAY_SIZE(rv770ce_golden_registers));
- else
- radeon_program_register_sequence(rdev,
- rv770_golden_registers,
- (const u32)ARRAY_SIZE(rv770_golden_registers));
- radeon_program_register_sequence(rdev,
- rv770_mgcg_init,
- (const u32)ARRAY_SIZE(rv770_mgcg_init));
- break;
- case CHIP_RV730:
- radeon_program_register_sequence(rdev,
- r7xx_golden_registers,
- (const u32)ARRAY_SIZE(r7xx_golden_registers));
- radeon_program_register_sequence(rdev,
- r7xx_golden_dyn_gpr_registers,
- (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
- radeon_program_register_sequence(rdev,
- rv730_golden_registers,
- (const u32)ARRAY_SIZE(rv730_golden_registers));
- radeon_program_register_sequence(rdev,
- rv730_mgcg_init,
- (const u32)ARRAY_SIZE(rv730_mgcg_init));
- break;
- case CHIP_RV710:
- radeon_program_register_sequence(rdev,
- r7xx_golden_registers,
- (const u32)ARRAY_SIZE(r7xx_golden_registers));
- radeon_program_register_sequence(rdev,
- r7xx_golden_dyn_gpr_registers,
- (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
- radeon_program_register_sequence(rdev,
- rv710_golden_registers,
- (const u32)ARRAY_SIZE(rv710_golden_registers));
- radeon_program_register_sequence(rdev,
- rv710_mgcg_init,
- (const u32)ARRAY_SIZE(rv710_mgcg_init));
- break;
- case CHIP_RV740:
- radeon_program_register_sequence(rdev,
- rv740_golden_registers,
- (const u32)ARRAY_SIZE(rv740_golden_registers));
- radeon_program_register_sequence(rdev,
- rv740_mgcg_init,
- (const u32)ARRAY_SIZE(rv740_mgcg_init));
- break;
- default:
- break;
- }
-}
-
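Each golden-register table above is a flat array of (offset, and_mask, or_value) triplets consumed three at a time, as the ARRAY_SIZE-based calls in the removed rv770_init_golden_registers() show. A hedged sketch of what the consumer does; the real radeon_program_register_sequence() lives elsewhere in the driver:

/* Sketch: apply an (offset, and_mask, or_value) triplet table with
 * masked read-modify-writes; a full mask means a plain overwrite. */
static void program_register_sequence(struct radeon_device *rdev,
				      const u32 *regs, u32 array_size)
{
	u32 i, reg, and_mask, or_mask, tmp;

	for (i = 0; i < array_size; i += 3) {
		reg = regs[i + 0];
		and_mask = regs[i + 1];
		or_mask = regs[i + 2];

		if (and_mask == 0xffffffff)
			tmp = or_mask;
		else
			tmp = (RREG32(reg) & ~and_mask) | or_mask;
		WREG32(reg, tmp);
	}
}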
-#define PCIE_BUS_CLK 10000
-#define TCLK (PCIE_BUS_CLK / 10)
-
-/**
- * rv770_get_xclk - get the xclk
- *
- * @rdev: radeon_device pointer
- *
- * Returns the reference clock used by the gfx engine
- * (r7xx-cayman).
- */
-u32 rv770_get_xclk(struct radeon_device *rdev)
-{
- u32 reference_clock = rdev->clock.spll.reference_freq;
- u32 tmp = RREG32(CG_CLKPIN_CNTL);
-
- if (tmp & MUX_TCLK_TO_XCLK)
- return TCLK;
-
- if (tmp & XTALIN_DIVIDE)
- return reference_clock / 4;
-
- return reference_clock;
-}
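In units terms, assuming radeon's usual convention of clock values in 10 kHz steps: PCIE_BUS_CLK = 10000 represents 100 MHz, so the fixed TCLK fallback of 10000 / 10 = 1000 represents 10 MHz, and the XTALIN_DIVIDE path returns the reference crystal frequency divided by four.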
-
-void rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
@@ -833,15 +75,9 @@ void rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
/* Unlock the lock, so double-buffering can take place inside vblank */
tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
-}
-
-bool rv770_page_flip_pending(struct radeon_device *rdev, int crtc_id)
-{
- struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
/* Return current update_pending status: */
- return !!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) &
- AVIVO_D1GRPH_SURFACE_UPDATE_PENDING);
+ return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
}
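After this change rv770_page_flip() folds the old rv770_page_flip_pending() helper into its return value. A hypothetical caller-side sketch, not part of this patch, of how the returned bit would be used:

/* Hypothetical caller: a nonzero return means the new surface
 * address has not been latched yet, so flip completion should be
 * signalled from the vblank handler rather than immediately. */
static bool flip_and_check(struct radeon_device *rdev, int crtc_id,
			   u64 crtc_base)
{
	return rv770_page_flip(rdev, crtc_id, crtc_base) != 0;
}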
/* get temperature in millidegrees */
@@ -898,6 +134,7 @@ static int rv770_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
+ radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
@@ -1074,8 +311,7 @@ static void rv770_mc_program(struct radeon_device *rdev)
*/
void r700_cp_stop(struct radeon_device *rdev)
{
- if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
WREG32(SCRATCH_UMSK, 0);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
@@ -1102,13 +338,13 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
mdelay(15);
WREG32(GRBM_SOFT_RESET, 0);
- fw_data = (const __be32 *)rdev->pfp_fw->data;
+ fw_data = (const __be32 *)rdev->pfp_fw;
WREG32(CP_PFP_UCODE_ADDR, 0);
for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
WREG32(CP_PFP_UCODE_ADDR, 0);
- fw_data = (const __be32 *)rdev->me_fw->data;
+ fw_data = (const __be32 *)rdev->me_fw;
WREG32(CP_ME_RAM_WADDR, 0);
for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
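The firmware images are stored as big-endian 32-bit words (note the __be32 cast), so each word is swapped with be32_to_cpup() as it is streamed into the auto-incrementing ucode data port, with the address register rewound to 0 before and after. A minimal sketch of that idiom:

/* Sketch of the upload pattern above: rewind the write pointer,
 * stream byte-swapped words, then rewind again before releasing
 * the CP to execute from word 0. */
static void upload_be32_ucode(struct radeon_device *rdev,
			      const __be32 *fw, unsigned nwords)
{
	unsigned i;

	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < nwords; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw++));
	WREG32(CP_PFP_UCODE_ADDR, 0);
}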
@@ -1127,35 +363,6 @@ void r700_cp_fini(struct radeon_device *rdev)
radeon_scratch_free(rdev, ring->rptr_save_reg);
}
-void rv770_set_clk_bypass_mode(struct radeon_device *rdev)
-{
- u32 tmp, i;
-
- if (rdev->flags & RADEON_IS_IGP)
- return;
-
- tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
- tmp &= SCLK_MUX_SEL_MASK;
- tmp |= SCLK_MUX_SEL(1) | SCLK_MUX_UPDATE;
- WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
-
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(CG_SPLL_STATUS) & SPLL_CHG_STATUS)
- break;
- udelay(1);
- }
-
- tmp &= ~SCLK_MUX_UPDATE;
- WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
-
- tmp = RREG32(MPLL_CNTL_MODE);
- if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
- tmp &= ~RV730_MPLL_MCLK_SEL;
- else
- tmp &= ~MPLL_MCLK_SEL;
- WREG32(MPLL_CNTL_MODE, tmp);
-}
-
/*
* Core functions
*/
@@ -1175,6 +382,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
u32 hdp_host_path_cntl;
u32 sq_dyn_gpr_size_simd_ab_0;
u32 gb_tiling_config = 0;
+ u32 cc_rb_backend_disable = 0;
u32 cc_gc_shader_pipe_config = 0;
u32 mc_arb_ramcfg;
u32 db_debug4, tmp;
@@ -1308,10 +516,21 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(SPI_CONFIG_CNTL, 0);
}
+ cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+ tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
+ if (tmp < rdev->config.rv770.max_backends) {
+ rdev->config.rv770.max_backends = tmp;
+ }
+
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
- tmp = rdev->config.rv770.max_simds -
- r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
- rdev->config.rv770.active_simds = tmp;
+ tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
+ if (tmp < rdev->config.rv770.max_pipes) {
+ rdev->config.rv770.max_pipes = tmp;
+ }
+ tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
+ if (tmp < rdev->config.rv770.max_simds) {
+ rdev->config.rv770.max_simds = tmp;
+ }
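The three clamps added above share one shape: count the disabled units with r600_count_pipe_bits(), which is a population count over the masked fuse field, and cap the configured maximum at what is physically active. A hedged generic form, with hweight32() standing in for the popcount:

/* Sketch of the clamp pattern above: configured count capped by
 * (max units) - (bits set in the hardware disable mask). */
static u32 clamp_active_units(u32 configured, u32 max_units,
			      u32 disable_mask)
{
	u32 active = max_units - hweight32(disable_mask);

	return min(configured, active);
}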
switch (rdev->config.rv770.max_tile_pipes) {
case 1:
@@ -1331,14 +550,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
- tmp = 0;
- for (i = 0; i < rdev->config.rv770.max_backends; i++)
- tmp |= (1 << i);
- /* if all the backends are disabled, fix it up here */
- if ((disabled_rb_mask & tmp) == tmp) {
- for (i = 0; i < rdev->config.rv770.max_backends; i++)
- disabled_rb_mask &= ~(1 << i);
- }
tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
R7XX_MAX_BACKENDS, disabled_rb_mask);
@@ -1373,11 +584,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff));
- if (rdev->family == CHIP_RV730) {
- WREG32(UVD_UDEC_DB_TILING_CONFIG, (gb_tiling_config & 0xffff));
- WREG32(UVD_UDEC_DBW_TILING_CONFIG, (gb_tiling_config & 0xffff));
- WREG32(UVD_UDEC_TILING_CONFIG, (gb_tiling_config & 0xffff));
- }
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
@@ -1607,7 +813,7 @@ void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
}
if (rdev->flags & RADEON_IS_AGP) {
size_bf = mc->gtt_start;
- size_af = mc->mc_mask - mc->gtt_end;
+ size_af = 0xFFFFFFFF - mc->gtt_end;
if (size_bf > size_af) {
if (mc->mc_vram_size > size_bf) {
dev_warn(rdev->dev, "limiting VRAM\n");
@@ -1679,6 +885,80 @@ static int rv770_mc_init(struct radeon_device *rdev)
return 0;
}
+/**
+ * rv770_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU paging using the DMA engine (r7xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int rv770_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFF)
+ cur_size_in_dw = 0xFFFF;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
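A worked example for the packet split above, assuming the usual 4 KiB GPU page: copying 1024 pages moves 1024 * 4096 / 4 = 1,048,576 dwords, so num_loops = DIV_ROUND_UP(1048576, 0xFFFF) = 17 packets, and the ring reservation of num_loops * 5 + 8 dwords covers the five-dword copy packets plus the fence and semaphore overhead. Each packet carries the low 32 bits of both addresses plus their upper 8 bits in separate dwords, giving 40-bit DMA addressing.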
static int rv770_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring;
@@ -1687,13 +967,20 @@ static int rv770_startup(struct radeon_device *rdev)
/* enable pcie gen2 link */
rv770_pcie_gen2_enable(rdev);
- /* scratch needs to be initialized before MC */
+ rv770_mc_program(rdev);
+
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+
r = r600_vram_scratch_init(rdev);
if (r)
return r;
- rv770_mc_program(rdev);
-
if (rdev->flags & RADEON_IS_AGP) {
rv770_agp_enable(rdev);
} else {
@@ -1703,6 +990,12 @@ static int rv770_startup(struct radeon_device *rdev)
}
rv770_gpu_init(rdev);
+ r = r600_blit_init(rdev);
+ if (r) {
+ r600_blit_fini(rdev);
+ rdev->asic->copy.copy = NULL;
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ }
/* allocate wb buffer */
r = radeon_wb_init(rdev);
@@ -1721,17 +1014,6 @@ static int rv770_startup(struct radeon_device *rdev)
return r;
}
- r = uvd_v2_2_resume(rdev);
- if (!r) {
- r = radeon_fence_driver_start_ring(rdev,
- R600_RING_TYPE_UVD_INDEX);
- if (r)
- dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
- }
-
- if (r)
- rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
-
/* Enable IRQ */
if (!rdev->irq.installed) {
r = radeon_irq_kms_init(rdev);
@@ -1749,13 +1031,15 @@ static int rv770_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- RADEON_CP_PACKET2);
+ R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
- DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ DMA_RB_RPTR, DMA_RB_WPTR,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
@@ -1770,24 +1054,13 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
- ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- RADEON_CP_PACKET2);
- if (!r)
- r = uvd_v1_0_init(rdev);
-
- if (r)
- DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
- }
-
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}
- r = radeon_audio_init(rdev);
+ r = r600_audio_init(rdev);
if (r) {
DRM_ERROR("radeon: audio init failed\n");
return r;
@@ -1807,12 +1080,6 @@ int rv770_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
- /* init golden registers */
- rv770_init_golden_registers(rdev);
-
- if (rdev->pm.pm_method == PM_METHOD_DPM)
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = rv770_startup(rdev);
if (r) {
@@ -1827,10 +1094,7 @@ int rv770_resume(struct radeon_device *rdev)
int rv770_suspend(struct radeon_device *rdev)
{
- radeon_pm_suspend(rdev);
- radeon_audio_fini(rdev);
- uvd_v1_0_fini(rdev);
- radeon_uvd_suspend(rdev);
+ r600_audio_fini(rdev);
r700_cp_stop(rdev);
r600_dma_stop(rdev);
r600_irq_suspend(rdev);
@@ -1872,8 +1136,6 @@ int rv770_init(struct radeon_device *rdev)
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
- /* init golden registers */
- rv770_init_golden_registers(rdev);
/* Initialize scratch registers */
r600_scratch_init(rdev);
/* Initialize surface registers */
@@ -1898,30 +1160,12 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
- r = r600_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
-
- /* Initialize power management */
- radeon_pm_init(rdev);
-
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
- r = radeon_uvd_init(rdev);
- if (!r) {
- rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
- r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX],
- 4096);
- }
-
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -1948,15 +1192,13 @@ int rv770_init(struct radeon_device *rdev)
void rv770_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
+ r600_blit_fini(rdev);
r700_cp_fini(rdev);
r600_dma_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
- uvd_v1_0_fini(rdev);
- radeon_uvd_fini(rdev);
rv770_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
@@ -1973,6 +1215,7 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
u32 link_width_cntl, lanes, speed_cntl, tmp;
u16 link_cntl2;
u32 mask;
+ int ret;
if (radeon_pcie_gen2 == 0)
return;
@@ -1987,32 +1230,33 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
if (ASIC_IS_X2(rdev))
return;
- if (drm_pcie_get_speed_cap_mask(rdev->ddev, &mask))
+ ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
+ if (ret != 0)
return;
- if (!(mask & (DRM_PCIE_SPEED_50|DRM_PCIE_SPEED_80)))
+ if (!(mask & DRM_PCIE_SPEED_50))
return;
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
/* advertise upconfig capability */
- link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
- link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
LC_RECONFIG_ARC_MISSING_ESCAPE);
link_width_cntl |= lanes | LC_RECONFIG_NOW |
LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT;
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
} else {
link_width_cntl |= LC_UPCONFIGURE_DIS;
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
(speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
@@ -2025,29 +1269,29 @@ static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
WREG16(0x4088, link_cntl2);
WREG32(MM_CFGREGS_CNTL, 0);
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
- WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
- WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
- WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
speed_cntl |= LC_GEN2_EN_STRAP;
- WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+ WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
} else {
- link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
if (1)
link_width_cntl |= LC_UPCONFIGURE_DIS;
else
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+ WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
}
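Every register access in this function is a read-modify-write through the indirect PCIE port space; the RREG32_PCIE_PORT to RREG32_PCIE_P rename is mechanical. A hedged sketch of the recurring idiom:

/* Sketch of the idiom repeated above: read the indirect PCIE port
 * register, clear then set the requested bits, and write it back. */
static void pcie_p_rmw(struct radeon_device *rdev, u32 reg,
		       u32 clear, u32 set)
{
	u32 tmp = RREG32_PCIE_P(reg);

	tmp &= ~clear;
	tmp |= set;
	WREG32_PCIE_P(reg, tmp);
}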
diff --git a/sys/dev/pci/drm/radeon/rv770d.h b/sys/dev/pci/drm/radeon/rv770d.h
index 9ef2064b1c9..338c540defc 100644
--- a/sys/dev/pci/drm/radeon/rv770d.h
+++ b/sys/dev/pci/drm/radeon/rv770d.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: rv770d.h,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2009 Advanced Micro Devices, Inc.
* Copyright 2009 Red Hat Inc.
@@ -38,277 +39,6 @@
#define R7XX_MAX_PIPES 8
#define R7XX_MAX_PIPES_MASK 0xff
-/* discrete uvd clocks */
-#define CG_UPLL_FUNC_CNTL 0x718
-# define UPLL_RESET_MASK 0x00000001
-# define UPLL_SLEEP_MASK 0x00000002
-# define UPLL_BYPASS_EN_MASK 0x00000004
-# define UPLL_CTLREQ_MASK 0x00000008
-# define UPLL_REF_DIV(x) ((x) << 16)
-# define UPLL_REF_DIV_MASK 0x003F0000
-# define UPLL_CTLACK_MASK 0x40000000
-# define UPLL_CTLACK2_MASK 0x80000000
-#define CG_UPLL_FUNC_CNTL_2 0x71c
-# define UPLL_SW_HILEN(x) ((x) << 0)
-# define UPLL_SW_LOLEN(x) ((x) << 4)
-# define UPLL_SW_HILEN2(x) ((x) << 8)
-# define UPLL_SW_LOLEN2(x) ((x) << 12)
-# define UPLL_SW_MASK 0x0000FFFF
-# define VCLK_SRC_SEL(x) ((x) << 20)
-# define VCLK_SRC_SEL_MASK 0x01F00000
-# define DCLK_SRC_SEL(x) ((x) << 25)
-# define DCLK_SRC_SEL_MASK 0x3E000000
-#define CG_UPLL_FUNC_CNTL_3 0x720
-# define UPLL_FB_DIV(x) ((x) << 0)
-# define UPLL_FB_DIV_MASK 0x01FFFFFF
-
-/* pm registers */
-#define SMC_SRAM_ADDR 0x200
-#define SMC_SRAM_AUTO_INC_DIS (1 << 16)
-#define SMC_SRAM_DATA 0x204
-#define SMC_IO 0x208
-#define SMC_RST_N (1 << 0)
-#define SMC_STOP_MODE (1 << 2)
-#define SMC_CLK_EN (1 << 11)
-#define SMC_MSG 0x20c
-#define HOST_SMC_MSG(x) ((x) << 0)
-#define HOST_SMC_MSG_MASK (0xff << 0)
-#define HOST_SMC_MSG_SHIFT 0
-#define HOST_SMC_RESP(x) ((x) << 8)
-#define HOST_SMC_RESP_MASK (0xff << 8)
-#define HOST_SMC_RESP_SHIFT 8
-#define SMC_HOST_MSG(x) ((x) << 16)
-#define SMC_HOST_MSG_MASK (0xff << 16)
-#define SMC_HOST_MSG_SHIFT 16
-#define SMC_HOST_RESP(x) ((x) << 24)
-#define SMC_HOST_RESP_MASK (0xff << 24)
-#define SMC_HOST_RESP_SHIFT 24
-
-#define SMC_ISR_FFD8_FFDB 0x218
-
-#define CG_SPLL_FUNC_CNTL 0x600
-#define SPLL_RESET (1 << 0)
-#define SPLL_SLEEP (1 << 1)
-#define SPLL_DIVEN (1 << 2)
-#define SPLL_BYPASS_EN (1 << 3)
-#define SPLL_REF_DIV(x) ((x) << 4)
-#define SPLL_REF_DIV_MASK (0x3f << 4)
-#define SPLL_HILEN(x) ((x) << 12)
-#define SPLL_HILEN_MASK (0xf << 12)
-#define SPLL_LOLEN(x) ((x) << 16)
-#define SPLL_LOLEN_MASK (0xf << 16)
-#define CG_SPLL_FUNC_CNTL_2 0x604
-#define SCLK_MUX_SEL(x) ((x) << 0)
-#define SCLK_MUX_SEL_MASK (0x1ff << 0)
-#define SCLK_MUX_UPDATE (1 << 26)
-#define CG_SPLL_FUNC_CNTL_3 0x608
-#define SPLL_FB_DIV(x) ((x) << 0)
-#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
-#define SPLL_DITHEN (1 << 28)
-#define CG_SPLL_STATUS 0x60c
-#define SPLL_CHG_STATUS (1 << 1)
-
-#define SPLL_CNTL_MODE 0x610
-#define SPLL_DIV_SYNC (1 << 5)
-
-#define MPLL_CNTL_MODE 0x61c
-# define MPLL_MCLK_SEL (1 << 11)
-# define RV730_MPLL_MCLK_SEL (1 << 25)
-
-#define MPLL_AD_FUNC_CNTL 0x624
-#define CLKF(x) ((x) << 0)
-#define CLKF_MASK (0x7f << 0)
-#define CLKR(x) ((x) << 7)
-#define CLKR_MASK (0x1f << 7)
-#define CLKFRAC(x) ((x) << 12)
-#define CLKFRAC_MASK (0x1f << 12)
-#define YCLK_POST_DIV(x) ((x) << 17)
-#define YCLK_POST_DIV_MASK (3 << 17)
-#define IBIAS(x) ((x) << 20)
-#define IBIAS_MASK (0x3ff << 20)
-#define RESET (1 << 30)
-#define PDNB (1 << 31)
-#define MPLL_AD_FUNC_CNTL_2 0x628
-#define BYPASS (1 << 19)
-#define BIAS_GEN_PDNB (1 << 24)
-#define RESET_EN (1 << 25)
-#define VCO_MODE (1 << 29)
-#define MPLL_DQ_FUNC_CNTL 0x62c
-#define MPLL_DQ_FUNC_CNTL_2 0x630
-
-#define GENERAL_PWRMGT 0x63c
-# define GLOBAL_PWRMGT_EN (1 << 0)
-# define STATIC_PM_EN (1 << 1)
-# define THERMAL_PROTECTION_DIS (1 << 2)
-# define THERMAL_PROTECTION_TYPE (1 << 3)
-# define ENABLE_GEN2PCIE (1 << 4)
-# define ENABLE_GEN2XSP (1 << 5)
-# define SW_SMIO_INDEX(x) ((x) << 6)
-# define SW_SMIO_INDEX_MASK (3 << 6)
-# define SW_SMIO_INDEX_SHIFT 6
-# define LOW_VOLT_D2_ACPI (1 << 8)
-# define LOW_VOLT_D3_ACPI (1 << 9)
-# define VOLT_PWRMGT_EN (1 << 10)
-# define BACKBIAS_PAD_EN (1 << 18)
-# define BACKBIAS_VALUE (1 << 19)
-# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
-# define AC_DC_SW (1 << 24)
-
-#define CG_TPC 0x640
-#define SCLK_PWRMGT_CNTL 0x644
-# define SCLK_PWRMGT_OFF (1 << 0)
-# define SCLK_LOW_D1 (1 << 1)
-# define FIR_RESET (1 << 4)
-# define FIR_FORCE_TREND_SEL (1 << 5)
-# define FIR_TREND_MODE (1 << 6)
-# define DYN_GFX_CLK_OFF_EN (1 << 7)
-# define GFX_CLK_FORCE_ON (1 << 8)
-# define GFX_CLK_REQUEST_OFF (1 << 9)
-# define GFX_CLK_FORCE_OFF (1 << 10)
-# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
-# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
-# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
-#define MCLK_PWRMGT_CNTL 0x648
-# define DLL_SPEED(x) ((x) << 0)
-# define DLL_SPEED_MASK (0x1f << 0)
-# define MPLL_PWRMGT_OFF (1 << 5)
-# define DLL_READY (1 << 6)
-# define MC_INT_CNTL (1 << 7)
-# define MRDCKA0_SLEEP (1 << 8)
-# define MRDCKA1_SLEEP (1 << 9)
-# define MRDCKB0_SLEEP (1 << 10)
-# define MRDCKB1_SLEEP (1 << 11)
-# define MRDCKC0_SLEEP (1 << 12)
-# define MRDCKC1_SLEEP (1 << 13)
-# define MRDCKD0_SLEEP (1 << 14)
-# define MRDCKD1_SLEEP (1 << 15)
-# define MRDCKA0_RESET (1 << 16)
-# define MRDCKA1_RESET (1 << 17)
-# define MRDCKB0_RESET (1 << 18)
-# define MRDCKB1_RESET (1 << 19)
-# define MRDCKC0_RESET (1 << 20)
-# define MRDCKC1_RESET (1 << 21)
-# define MRDCKD0_RESET (1 << 22)
-# define MRDCKD1_RESET (1 << 23)
-# define DLL_READY_READ (1 << 24)
-# define USE_DISPLAY_GAP (1 << 25)
-# define USE_DISPLAY_URGENT_NORMAL (1 << 26)
-# define MPLL_TURNOFF_D2 (1 << 28)
-#define DLL_CNTL 0x64c
-# define MRDCKA0_BYPASS (1 << 24)
-# define MRDCKA1_BYPASS (1 << 25)
-# define MRDCKB0_BYPASS (1 << 26)
-# define MRDCKB1_BYPASS (1 << 27)
-# define MRDCKC0_BYPASS (1 << 28)
-# define MRDCKC1_BYPASS (1 << 29)
-# define MRDCKD0_BYPASS (1 << 30)
-# define MRDCKD1_BYPASS (1 << 31)
-
-#define MPLL_TIME 0x654
-# define MPLL_LOCK_TIME(x) ((x) << 0)
-# define MPLL_LOCK_TIME_MASK (0xffff << 0)
-# define MPLL_RESET_TIME(x) ((x) << 16)
-# define MPLL_RESET_TIME_MASK (0xffff << 16)
-
-#define CG_CLKPIN_CNTL 0x660
-# define MUX_TCLK_TO_XCLK (1 << 8)
-# define XTALIN_DIVIDE (1 << 9)
-
-#define TARGET_AND_CURRENT_PROFILE_INDEX 0x66c
-# define CURRENT_PROFILE_INDEX_MASK (0xf << 4)
-# define CURRENT_PROFILE_INDEX_SHIFT 4
-
-#define S0_VID_LOWER_SMIO_CNTL 0x678
-#define S1_VID_LOWER_SMIO_CNTL 0x67c
-#define S2_VID_LOWER_SMIO_CNTL 0x680
-#define S3_VID_LOWER_SMIO_CNTL 0x684
-
-#define CG_FTV 0x690
-#define CG_FFCT_0 0x694
-# define UTC_0(x) ((x) << 0)
-# define UTC_0_MASK (0x3ff << 0)
-# define DTC_0(x) ((x) << 10)
-# define DTC_0_MASK (0x3ff << 10)
-
-#define CG_BSP 0x6d0
-# define BSP(x) ((x) << 0)
-# define BSP_MASK (0xffff << 0)
-# define BSU(x) ((x) << 16)
-# define BSU_MASK (0xf << 16)
-#define CG_AT 0x6d4
-# define CG_R(x) ((x) << 0)
-# define CG_R_MASK (0xffff << 0)
-# define CG_L(x) ((x) << 16)
-# define CG_L_MASK (0xffff << 16)
-#define CG_GIT 0x6d8
-# define CG_GICST(x) ((x) << 0)
-# define CG_GICST_MASK (0xffff << 0)
-# define CG_GIPOT(x) ((x) << 16)
-# define CG_GIPOT_MASK (0xffff << 16)
-
-#define CG_SSP 0x6e8
-# define SST(x) ((x) << 0)
-# define SST_MASK (0xffff << 0)
-# define SSTU(x) ((x) << 16)
-# define SSTU_MASK (0xf << 16)
-
-#define CG_DISPLAY_GAP_CNTL 0x714
-# define DISP1_GAP(x) ((x) << 0)
-# define DISP1_GAP_MASK (3 << 0)
-# define DISP2_GAP(x) ((x) << 2)
-# define DISP2_GAP_MASK (3 << 2)
-# define VBI_TIMER_COUNT(x) ((x) << 4)
-# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
-# define VBI_TIMER_UNIT(x) ((x) << 20)
-# define VBI_TIMER_UNIT_MASK (7 << 20)
-# define DISP1_GAP_MCHG(x) ((x) << 24)
-# define DISP1_GAP_MCHG_MASK (3 << 24)
-# define DISP2_GAP_MCHG(x) ((x) << 26)
-# define DISP2_GAP_MCHG_MASK (3 << 26)
-
-#define CG_SPLL_SPREAD_SPECTRUM 0x790
-#define SSEN (1 << 0)
-#define CLKS(x) ((x) << 4)
-#define CLKS_MASK (0xfff << 4)
-#define CG_SPLL_SPREAD_SPECTRUM_2 0x794
-#define CLKV(x) ((x) << 0)
-#define CLKV_MASK (0x3ffffff << 0)
-#define CG_MPLL_SPREAD_SPECTRUM 0x798
-#define CG_UPLL_SPREAD_SPECTRUM 0x79c
-# define SSEN_MASK 0x00000001
-
-#define CG_CGTT_LOCAL_0 0x7d0
-#define CG_CGTT_LOCAL_1 0x7d4
-
-#define BIOS_SCRATCH_4 0x1734
-
-#define MC_SEQ_MISC0 0x2a00
-#define MC_SEQ_MISC0_GDDR5_SHIFT 28
-#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
-#define MC_SEQ_MISC0_GDDR5_VALUE 5
-
-#define MC_ARB_SQM_RATIO 0x2770
-#define STATE0(x) ((x) << 0)
-#define STATE0_MASK (0xff << 0)
-#define STATE1(x) ((x) << 8)
-#define STATE1_MASK (0xff << 8)
-#define STATE2(x) ((x) << 16)
-#define STATE2_MASK (0xff << 16)
-#define STATE3(x) ((x) << 24)
-#define STATE3_MASK (0xff << 24)
-
-#define MC_ARB_RFSH_RATE 0x27b0
-#define POWERMODE0(x) ((x) << 0)
-#define POWERMODE0_MASK (0xff << 0)
-#define POWERMODE1(x) ((x) << 8)
-#define POWERMODE1_MASK (0xff << 8)
-#define POWERMODE2(x) ((x) << 16)
-#define POWERMODE2_MASK (0xff << 16)
-#define POWERMODE3(x) ((x) << 24)
-#define POWERMODE3_MASK (0xff << 24)
-
-#define CGTS_SM_CTRL_REG 0x9150
-
/* Registers */
#define CB_COLOR0_BASE 0x28040
#define CB_COLOR1_BASE 0x28044
@@ -333,8 +63,8 @@
#define CONFIG_MEMSIZE 0x5428
#define CP_ME_CNTL 0x86D8
-#define CP_ME_HALT (1 << 28)
-#define CP_PFP_HALT (1 << 26)
+#define CP_ME_HALT (1<<28)
+#define CP_PFP_HALT (1<<26)
#define CP_ME_RAM_DATA 0xC160
#define CP_ME_RAM_RADDR 0xC158
#define CP_ME_RAM_WADDR 0xC15C
@@ -383,11 +113,6 @@
#define DMA_TILING_CONFIG 0x3ec8
#define DMA_TILING_CONFIG2 0xd0b8
-/* RV730 only */
-#define UVD_UDEC_TILING_CONFIG 0xef40
-#define UVD_UDEC_DB_TILING_CONFIG 0xef44
-#define UVD_UDEC_DBW_TILING_CONFIG 0xef48
-
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
#define INACTIVE_QD_PIPES_MASK 0x0000FF00
@@ -404,23 +129,6 @@
#define GUI_ACTIVE (1<<31)
#define GRBM_STATUS2 0x8014
-#define CG_THERMAL_CTRL 0x72C
-#define DPM_EVENT_SRC(x) ((x) << 0)
-#define DPM_EVENT_SRC_MASK (7 << 0)
-#define DIG_THERM_DPM(x) ((x) << 14)
-#define DIG_THERM_DPM_MASK 0x003FC000
-#define DIG_THERM_DPM_SHIFT 14
-
-#define CG_THERMAL_INT 0x734
-#define DIG_THERM_INTH(x) ((x) << 8)
-#define DIG_THERM_INTH_MASK 0x0000FF00
-#define DIG_THERM_INTH_SHIFT 8
-#define DIG_THERM_INTL(x) ((x) << 16)
-#define DIG_THERM_INTL_MASK 0x00FF0000
-#define DIG_THERM_INTL_SHIFT 16
-#define THERM_INT_MASK_HIGH (1 << 24)
-#define THERM_INT_MASK_LOW (1 << 25)
-
#define CG_MULT_THERMAL_STATUS 0x740
#define ASIC_T(x) ((x) << 16)
#define ASIC_T_MASK 0x3FF0000
@@ -859,7 +567,7 @@
#define AFMT_VBI_PACKET_CONTROL 0x7608
# define AFMT_GENERIC0_UPDATE (1 << 2)
#define AFMT_INFOFRAME_CONTROL0 0x760c
-# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
+# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */
# define AFMT_AUDIO_INFO_UPDATE (1 << 7)
# define AFMT_MPEG_INFO_UPDATE (1 << 10)
#define AFMT_GENERIC0_7 0x7610
@@ -922,22 +630,7 @@
#define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x691c
#define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x611c
-/* PCIE indirect regs */
-#define PCIE_P_CNTL 0x40
-# define P_PLL_PWRDN_IN_L1L23 (1 << 3)
-# define P_PLL_BUF_PDNB (1 << 4)
-# define P_PLL_PDNB (1 << 9)
-# define P_ALLOW_PRX_FRONTEND_SHUTOFF (1 << 12)
-/* PCIE PORT regs */
-#define PCIE_LC_CNTL 0xa0
-# define LC_L0S_INACTIVITY(x) ((x) << 8)
-# define LC_L0S_INACTIVITY_MASK (0xf << 8)
-# define LC_L0S_INACTIVITY_SHIFT 8
-# define LC_L1_INACTIVITY(x) ((x) << 12)
-# define LC_L1_INACTIVITY_MASK (0xf << 12)
-# define LC_L1_INACTIVITY_SHIFT 12
-# define LC_PMI_TO_L1_DIS (1 << 16)
-# define LC_ASPM_TO_L1_DIS (1 << 24)
+/* PCIE link stuff */
#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
# define LC_LINK_WIDTH_SHIFT 0
@@ -965,9 +658,6 @@
# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 8)
# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 3
# define LC_CURRENT_DATA_RATE (1 << 11)
-# define LC_HW_VOLTAGE_IF_CONTROL(x) ((x) << 12)
-# define LC_HW_VOLTAGE_IF_CONTROL_MASK (3 << 12)
-# define LC_HW_VOLTAGE_IF_CONTROL_SHIFT 12
# define LC_VOLTAGE_TIMER_SEL_MASK (0xf << 14)
# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 21)
# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 23)
@@ -978,37 +668,4 @@
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
-/*
- * PM4
- */
-#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
- (((reg) >> 2) & 0xFFFF) | \
- ((n) & 0x3FFF) << 16)
-#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
- (((op) & 0xFF) << 8) | \
- ((n) & 0x3FFF) << 16)
-
-/* UVD */
-#define UVD_SEMA_ADDR_LOW 0xef00
-#define UVD_SEMA_ADDR_HIGH 0xef04
-#define UVD_SEMA_CMD 0xef08
-#define UVD_GPCOM_VCPU_CMD 0xef0c
-#define UVD_GPCOM_VCPU_DATA0 0xef10
-#define UVD_GPCOM_VCPU_DATA1 0xef14
-
-#define UVD_LMI_EXT40_ADDR 0xf498
-#define UVD_VCPU_CHIP_ID 0xf4d4
-#define UVD_VCPU_CACHE_OFFSET0 0xf4d8
-#define UVD_VCPU_CACHE_SIZE0 0xf4dc
-#define UVD_VCPU_CACHE_OFFSET1 0xf4e0
-#define UVD_VCPU_CACHE_SIZE1 0xf4e4
-#define UVD_VCPU_CACHE_OFFSET2 0xf4e8
-#define UVD_VCPU_CACHE_SIZE2 0xf4ec
-#define UVD_LMI_ADDR_EXT 0xf594
-
-#define UVD_RBC_RB_RPTR 0xf690
-#define UVD_RBC_RB_WPTR 0xf694
-
-#define UVD_CONTEXT_ID 0xf6f4
-
#endif
diff --git a/sys/dev/pci/drm/radeon/si.c b/sys/dev/pci/drm/radeon/si.c
index 3d47942e94b..5edbbde1e5e 100644
--- a/sys/dev/pci/drm/radeon/si.c
+++ b/sys/dev/pci/drm/radeon/si.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: si.c,v 1.25 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2011 Advanced Micro Devices, Inc.
*
@@ -24,1299 +25,39 @@
#include <dev/pci/drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
-#include "radeon_audio.h"
#include <dev/pci/drm/radeon_drm.h>
#include "sid.h"
#include "atom.h"
#include "si_blit_shaders.h"
-#include "clearstate_si.h"
-#include "radeon_ucode.h"
+#define SI_PFP_UCODE_SIZE 2144
+#define SI_PM4_UCODE_SIZE 2144
+#define SI_CE_UCODE_SIZE 2144
+#define SI_RLC_UCODE_SIZE 2048
+#define SI_MC_UCODE_SIZE 7769
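Assuming these sizes follow the same dword-count convention as the R700 loader above, the PFP, ME, and CE images are 2144 * 4 = 8576 bytes each, the RLC image 2048 * 4 = 8192 bytes, and the MC image 7769 * 4 = 31,076 bytes.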
MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
MODULE_FIRMWARE("radeon/TAHITI_me.bin");
MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
-MODULE_FIRMWARE("radeon/TAHITI_mc2.bin");
MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
-MODULE_FIRMWARE("radeon/TAHITI_smc.bin");
-
-MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
-MODULE_FIRMWARE("radeon/tahiti_me.bin");
-MODULE_FIRMWARE("radeon/tahiti_ce.bin");
-MODULE_FIRMWARE("radeon/tahiti_mc.bin");
-MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
-MODULE_FIRMWARE("radeon/tahiti_smc.bin");
-
MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
-MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
-MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");
-
-MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
-MODULE_FIRMWARE("radeon/pitcairn_me.bin");
-MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
-MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
-MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
-MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
-
MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
MODULE_FIRMWARE("radeon/VERDE_me.bin");
MODULE_FIRMWARE("radeon/VERDE_ce.bin");
MODULE_FIRMWARE("radeon/VERDE_mc.bin");
-MODULE_FIRMWARE("radeon/VERDE_mc2.bin");
MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
-MODULE_FIRMWARE("radeon/VERDE_smc.bin");
-
-MODULE_FIRMWARE("radeon/verde_pfp.bin");
-MODULE_FIRMWARE("radeon/verde_me.bin");
-MODULE_FIRMWARE("radeon/verde_ce.bin");
-MODULE_FIRMWARE("radeon/verde_mc.bin");
-MODULE_FIRMWARE("radeon/verde_rlc.bin");
-MODULE_FIRMWARE("radeon/verde_smc.bin");
-
-MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
-MODULE_FIRMWARE("radeon/OLAND_me.bin");
-MODULE_FIRMWARE("radeon/OLAND_ce.bin");
-MODULE_FIRMWARE("radeon/OLAND_mc.bin");
-MODULE_FIRMWARE("radeon/OLAND_mc2.bin");
-MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
-MODULE_FIRMWARE("radeon/OLAND_smc.bin");
-
-MODULE_FIRMWARE("radeon/oland_pfp.bin");
-MODULE_FIRMWARE("radeon/oland_me.bin");
-MODULE_FIRMWARE("radeon/oland_ce.bin");
-MODULE_FIRMWARE("radeon/oland_mc.bin");
-MODULE_FIRMWARE("radeon/oland_rlc.bin");
-MODULE_FIRMWARE("radeon/oland_smc.bin");
-
-MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
-MODULE_FIRMWARE("radeon/HAINAN_me.bin");
-MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
-MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
-MODULE_FIRMWARE("radeon/HAINAN_mc2.bin");
-MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
-MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
-
-MODULE_FIRMWARE("radeon/hainan_pfp.bin");
-MODULE_FIRMWARE("radeon/hainan_me.bin");
-MODULE_FIRMWARE("radeon/hainan_ce.bin");
-MODULE_FIRMWARE("radeon/hainan_mc.bin");
-MODULE_FIRMWARE("radeon/hainan_rlc.bin");
-MODULE_FIRMWARE("radeon/hainan_smc.bin");
-
-static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
-static void si_pcie_gen3_enable(struct radeon_device *rdev);
-static void si_program_aspm(struct radeon_device *rdev);
-extern void sumo_rlc_fini(struct radeon_device *rdev);
-extern int sumo_rlc_init(struct radeon_device *rdev);
+
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
-extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
-extern bool evergreen_is_display_hung(struct radeon_device *rdev);
-static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
- bool enable);
-static void si_init_pg(struct radeon_device *rdev);
-static void si_init_cg(struct radeon_device *rdev);
-static void si_fini_pg(struct radeon_device *rdev);
-static void si_fini_cg(struct radeon_device *rdev);
-static void si_rlc_stop(struct radeon_device *rdev);
-
-static const u32 verde_rlc_save_restore_register_list[] =
-{
- (0x8000 << 16) | (0x98f4 >> 2),
- 0x00000000,
- (0x8040 << 16) | (0x98f4 >> 2),
- 0x00000000,
- (0x8000 << 16) | (0xe80 >> 2),
- 0x00000000,
- (0x8040 << 16) | (0xe80 >> 2),
- 0x00000000,
- (0x8000 << 16) | (0x89bc >> 2),
- 0x00000000,
- (0x8040 << 16) | (0x89bc >> 2),
- 0x00000000,
- (0x8000 << 16) | (0x8c1c >> 2),
- 0x00000000,
- (0x8040 << 16) | (0x8c1c >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x98f0 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0xe7c >> 2),
- 0x00000000,
- (0x8000 << 16) | (0x9148 >> 2),
- 0x00000000,
- (0x8040 << 16) | (0x9148 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9150 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x897c >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x8d8c >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0xac54 >> 2),
-	0x00000000,
- 0x3,
- (0x9c00 << 16) | (0x98f8 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9910 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9914 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9918 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x991c >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9920 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9924 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9928 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x992c >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9930 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9934 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9938 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x993c >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9940 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9944 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9948 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x994c >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9950 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9954 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9958 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x995c >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9960 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9964 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9968 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x996c >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9970 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9974 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9978 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x997c >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9980 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9984 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9988 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x998c >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x8c00 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x8c14 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x8c04 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x8c08 >> 2),
- 0x00000000,
- (0x8000 << 16) | (0x9b7c >> 2),
- 0x00000000,
- (0x8040 << 16) | (0x9b7c >> 2),
- 0x00000000,
- (0x8000 << 16) | (0xe84 >> 2),
- 0x00000000,
- (0x8040 << 16) | (0xe84 >> 2),
- 0x00000000,
- (0x8000 << 16) | (0x89c0 >> 2),
- 0x00000000,
- (0x8040 << 16) | (0x89c0 >> 2),
- 0x00000000,
- (0x8000 << 16) | (0x914c >> 2),
- 0x00000000,
- (0x8040 << 16) | (0x914c >> 2),
- 0x00000000,
- (0x8000 << 16) | (0x8c20 >> 2),
- 0x00000000,
- (0x8040 << 16) | (0x8c20 >> 2),
- 0x00000000,
- (0x8000 << 16) | (0x9354 >> 2),
- 0x00000000,
- (0x8040 << 16) | (0x9354 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9060 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9364 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9100 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x913c >> 2),
- 0x00000000,
- (0x8000 << 16) | (0x90e0 >> 2),
- 0x00000000,
- (0x8000 << 16) | (0x90e4 >> 2),
- 0x00000000,
- (0x8000 << 16) | (0x90e8 >> 2),
- 0x00000000,
- (0x8040 << 16) | (0x90e0 >> 2),
- 0x00000000,
- (0x8040 << 16) | (0x90e4 >> 2),
- 0x00000000,
- (0x8040 << 16) | (0x90e8 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x8bcc >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x8b24 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x88c4 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x8e50 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x8c0c >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x8e58 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x8e5c >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9508 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x950c >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9494 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0xac0c >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0xac10 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0xac14 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0xae00 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0xac08 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x88d4 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x88c8 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x88cc >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x89b0 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x8b10 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x8a14 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9830 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9834 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9838 >> 2),
- 0x00000000,
- (0x9c00 << 16) | (0x9a10 >> 2),
- 0x00000000,
- (0x8000 << 16) | (0x9870 >> 2),
- 0x00000000,
- (0x8000 << 16) | (0x9874 >> 2),
- 0x00000000,
- (0x8001 << 16) | (0x9870 >> 2),
- 0x00000000,
- (0x8001 << 16) | (0x9874 >> 2),
- 0x00000000,
- (0x8040 << 16) | (0x9870 >> 2),
- 0x00000000,
- (0x8040 << 16) | (0x9874 >> 2),
- 0x00000000,
- (0x8041 << 16) | (0x9870 >> 2),
- 0x00000000,
- (0x8041 << 16) | (0x9874 >> 2),
- 0x00000000,
- 0x00000000
-};
-
-static const u32 tahiti_golden_rlc_registers[] =
-{
- 0xc424, 0xffffffff, 0x00601005,
- 0xc47c, 0xffffffff, 0x10104040,
- 0xc488, 0xffffffff, 0x0100000a,
- 0xc314, 0xffffffff, 0x00000800,
- 0xc30c, 0xffffffff, 0x800000f4,
- 0xf4a8, 0xffffffff, 0x00000000
-};
-
-static const u32 tahiti_golden_registers[] =
-{
- 0x9a10, 0x00010000, 0x00018208,
- 0x9830, 0xffffffff, 0x00000000,
- 0x9834, 0xf00fffff, 0x00000400,
- 0x9838, 0x0002021c, 0x00020200,
- 0xc78, 0x00000080, 0x00000000,
- 0xd030, 0x000300c0, 0x00800040,
- 0xd830, 0x000300c0, 0x00800040,
- 0x5bb0, 0x000000f0, 0x00000070,
- 0x5bc0, 0x00200000, 0x50100000,
- 0x7030, 0x31000311, 0x00000011,
- 0x277c, 0x00000003, 0x000007ff,
- 0x240c, 0x000007ff, 0x00000000,
- 0x8a14, 0xf000001f, 0x00000007,
- 0x8b24, 0xffffffff, 0x00ffffff,
- 0x8b10, 0x0000ff0f, 0x00000000,
- 0x28a4c, 0x07ffffff, 0x4e000000,
- 0x28350, 0x3f3f3fff, 0x2a00126a,
- 0x30, 0x000000ff, 0x0040,
- 0x34, 0x00000040, 0x00004040,
- 0x9100, 0x07ffffff, 0x03000000,
- 0x8e88, 0x01ff1f3f, 0x00000000,
- 0x8e84, 0x01ff1f3f, 0x00000000,
- 0x9060, 0x0000007f, 0x00000020,
- 0x9508, 0x00010000, 0x00010000,
- 0xac14, 0x00000200, 0x000002fb,
- 0xac10, 0xffffffff, 0x0000543b,
- 0xac0c, 0xffffffff, 0xa9210876,
- 0x88d0, 0xffffffff, 0x000fff40,
- 0x88d4, 0x0000001f, 0x00000010,
- 0x1410, 0x20000000, 0x20fffed8,
- 0x15c0, 0x000c0fc0, 0x000c0400
-};
-
-static const u32 tahiti_golden_registers2[] =
-{
- 0xc64, 0x00000001, 0x00000001
-};
-
-static const u32 pitcairn_golden_rlc_registers[] =
-{
- 0xc424, 0xffffffff, 0x00601004,
- 0xc47c, 0xffffffff, 0x10102020,
- 0xc488, 0xffffffff, 0x01000020,
- 0xc314, 0xffffffff, 0x00000800,
- 0xc30c, 0xffffffff, 0x800000a4
-};
-
-static const u32 pitcairn_golden_registers[] =
-{
- 0x9a10, 0x00010000, 0x00018208,
- 0x9830, 0xffffffff, 0x00000000,
- 0x9834, 0xf00fffff, 0x00000400,
- 0x9838, 0x0002021c, 0x00020200,
- 0xc78, 0x00000080, 0x00000000,
- 0xd030, 0x000300c0, 0x00800040,
- 0xd830, 0x000300c0, 0x00800040,
- 0x5bb0, 0x000000f0, 0x00000070,
- 0x5bc0, 0x00200000, 0x50100000,
- 0x7030, 0x31000311, 0x00000011,
- 0x2ae4, 0x00073ffe, 0x000022a2,
- 0x240c, 0x000007ff, 0x00000000,
- 0x8a14, 0xf000001f, 0x00000007,
- 0x8b24, 0xffffffff, 0x00ffffff,
- 0x8b10, 0x0000ff0f, 0x00000000,
- 0x28a4c, 0x07ffffff, 0x4e000000,
- 0x28350, 0x3f3f3fff, 0x2a00126a,
- 0x30, 0x000000ff, 0x0040,
- 0x34, 0x00000040, 0x00004040,
- 0x9100, 0x07ffffff, 0x03000000,
- 0x9060, 0x0000007f, 0x00000020,
- 0x9508, 0x00010000, 0x00010000,
- 0xac14, 0x000003ff, 0x000000f7,
- 0xac10, 0xffffffff, 0x00000000,
- 0xac0c, 0xffffffff, 0x32761054,
- 0x88d4, 0x0000001f, 0x00000010,
- 0x15c0, 0x000c0fc0, 0x000c0400
-};
-
-static const u32 verde_golden_rlc_registers[] =
-{
- 0xc424, 0xffffffff, 0x033f1005,
- 0xc47c, 0xffffffff, 0x10808020,
- 0xc488, 0xffffffff, 0x00800008,
- 0xc314, 0xffffffff, 0x00001000,
- 0xc30c, 0xffffffff, 0x80010014
-};
-
-static const u32 verde_golden_registers[] =
-{
- 0x9a10, 0x00010000, 0x00018208,
- 0x9830, 0xffffffff, 0x00000000,
- 0x9834, 0xf00fffff, 0x00000400,
- 0x9838, 0x0002021c, 0x00020200,
- 0xc78, 0x00000080, 0x00000000,
- 0xd030, 0x000300c0, 0x00800040,
- 0xd030, 0x000300c0, 0x00800040,
- 0xd830, 0x000300c0, 0x00800040,
- 0xd830, 0x000300c0, 0x00800040,
- 0x5bb0, 0x000000f0, 0x00000070,
- 0x5bc0, 0x00200000, 0x50100000,
- 0x7030, 0x31000311, 0x00000011,
- 0x2ae4, 0x00073ffe, 0x000022a2,
- 0x2ae4, 0x00073ffe, 0x000022a2,
- 0x2ae4, 0x00073ffe, 0x000022a2,
- 0x240c, 0x000007ff, 0x00000000,
- 0x240c, 0x000007ff, 0x00000000,
- 0x240c, 0x000007ff, 0x00000000,
- 0x8a14, 0xf000001f, 0x00000007,
- 0x8a14, 0xf000001f, 0x00000007,
- 0x8a14, 0xf000001f, 0x00000007,
- 0x8b24, 0xffffffff, 0x00ffffff,
- 0x8b10, 0x0000ff0f, 0x00000000,
- 0x28a4c, 0x07ffffff, 0x4e000000,
- 0x28350, 0x3f3f3fff, 0x0000124a,
- 0x28350, 0x3f3f3fff, 0x0000124a,
- 0x28350, 0x3f3f3fff, 0x0000124a,
- 0x30, 0x000000ff, 0x0040,
- 0x34, 0x00000040, 0x00004040,
- 0x9100, 0x07ffffff, 0x03000000,
- 0x9100, 0x07ffffff, 0x03000000,
- 0x8e88, 0x01ff1f3f, 0x00000000,
- 0x8e88, 0x01ff1f3f, 0x00000000,
- 0x8e88, 0x01ff1f3f, 0x00000000,
- 0x8e84, 0x01ff1f3f, 0x00000000,
- 0x8e84, 0x01ff1f3f, 0x00000000,
- 0x8e84, 0x01ff1f3f, 0x00000000,
- 0x9060, 0x0000007f, 0x00000020,
- 0x9508, 0x00010000, 0x00010000,
- 0xac14, 0x000003ff, 0x00000003,
- 0xac14, 0x000003ff, 0x00000003,
- 0xac14, 0x000003ff, 0x00000003,
- 0xac10, 0xffffffff, 0x00000000,
- 0xac10, 0xffffffff, 0x00000000,
- 0xac10, 0xffffffff, 0x00000000,
- 0xac0c, 0xffffffff, 0x00001032,
- 0xac0c, 0xffffffff, 0x00001032,
- 0xac0c, 0xffffffff, 0x00001032,
- 0x88d4, 0x0000001f, 0x00000010,
- 0x88d4, 0x0000001f, 0x00000010,
- 0x88d4, 0x0000001f, 0x00000010,
- 0x15c0, 0x000c0fc0, 0x000c0400
-};
-
-static const u32 oland_golden_rlc_registers[] =
-{
- 0xc424, 0xffffffff, 0x00601005,
- 0xc47c, 0xffffffff, 0x10104040,
- 0xc488, 0xffffffff, 0x0100000a,
- 0xc314, 0xffffffff, 0x00000800,
- 0xc30c, 0xffffffff, 0x800000f4
-};
-
-static const u32 oland_golden_registers[] =
-{
- 0x9a10, 0x00010000, 0x00018208,
- 0x9830, 0xffffffff, 0x00000000,
- 0x9834, 0xf00fffff, 0x00000400,
- 0x9838, 0x0002021c, 0x00020200,
- 0xc78, 0x00000080, 0x00000000,
- 0xd030, 0x000300c0, 0x00800040,
- 0xd830, 0x000300c0, 0x00800040,
- 0x5bb0, 0x000000f0, 0x00000070,
- 0x5bc0, 0x00200000, 0x50100000,
- 0x7030, 0x31000311, 0x00000011,
- 0x2ae4, 0x00073ffe, 0x000022a2,
- 0x240c, 0x000007ff, 0x00000000,
- 0x8a14, 0xf000001f, 0x00000007,
- 0x8b24, 0xffffffff, 0x00ffffff,
- 0x8b10, 0x0000ff0f, 0x00000000,
- 0x28a4c, 0x07ffffff, 0x4e000000,
- 0x28350, 0x3f3f3fff, 0x00000082,
- 0x30, 0x000000ff, 0x0040,
- 0x34, 0x00000040, 0x00004040,
- 0x9100, 0x07ffffff, 0x03000000,
- 0x9060, 0x0000007f, 0x00000020,
- 0x9508, 0x00010000, 0x00010000,
- 0xac14, 0x000003ff, 0x000000f3,
- 0xac10, 0xffffffff, 0x00000000,
- 0xac0c, 0xffffffff, 0x00003210,
- 0x88d4, 0x0000001f, 0x00000010,
- 0x15c0, 0x000c0fc0, 0x000c0400
-};
-
-static const u32 hainan_golden_registers[] =
-{
- 0x9a10, 0x00010000, 0x00018208,
- 0x9830, 0xffffffff, 0x00000000,
- 0x9834, 0xf00fffff, 0x00000400,
- 0x9838, 0x0002021c, 0x00020200,
- 0xd0c0, 0xff000fff, 0x00000100,
- 0xd030, 0x000300c0, 0x00800040,
- 0xd8c0, 0xff000fff, 0x00000100,
- 0xd830, 0x000300c0, 0x00800040,
- 0x2ae4, 0x00073ffe, 0x000022a2,
- 0x240c, 0x000007ff, 0x00000000,
- 0x8a14, 0xf000001f, 0x00000007,
- 0x8b24, 0xffffffff, 0x00ffffff,
- 0x8b10, 0x0000ff0f, 0x00000000,
- 0x28a4c, 0x07ffffff, 0x4e000000,
- 0x28350, 0x3f3f3fff, 0x00000000,
- 0x30, 0x000000ff, 0x0040,
- 0x34, 0x00000040, 0x00004040,
- 0x9100, 0x03e00000, 0x03600000,
- 0x9060, 0x0000007f, 0x00000020,
- 0x9508, 0x00010000, 0x00010000,
- 0xac14, 0x000003ff, 0x000000f1,
- 0xac10, 0xffffffff, 0x00000000,
- 0xac0c, 0xffffffff, 0x00003210,
- 0x88d4, 0x0000001f, 0x00000010,
- 0x15c0, 0x000c0fc0, 0x000c0400
-};
-
-static const u32 hainan_golden_registers2[] =
-{
- 0x98f8, 0xffffffff, 0x02010001
-};
-
-static const u32 tahiti_mgcg_cgcg_init[] =
-{
- 0xc400, 0xffffffff, 0xfffffffc,
- 0x802c, 0xffffffff, 0xe0000000,
- 0x9a60, 0xffffffff, 0x00000100,
- 0x92a4, 0xffffffff, 0x00000100,
- 0xc164, 0xffffffff, 0x00000100,
- 0x9774, 0xffffffff, 0x00000100,
- 0x8984, 0xffffffff, 0x06000100,
- 0x8a18, 0xffffffff, 0x00000100,
- 0x92a0, 0xffffffff, 0x00000100,
- 0xc380, 0xffffffff, 0x00000100,
- 0x8b28, 0xffffffff, 0x00000100,
- 0x9144, 0xffffffff, 0x00000100,
- 0x8d88, 0xffffffff, 0x00000100,
- 0x8d8c, 0xffffffff, 0x00000100,
- 0x9030, 0xffffffff, 0x00000100,
- 0x9034, 0xffffffff, 0x00000100,
- 0x9038, 0xffffffff, 0x00000100,
- 0x903c, 0xffffffff, 0x00000100,
- 0xad80, 0xffffffff, 0x00000100,
- 0xac54, 0xffffffff, 0x00000100,
- 0x897c, 0xffffffff, 0x06000100,
- 0x9868, 0xffffffff, 0x00000100,
- 0x9510, 0xffffffff, 0x00000100,
- 0xaf04, 0xffffffff, 0x00000100,
- 0xae04, 0xffffffff, 0x00000100,
- 0x949c, 0xffffffff, 0x00000100,
- 0x802c, 0xffffffff, 0xe0000000,
- 0x9160, 0xffffffff, 0x00010000,
- 0x9164, 0xffffffff, 0x00030002,
- 0x9168, 0xffffffff, 0x00040007,
- 0x916c, 0xffffffff, 0x00060005,
- 0x9170, 0xffffffff, 0x00090008,
- 0x9174, 0xffffffff, 0x00020001,
- 0x9178, 0xffffffff, 0x00040003,
- 0x917c, 0xffffffff, 0x00000007,
- 0x9180, 0xffffffff, 0x00060005,
- 0x9184, 0xffffffff, 0x00090008,
- 0x9188, 0xffffffff, 0x00030002,
- 0x918c, 0xffffffff, 0x00050004,
- 0x9190, 0xffffffff, 0x00000008,
- 0x9194, 0xffffffff, 0x00070006,
- 0x9198, 0xffffffff, 0x000a0009,
- 0x919c, 0xffffffff, 0x00040003,
- 0x91a0, 0xffffffff, 0x00060005,
- 0x91a4, 0xffffffff, 0x00000009,
- 0x91a8, 0xffffffff, 0x00080007,
- 0x91ac, 0xffffffff, 0x000b000a,
- 0x91b0, 0xffffffff, 0x00050004,
- 0x91b4, 0xffffffff, 0x00070006,
- 0x91b8, 0xffffffff, 0x0008000b,
- 0x91bc, 0xffffffff, 0x000a0009,
- 0x91c0, 0xffffffff, 0x000d000c,
- 0x91c4, 0xffffffff, 0x00060005,
- 0x91c8, 0xffffffff, 0x00080007,
- 0x91cc, 0xffffffff, 0x0000000b,
- 0x91d0, 0xffffffff, 0x000a0009,
- 0x91d4, 0xffffffff, 0x000d000c,
- 0x91d8, 0xffffffff, 0x00070006,
- 0x91dc, 0xffffffff, 0x00090008,
- 0x91e0, 0xffffffff, 0x0000000c,
- 0x91e4, 0xffffffff, 0x000b000a,
- 0x91e8, 0xffffffff, 0x000e000d,
- 0x91ec, 0xffffffff, 0x00080007,
- 0x91f0, 0xffffffff, 0x000a0009,
- 0x91f4, 0xffffffff, 0x0000000d,
- 0x91f8, 0xffffffff, 0x000c000b,
- 0x91fc, 0xffffffff, 0x000f000e,
- 0x9200, 0xffffffff, 0x00090008,
- 0x9204, 0xffffffff, 0x000b000a,
- 0x9208, 0xffffffff, 0x000c000f,
- 0x920c, 0xffffffff, 0x000e000d,
- 0x9210, 0xffffffff, 0x00110010,
- 0x9214, 0xffffffff, 0x000a0009,
- 0x9218, 0xffffffff, 0x000c000b,
- 0x921c, 0xffffffff, 0x0000000f,
- 0x9220, 0xffffffff, 0x000e000d,
- 0x9224, 0xffffffff, 0x00110010,
- 0x9228, 0xffffffff, 0x000b000a,
- 0x922c, 0xffffffff, 0x000d000c,
- 0x9230, 0xffffffff, 0x00000010,
- 0x9234, 0xffffffff, 0x000f000e,
- 0x9238, 0xffffffff, 0x00120011,
- 0x923c, 0xffffffff, 0x000c000b,
- 0x9240, 0xffffffff, 0x000e000d,
- 0x9244, 0xffffffff, 0x00000011,
- 0x9248, 0xffffffff, 0x0010000f,
- 0x924c, 0xffffffff, 0x00130012,
- 0x9250, 0xffffffff, 0x000d000c,
- 0x9254, 0xffffffff, 0x000f000e,
- 0x9258, 0xffffffff, 0x00100013,
- 0x925c, 0xffffffff, 0x00120011,
- 0x9260, 0xffffffff, 0x00150014,
- 0x9264, 0xffffffff, 0x000e000d,
- 0x9268, 0xffffffff, 0x0010000f,
- 0x926c, 0xffffffff, 0x00000013,
- 0x9270, 0xffffffff, 0x00120011,
- 0x9274, 0xffffffff, 0x00150014,
- 0x9278, 0xffffffff, 0x000f000e,
- 0x927c, 0xffffffff, 0x00110010,
- 0x9280, 0xffffffff, 0x00000014,
- 0x9284, 0xffffffff, 0x00130012,
- 0x9288, 0xffffffff, 0x00160015,
- 0x928c, 0xffffffff, 0x0010000f,
- 0x9290, 0xffffffff, 0x00120011,
- 0x9294, 0xffffffff, 0x00000015,
- 0x9298, 0xffffffff, 0x00140013,
- 0x929c, 0xffffffff, 0x00170016,
- 0x9150, 0xffffffff, 0x96940200,
- 0x8708, 0xffffffff, 0x00900100,
- 0xc478, 0xffffffff, 0x00000080,
- 0xc404, 0xffffffff, 0x0020003f,
- 0x30, 0xffffffff, 0x0000001c,
- 0x34, 0x000f0000, 0x000f0000,
- 0x160c, 0xffffffff, 0x00000100,
- 0x1024, 0xffffffff, 0x00000100,
- 0x102c, 0x00000101, 0x00000000,
- 0x20a8, 0xffffffff, 0x00000104,
- 0x264c, 0x000c0000, 0x000c0000,
- 0x2648, 0x000c0000, 0x000c0000,
- 0x55e4, 0xff000fff, 0x00000100,
- 0x55e8, 0x00000001, 0x00000001,
- 0x2f50, 0x00000001, 0x00000001,
- 0x30cc, 0xc0000fff, 0x00000104,
- 0xc1e4, 0x00000001, 0x00000001,
- 0xd0c0, 0xfffffff0, 0x00000100,
- 0xd8c0, 0xfffffff0, 0x00000100
-};
-
-static const u32 pitcairn_mgcg_cgcg_init[] =
-{
- 0xc400, 0xffffffff, 0xfffffffc,
- 0x802c, 0xffffffff, 0xe0000000,
- 0x9a60, 0xffffffff, 0x00000100,
- 0x92a4, 0xffffffff, 0x00000100,
- 0xc164, 0xffffffff, 0x00000100,
- 0x9774, 0xffffffff, 0x00000100,
- 0x8984, 0xffffffff, 0x06000100,
- 0x8a18, 0xffffffff, 0x00000100,
- 0x92a0, 0xffffffff, 0x00000100,
- 0xc380, 0xffffffff, 0x00000100,
- 0x8b28, 0xffffffff, 0x00000100,
- 0x9144, 0xffffffff, 0x00000100,
- 0x8d88, 0xffffffff, 0x00000100,
- 0x8d8c, 0xffffffff, 0x00000100,
- 0x9030, 0xffffffff, 0x00000100,
- 0x9034, 0xffffffff, 0x00000100,
- 0x9038, 0xffffffff, 0x00000100,
- 0x903c, 0xffffffff, 0x00000100,
- 0xad80, 0xffffffff, 0x00000100,
- 0xac54, 0xffffffff, 0x00000100,
- 0x897c, 0xffffffff, 0x06000100,
- 0x9868, 0xffffffff, 0x00000100,
- 0x9510, 0xffffffff, 0x00000100,
- 0xaf04, 0xffffffff, 0x00000100,
- 0xae04, 0xffffffff, 0x00000100,
- 0x949c, 0xffffffff, 0x00000100,
- 0x802c, 0xffffffff, 0xe0000000,
- 0x9160, 0xffffffff, 0x00010000,
- 0x9164, 0xffffffff, 0x00030002,
- 0x9168, 0xffffffff, 0x00040007,
- 0x916c, 0xffffffff, 0x00060005,
- 0x9170, 0xffffffff, 0x00090008,
- 0x9174, 0xffffffff, 0x00020001,
- 0x9178, 0xffffffff, 0x00040003,
- 0x917c, 0xffffffff, 0x00000007,
- 0x9180, 0xffffffff, 0x00060005,
- 0x9184, 0xffffffff, 0x00090008,
- 0x9188, 0xffffffff, 0x00030002,
- 0x918c, 0xffffffff, 0x00050004,
- 0x9190, 0xffffffff, 0x00000008,
- 0x9194, 0xffffffff, 0x00070006,
- 0x9198, 0xffffffff, 0x000a0009,
- 0x919c, 0xffffffff, 0x00040003,
- 0x91a0, 0xffffffff, 0x00060005,
- 0x91a4, 0xffffffff, 0x00000009,
- 0x91a8, 0xffffffff, 0x00080007,
- 0x91ac, 0xffffffff, 0x000b000a,
- 0x91b0, 0xffffffff, 0x00050004,
- 0x91b4, 0xffffffff, 0x00070006,
- 0x91b8, 0xffffffff, 0x0008000b,
- 0x91bc, 0xffffffff, 0x000a0009,
- 0x91c0, 0xffffffff, 0x000d000c,
- 0x9200, 0xffffffff, 0x00090008,
- 0x9204, 0xffffffff, 0x000b000a,
- 0x9208, 0xffffffff, 0x000c000f,
- 0x920c, 0xffffffff, 0x000e000d,
- 0x9210, 0xffffffff, 0x00110010,
- 0x9214, 0xffffffff, 0x000a0009,
- 0x9218, 0xffffffff, 0x000c000b,
- 0x921c, 0xffffffff, 0x0000000f,
- 0x9220, 0xffffffff, 0x000e000d,
- 0x9224, 0xffffffff, 0x00110010,
- 0x9228, 0xffffffff, 0x000b000a,
- 0x922c, 0xffffffff, 0x000d000c,
- 0x9230, 0xffffffff, 0x00000010,
- 0x9234, 0xffffffff, 0x000f000e,
- 0x9238, 0xffffffff, 0x00120011,
- 0x923c, 0xffffffff, 0x000c000b,
- 0x9240, 0xffffffff, 0x000e000d,
- 0x9244, 0xffffffff, 0x00000011,
- 0x9248, 0xffffffff, 0x0010000f,
- 0x924c, 0xffffffff, 0x00130012,
- 0x9250, 0xffffffff, 0x000d000c,
- 0x9254, 0xffffffff, 0x000f000e,
- 0x9258, 0xffffffff, 0x00100013,
- 0x925c, 0xffffffff, 0x00120011,
- 0x9260, 0xffffffff, 0x00150014,
- 0x9150, 0xffffffff, 0x96940200,
- 0x8708, 0xffffffff, 0x00900100,
- 0xc478, 0xffffffff, 0x00000080,
- 0xc404, 0xffffffff, 0x0020003f,
- 0x30, 0xffffffff, 0x0000001c,
- 0x34, 0x000f0000, 0x000f0000,
- 0x160c, 0xffffffff, 0x00000100,
- 0x1024, 0xffffffff, 0x00000100,
- 0x102c, 0x00000101, 0x00000000,
- 0x20a8, 0xffffffff, 0x00000104,
- 0x55e4, 0xff000fff, 0x00000100,
- 0x55e8, 0x00000001, 0x00000001,
- 0x2f50, 0x00000001, 0x00000001,
- 0x30cc, 0xc0000fff, 0x00000104,
- 0xc1e4, 0x00000001, 0x00000001,
- 0xd0c0, 0xfffffff0, 0x00000100,
- 0xd8c0, 0xfffffff0, 0x00000100
-};
-
-static const u32 verde_mgcg_cgcg_init[] =
-{
- 0xc400, 0xffffffff, 0xfffffffc,
- 0x802c, 0xffffffff, 0xe0000000,
- 0x9a60, 0xffffffff, 0x00000100,
- 0x92a4, 0xffffffff, 0x00000100,
- 0xc164, 0xffffffff, 0x00000100,
- 0x9774, 0xffffffff, 0x00000100,
- 0x8984, 0xffffffff, 0x06000100,
- 0x8a18, 0xffffffff, 0x00000100,
- 0x92a0, 0xffffffff, 0x00000100,
- 0xc380, 0xffffffff, 0x00000100,
- 0x8b28, 0xffffffff, 0x00000100,
- 0x9144, 0xffffffff, 0x00000100,
- 0x8d88, 0xffffffff, 0x00000100,
- 0x8d8c, 0xffffffff, 0x00000100,
- 0x9030, 0xffffffff, 0x00000100,
- 0x9034, 0xffffffff, 0x00000100,
- 0x9038, 0xffffffff, 0x00000100,
- 0x903c, 0xffffffff, 0x00000100,
- 0xad80, 0xffffffff, 0x00000100,
- 0xac54, 0xffffffff, 0x00000100,
- 0x897c, 0xffffffff, 0x06000100,
- 0x9868, 0xffffffff, 0x00000100,
- 0x9510, 0xffffffff, 0x00000100,
- 0xaf04, 0xffffffff, 0x00000100,
- 0xae04, 0xffffffff, 0x00000100,
- 0x949c, 0xffffffff, 0x00000100,
- 0x802c, 0xffffffff, 0xe0000000,
- 0x9160, 0xffffffff, 0x00010000,
- 0x9164, 0xffffffff, 0x00030002,
- 0x9168, 0xffffffff, 0x00040007,
- 0x916c, 0xffffffff, 0x00060005,
- 0x9170, 0xffffffff, 0x00090008,
- 0x9174, 0xffffffff, 0x00020001,
- 0x9178, 0xffffffff, 0x00040003,
- 0x917c, 0xffffffff, 0x00000007,
- 0x9180, 0xffffffff, 0x00060005,
- 0x9184, 0xffffffff, 0x00090008,
- 0x9188, 0xffffffff, 0x00030002,
- 0x918c, 0xffffffff, 0x00050004,
- 0x9190, 0xffffffff, 0x00000008,
- 0x9194, 0xffffffff, 0x00070006,
- 0x9198, 0xffffffff, 0x000a0009,
- 0x919c, 0xffffffff, 0x00040003,
- 0x91a0, 0xffffffff, 0x00060005,
- 0x91a4, 0xffffffff, 0x00000009,
- 0x91a8, 0xffffffff, 0x00080007,
- 0x91ac, 0xffffffff, 0x000b000a,
- 0x91b0, 0xffffffff, 0x00050004,
- 0x91b4, 0xffffffff, 0x00070006,
- 0x91b8, 0xffffffff, 0x0008000b,
- 0x91bc, 0xffffffff, 0x000a0009,
- 0x91c0, 0xffffffff, 0x000d000c,
- 0x9200, 0xffffffff, 0x00090008,
- 0x9204, 0xffffffff, 0x000b000a,
- 0x9208, 0xffffffff, 0x000c000f,
- 0x920c, 0xffffffff, 0x000e000d,
- 0x9210, 0xffffffff, 0x00110010,
- 0x9214, 0xffffffff, 0x000a0009,
- 0x9218, 0xffffffff, 0x000c000b,
- 0x921c, 0xffffffff, 0x0000000f,
- 0x9220, 0xffffffff, 0x000e000d,
- 0x9224, 0xffffffff, 0x00110010,
- 0x9228, 0xffffffff, 0x000b000a,
- 0x922c, 0xffffffff, 0x000d000c,
- 0x9230, 0xffffffff, 0x00000010,
- 0x9234, 0xffffffff, 0x000f000e,
- 0x9238, 0xffffffff, 0x00120011,
- 0x923c, 0xffffffff, 0x000c000b,
- 0x9240, 0xffffffff, 0x000e000d,
- 0x9244, 0xffffffff, 0x00000011,
- 0x9248, 0xffffffff, 0x0010000f,
- 0x924c, 0xffffffff, 0x00130012,
- 0x9250, 0xffffffff, 0x000d000c,
- 0x9254, 0xffffffff, 0x000f000e,
- 0x9258, 0xffffffff, 0x00100013,
- 0x925c, 0xffffffff, 0x00120011,
- 0x9260, 0xffffffff, 0x00150014,
- 0x9150, 0xffffffff, 0x96940200,
- 0x8708, 0xffffffff, 0x00900100,
- 0xc478, 0xffffffff, 0x00000080,
- 0xc404, 0xffffffff, 0x0020003f,
- 0x30, 0xffffffff, 0x0000001c,
- 0x34, 0x000f0000, 0x000f0000,
- 0x160c, 0xffffffff, 0x00000100,
- 0x1024, 0xffffffff, 0x00000100,
- 0x102c, 0x00000101, 0x00000000,
- 0x20a8, 0xffffffff, 0x00000104,
- 0x264c, 0x000c0000, 0x000c0000,
- 0x2648, 0x000c0000, 0x000c0000,
- 0x55e4, 0xff000fff, 0x00000100,
- 0x55e8, 0x00000001, 0x00000001,
- 0x2f50, 0x00000001, 0x00000001,
- 0x30cc, 0xc0000fff, 0x00000104,
- 0xc1e4, 0x00000001, 0x00000001,
- 0xd0c0, 0xfffffff0, 0x00000100,
- 0xd8c0, 0xfffffff0, 0x00000100
-};
-
-static const u32 oland_mgcg_cgcg_init[] =
-{
- 0xc400, 0xffffffff, 0xfffffffc,
- 0x802c, 0xffffffff, 0xe0000000,
- 0x9a60, 0xffffffff, 0x00000100,
- 0x92a4, 0xffffffff, 0x00000100,
- 0xc164, 0xffffffff, 0x00000100,
- 0x9774, 0xffffffff, 0x00000100,
- 0x8984, 0xffffffff, 0x06000100,
- 0x8a18, 0xffffffff, 0x00000100,
- 0x92a0, 0xffffffff, 0x00000100,
- 0xc380, 0xffffffff, 0x00000100,
- 0x8b28, 0xffffffff, 0x00000100,
- 0x9144, 0xffffffff, 0x00000100,
- 0x8d88, 0xffffffff, 0x00000100,
- 0x8d8c, 0xffffffff, 0x00000100,
- 0x9030, 0xffffffff, 0x00000100,
- 0x9034, 0xffffffff, 0x00000100,
- 0x9038, 0xffffffff, 0x00000100,
- 0x903c, 0xffffffff, 0x00000100,
- 0xad80, 0xffffffff, 0x00000100,
- 0xac54, 0xffffffff, 0x00000100,
- 0x897c, 0xffffffff, 0x06000100,
- 0x9868, 0xffffffff, 0x00000100,
- 0x9510, 0xffffffff, 0x00000100,
- 0xaf04, 0xffffffff, 0x00000100,
- 0xae04, 0xffffffff, 0x00000100,
- 0x949c, 0xffffffff, 0x00000100,
- 0x802c, 0xffffffff, 0xe0000000,
- 0x9160, 0xffffffff, 0x00010000,
- 0x9164, 0xffffffff, 0x00030002,
- 0x9168, 0xffffffff, 0x00040007,
- 0x916c, 0xffffffff, 0x00060005,
- 0x9170, 0xffffffff, 0x00090008,
- 0x9174, 0xffffffff, 0x00020001,
- 0x9178, 0xffffffff, 0x00040003,
- 0x917c, 0xffffffff, 0x00000007,
- 0x9180, 0xffffffff, 0x00060005,
- 0x9184, 0xffffffff, 0x00090008,
- 0x9188, 0xffffffff, 0x00030002,
- 0x918c, 0xffffffff, 0x00050004,
- 0x9190, 0xffffffff, 0x00000008,
- 0x9194, 0xffffffff, 0x00070006,
- 0x9198, 0xffffffff, 0x000a0009,
- 0x919c, 0xffffffff, 0x00040003,
- 0x91a0, 0xffffffff, 0x00060005,
- 0x91a4, 0xffffffff, 0x00000009,
- 0x91a8, 0xffffffff, 0x00080007,
- 0x91ac, 0xffffffff, 0x000b000a,
- 0x91b0, 0xffffffff, 0x00050004,
- 0x91b4, 0xffffffff, 0x00070006,
- 0x91b8, 0xffffffff, 0x0008000b,
- 0x91bc, 0xffffffff, 0x000a0009,
- 0x91c0, 0xffffffff, 0x000d000c,
- 0x91c4, 0xffffffff, 0x00060005,
- 0x91c8, 0xffffffff, 0x00080007,
- 0x91cc, 0xffffffff, 0x0000000b,
- 0x91d0, 0xffffffff, 0x000a0009,
- 0x91d4, 0xffffffff, 0x000d000c,
- 0x9150, 0xffffffff, 0x96940200,
- 0x8708, 0xffffffff, 0x00900100,
- 0xc478, 0xffffffff, 0x00000080,
- 0xc404, 0xffffffff, 0x0020003f,
- 0x30, 0xffffffff, 0x0000001c,
- 0x34, 0x000f0000, 0x000f0000,
- 0x160c, 0xffffffff, 0x00000100,
- 0x1024, 0xffffffff, 0x00000100,
- 0x102c, 0x00000101, 0x00000000,
- 0x20a8, 0xffffffff, 0x00000104,
- 0x264c, 0x000c0000, 0x000c0000,
- 0x2648, 0x000c0000, 0x000c0000,
- 0x55e4, 0xff000fff, 0x00000100,
- 0x55e8, 0x00000001, 0x00000001,
- 0x2f50, 0x00000001, 0x00000001,
- 0x30cc, 0xc0000fff, 0x00000104,
- 0xc1e4, 0x00000001, 0x00000001,
- 0xd0c0, 0xfffffff0, 0x00000100,
- 0xd8c0, 0xfffffff0, 0x00000100
-};
-
-static const u32 hainan_mgcg_cgcg_init[] =
-{
- 0xc400, 0xffffffff, 0xfffffffc,
- 0x802c, 0xffffffff, 0xe0000000,
- 0x9a60, 0xffffffff, 0x00000100,
- 0x92a4, 0xffffffff, 0x00000100,
- 0xc164, 0xffffffff, 0x00000100,
- 0x9774, 0xffffffff, 0x00000100,
- 0x8984, 0xffffffff, 0x06000100,
- 0x8a18, 0xffffffff, 0x00000100,
- 0x92a0, 0xffffffff, 0x00000100,
- 0xc380, 0xffffffff, 0x00000100,
- 0x8b28, 0xffffffff, 0x00000100,
- 0x9144, 0xffffffff, 0x00000100,
- 0x8d88, 0xffffffff, 0x00000100,
- 0x8d8c, 0xffffffff, 0x00000100,
- 0x9030, 0xffffffff, 0x00000100,
- 0x9034, 0xffffffff, 0x00000100,
- 0x9038, 0xffffffff, 0x00000100,
- 0x903c, 0xffffffff, 0x00000100,
- 0xad80, 0xffffffff, 0x00000100,
- 0xac54, 0xffffffff, 0x00000100,
- 0x897c, 0xffffffff, 0x06000100,
- 0x9868, 0xffffffff, 0x00000100,
- 0x9510, 0xffffffff, 0x00000100,
- 0xaf04, 0xffffffff, 0x00000100,
- 0xae04, 0xffffffff, 0x00000100,
- 0x949c, 0xffffffff, 0x00000100,
- 0x802c, 0xffffffff, 0xe0000000,
- 0x9160, 0xffffffff, 0x00010000,
- 0x9164, 0xffffffff, 0x00030002,
- 0x9168, 0xffffffff, 0x00040007,
- 0x916c, 0xffffffff, 0x00060005,
- 0x9170, 0xffffffff, 0x00090008,
- 0x9174, 0xffffffff, 0x00020001,
- 0x9178, 0xffffffff, 0x00040003,
- 0x917c, 0xffffffff, 0x00000007,
- 0x9180, 0xffffffff, 0x00060005,
- 0x9184, 0xffffffff, 0x00090008,
- 0x9188, 0xffffffff, 0x00030002,
- 0x918c, 0xffffffff, 0x00050004,
- 0x9190, 0xffffffff, 0x00000008,
- 0x9194, 0xffffffff, 0x00070006,
- 0x9198, 0xffffffff, 0x000a0009,
- 0x919c, 0xffffffff, 0x00040003,
- 0x91a0, 0xffffffff, 0x00060005,
- 0x91a4, 0xffffffff, 0x00000009,
- 0x91a8, 0xffffffff, 0x00080007,
- 0x91ac, 0xffffffff, 0x000b000a,
- 0x91b0, 0xffffffff, 0x00050004,
- 0x91b4, 0xffffffff, 0x00070006,
- 0x91b8, 0xffffffff, 0x0008000b,
- 0x91bc, 0xffffffff, 0x000a0009,
- 0x91c0, 0xffffffff, 0x000d000c,
- 0x91c4, 0xffffffff, 0x00060005,
- 0x91c8, 0xffffffff, 0x00080007,
- 0x91cc, 0xffffffff, 0x0000000b,
- 0x91d0, 0xffffffff, 0x000a0009,
- 0x91d4, 0xffffffff, 0x000d000c,
- 0x9150, 0xffffffff, 0x96940200,
- 0x8708, 0xffffffff, 0x00900100,
- 0xc478, 0xffffffff, 0x00000080,
- 0xc404, 0xffffffff, 0x0020003f,
- 0x30, 0xffffffff, 0x0000001c,
- 0x34, 0x000f0000, 0x000f0000,
- 0x160c, 0xffffffff, 0x00000100,
- 0x1024, 0xffffffff, 0x00000100,
- 0x20a8, 0xffffffff, 0x00000104,
- 0x264c, 0x000c0000, 0x000c0000,
- 0x2648, 0x000c0000, 0x000c0000,
- 0x2f50, 0x00000001, 0x00000001,
- 0x30cc, 0xc0000fff, 0x00000104,
- 0xc1e4, 0x00000001, 0x00000001,
- 0xd0c0, 0xfffffff0, 0x00000100,
- 0xd8c0, 0xfffffff0, 0x00000100
-};
-
-static u32 verde_pg_init[] =
-{
- 0x353c, 0xffffffff, 0x40000,
- 0x3538, 0xffffffff, 0x200010ff,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x7007,
- 0x3538, 0xffffffff, 0x300010ff,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x400000,
- 0x3538, 0xffffffff, 0x100010ff,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x120200,
- 0x3538, 0xffffffff, 0x500010ff,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x1e1e16,
- 0x3538, 0xffffffff, 0x600010ff,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x171f1e,
- 0x3538, 0xffffffff, 0x700010ff,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x353c, 0xffffffff, 0x0,
- 0x3538, 0xffffffff, 0x9ff,
- 0x3500, 0xffffffff, 0x0,
- 0x3504, 0xffffffff, 0x10000800,
- 0x3504, 0xffffffff, 0xf,
- 0x3504, 0xffffffff, 0xf,
- 0x3500, 0xffffffff, 0x4,
- 0x3504, 0xffffffff, 0x1000051e,
- 0x3504, 0xffffffff, 0xffff,
- 0x3504, 0xffffffff, 0xffff,
- 0x3500, 0xffffffff, 0x8,
- 0x3504, 0xffffffff, 0x80500,
- 0x3500, 0xffffffff, 0x12,
- 0x3504, 0xffffffff, 0x9050c,
- 0x3500, 0xffffffff, 0x1d,
- 0x3504, 0xffffffff, 0xb052c,
- 0x3500, 0xffffffff, 0x2a,
- 0x3504, 0xffffffff, 0x1053e,
- 0x3500, 0xffffffff, 0x2d,
- 0x3504, 0xffffffff, 0x10546,
- 0x3500, 0xffffffff, 0x30,
- 0x3504, 0xffffffff, 0xa054e,
- 0x3500, 0xffffffff, 0x3c,
- 0x3504, 0xffffffff, 0x1055f,
- 0x3500, 0xffffffff, 0x3f,
- 0x3504, 0xffffffff, 0x10567,
- 0x3500, 0xffffffff, 0x42,
- 0x3504, 0xffffffff, 0x1056f,
- 0x3500, 0xffffffff, 0x45,
- 0x3504, 0xffffffff, 0x10572,
- 0x3500, 0xffffffff, 0x48,
- 0x3504, 0xffffffff, 0x20575,
- 0x3500, 0xffffffff, 0x4c,
- 0x3504, 0xffffffff, 0x190801,
- 0x3500, 0xffffffff, 0x67,
- 0x3504, 0xffffffff, 0x1082a,
- 0x3500, 0xffffffff, 0x6a,
- 0x3504, 0xffffffff, 0x1b082d,
- 0x3500, 0xffffffff, 0x87,
- 0x3504, 0xffffffff, 0x310851,
- 0x3500, 0xffffffff, 0xba,
- 0x3504, 0xffffffff, 0x891,
- 0x3500, 0xffffffff, 0xbc,
- 0x3504, 0xffffffff, 0x893,
- 0x3500, 0xffffffff, 0xbe,
- 0x3504, 0xffffffff, 0x20895,
- 0x3500, 0xffffffff, 0xc2,
- 0x3504, 0xffffffff, 0x20899,
- 0x3500, 0xffffffff, 0xc6,
- 0x3504, 0xffffffff, 0x2089d,
- 0x3500, 0xffffffff, 0xca,
- 0x3504, 0xffffffff, 0x8a1,
- 0x3500, 0xffffffff, 0xcc,
- 0x3504, 0xffffffff, 0x8a3,
- 0x3500, 0xffffffff, 0xce,
- 0x3504, 0xffffffff, 0x308a5,
- 0x3500, 0xffffffff, 0xd3,
- 0x3504, 0xffffffff, 0x6d08cd,
- 0x3500, 0xffffffff, 0x142,
- 0x3504, 0xffffffff, 0x2000095a,
- 0x3504, 0xffffffff, 0x1,
- 0x3500, 0xffffffff, 0x144,
- 0x3504, 0xffffffff, 0x301f095b,
- 0x3500, 0xffffffff, 0x165,
- 0x3504, 0xffffffff, 0xc094d,
- 0x3500, 0xffffffff, 0x173,
- 0x3504, 0xffffffff, 0xf096d,
- 0x3500, 0xffffffff, 0x184,
- 0x3504, 0xffffffff, 0x15097f,
- 0x3500, 0xffffffff, 0x19b,
- 0x3504, 0xffffffff, 0xc0998,
- 0x3500, 0xffffffff, 0x1a9,
- 0x3504, 0xffffffff, 0x409a7,
- 0x3500, 0xffffffff, 0x1af,
- 0x3504, 0xffffffff, 0xcdc,
- 0x3500, 0xffffffff, 0x1b1,
- 0x3504, 0xffffffff, 0x800,
- 0x3508, 0xffffffff, 0x6c9b2000,
- 0x3510, 0xfc00, 0x2000,
- 0x3544, 0xffffffff, 0xfc0,
- 0x28d4, 0x00000100, 0x100
-};
-
-static void si_init_golden_registers(struct radeon_device *rdev)
-{
- switch (rdev->family) {
- case CHIP_TAHITI:
- radeon_program_register_sequence(rdev,
- tahiti_golden_registers,
- (const u32)ARRAY_SIZE(tahiti_golden_registers));
- radeon_program_register_sequence(rdev,
- tahiti_golden_rlc_registers,
- (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
- radeon_program_register_sequence(rdev,
- tahiti_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
- radeon_program_register_sequence(rdev,
- tahiti_golden_registers2,
- (const u32)ARRAY_SIZE(tahiti_golden_registers2));
- break;
- case CHIP_PITCAIRN:
- radeon_program_register_sequence(rdev,
- pitcairn_golden_registers,
- (const u32)ARRAY_SIZE(pitcairn_golden_registers));
- radeon_program_register_sequence(rdev,
- pitcairn_golden_rlc_registers,
- (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
- radeon_program_register_sequence(rdev,
- pitcairn_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
- break;
- case CHIP_VERDE:
- radeon_program_register_sequence(rdev,
- verde_golden_registers,
- (const u32)ARRAY_SIZE(verde_golden_registers));
- radeon_program_register_sequence(rdev,
- verde_golden_rlc_registers,
- (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
- radeon_program_register_sequence(rdev,
- verde_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
- radeon_program_register_sequence(rdev,
- verde_pg_init,
- (const u32)ARRAY_SIZE(verde_pg_init));
- break;
- case CHIP_OLAND:
- radeon_program_register_sequence(rdev,
- oland_golden_registers,
- (const u32)ARRAY_SIZE(oland_golden_registers));
- radeon_program_register_sequence(rdev,
- oland_golden_rlc_registers,
- (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
- radeon_program_register_sequence(rdev,
- oland_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
- break;
- case CHIP_HAINAN:
- radeon_program_register_sequence(rdev,
- hainan_golden_registers,
- (const u32)ARRAY_SIZE(hainan_golden_registers));
- radeon_program_register_sequence(rdev,
- hainan_golden_registers2,
- (const u32)ARRAY_SIZE(hainan_golden_registers2));
- radeon_program_register_sequence(rdev,
- hainan_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
- break;
- default:
- break;
- }
-}
-
-/**
- * si_get_allowed_info_register - fetch the register for the info ioctl
- *
- * @rdev: radeon_device pointer
- * @reg: register offset in bytes
- * @val: register value
- *
- * Returns 0 for success or -EINVAL for an invalid register
- *
- */
-int si_get_allowed_info_register(struct radeon_device *rdev,
- u32 reg, u32 *val)
-{
- switch (reg) {
- case GRBM_STATUS:
- case GRBM_STATUS2:
- case GRBM_STATUS_SE0:
- case GRBM_STATUS_SE1:
- case SRBM_STATUS:
- case SRBM_STATUS2:
- case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET):
- case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET):
- case UVD_STATUS:
- *val = RREG32(reg);
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-#define PCIE_BUS_CLK 10000
-#define TCLK (PCIE_BUS_CLK / 10)
-
-/**
- * si_get_xclk - get the xclk
- *
- * @rdev: radeon_device pointer
- *
- * Returns the reference clock used by the gfx engine
- * (SI).
- */
-u32 si_get_xclk(struct radeon_device *rdev)
-{
- u32 reference_clock = rdev->clock.spll.reference_freq;
- u32 tmp;
-
- tmp = RREG32(CG_CLKPIN_CNTL_2);
- if (tmp & MUX_TCLK_TO_XCLK)
- return TCLK;
-
- tmp = RREG32(CG_CLKPIN_CNTL);
- if (tmp & XTALIN_DIVIDE)
- return reference_clock / 4;
-
- return reference_clock;
-}
/* get temperature in millidegrees */
int si_get_temp(struct radeon_device *rdev)
@@ -1456,135 +197,36 @@ static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
{0x0000009f, 0x00a37400}
};
-static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
- {0x0000006f, 0x03044000},
- {0x00000070, 0x0480c018},
- {0x00000071, 0x00000040},
- {0x00000072, 0x01000000},
- {0x00000074, 0x000000ff},
- {0x00000075, 0x00143400},
- {0x00000076, 0x08ec0800},
- {0x00000077, 0x040000cc},
- {0x00000079, 0x00000000},
- {0x0000007a, 0x21000409},
- {0x0000007c, 0x00000000},
- {0x0000007d, 0xe8000000},
- {0x0000007e, 0x044408a8},
- {0x0000007f, 0x00000003},
- {0x00000080, 0x00000000},
- {0x00000081, 0x01000000},
- {0x00000082, 0x02000000},
- {0x00000083, 0x00000000},
- {0x00000084, 0xe3f3e4f4},
- {0x00000085, 0x00052024},
- {0x00000087, 0x00000000},
- {0x00000088, 0x66036603},
- {0x00000089, 0x01000000},
- {0x0000008b, 0x1c0a0000},
- {0x0000008c, 0xff010000},
- {0x0000008e, 0xffffefff},
- {0x0000008f, 0xfff3efff},
- {0x00000090, 0xfff3efbf},
- {0x00000094, 0x00101101},
- {0x00000095, 0x00000fff},
- {0x00000096, 0x00116fff},
- {0x00000097, 0x60010000},
- {0x00000098, 0x10010000},
- {0x00000099, 0x00006000},
- {0x0000009a, 0x00001000},
- {0x0000009f, 0x00a17730}
-};
-
-static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
- {0x0000006f, 0x03044000},
- {0x00000070, 0x0480c018},
- {0x00000071, 0x00000040},
- {0x00000072, 0x01000000},
- {0x00000074, 0x000000ff},
- {0x00000075, 0x00143400},
- {0x00000076, 0x08ec0800},
- {0x00000077, 0x040000cc},
- {0x00000079, 0x00000000},
- {0x0000007a, 0x21000409},
- {0x0000007c, 0x00000000},
- {0x0000007d, 0xe8000000},
- {0x0000007e, 0x044408a8},
- {0x0000007f, 0x00000003},
- {0x00000080, 0x00000000},
- {0x00000081, 0x01000000},
- {0x00000082, 0x02000000},
- {0x00000083, 0x00000000},
- {0x00000084, 0xe3f3e4f4},
- {0x00000085, 0x00052024},
- {0x00000087, 0x00000000},
- {0x00000088, 0x66036603},
- {0x00000089, 0x01000000},
- {0x0000008b, 0x1c0a0000},
- {0x0000008c, 0xff010000},
- {0x0000008e, 0xffffefff},
- {0x0000008f, 0xfff3efff},
- {0x00000090, 0xfff3efbf},
- {0x00000094, 0x00101101},
- {0x00000095, 0x00000fff},
- {0x00000096, 0x00116fff},
- {0x00000097, 0x60010000},
- {0x00000098, 0x10010000},
- {0x00000099, 0x00006000},
- {0x0000009a, 0x00001000},
- {0x0000009f, 0x00a07730}
-};
-
/* ucode loading */
-int si_mc_load_microcode(struct radeon_device *rdev)
+static int si_mc_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data = NULL;
- const __le32 *new_fw_data = NULL;
+ const __be32 *fw_data;
u32 running, blackout = 0;
- u32 *io_mc_regs = NULL;
- const __le32 *new_io_mc_regs = NULL;
+ u32 *io_mc_regs;
int i, regs_size, ucode_size;
if (!rdev->mc_fw)
return -EINVAL;
- if (rdev->new_fw) {
- const struct mc_firmware_header_v1_0 *hdr =
- (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;
-
- radeon_ucode_print_mc_hdr(&hdr->header);
- regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
- new_io_mc_regs = (const __le32 *)
- (rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
- ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
- new_fw_data = (const __le32 *)
- (rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- } else {
- ucode_size = rdev->mc_fw->size / 4;
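+	/* firmware size is in bytes; the MC sequencer consumes 32-bit words */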
+ ucode_size = rdev->mc_fw_size / 4;
- switch (rdev->family) {
- case CHIP_TAHITI:
- io_mc_regs = (u32 *)&tahiti_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
- case CHIP_PITCAIRN:
- io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
- case CHIP_VERDE:
- default:
- io_mc_regs = (u32 *)&verde_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
- case CHIP_OLAND:
- io_mc_regs = (u32 *)&oland_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
- case CHIP_HAINAN:
- io_mc_regs = (u32 *)&hainan_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
- }
- fw_data = (const __be32 *)rdev->mc_fw->data;
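+	/* pick the per-family MC IO debug register table */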
+ switch (rdev->family) {
+ case CHIP_TAHITI:
+ io_mc_regs = (u32 *)&tahiti_io_mc_regs;
+ ucode_size = SI_MC_UCODE_SIZE;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_PITCAIRN:
+ io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
+ ucode_size = SI_MC_UCODE_SIZE;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_VERDE:
+ default:
+ io_mc_regs = (u32 *)&verde_io_mc_regs;
+ ucode_size = SI_MC_UCODE_SIZE;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
}
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
@@ -1601,21 +243,13 @@ int si_mc_load_microcode(struct radeon_device *rdev)
/* load mc io regs */
for (i = 0; i < regs_size; i++) {
- if (rdev->new_fw) {
- WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
- WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
- } else {
- WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
- WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
- }
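+		/* each table entry is an (index, data) pair for the MC_SEQ debug port */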
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
+ WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
}
/* load the MC ucode */
- for (i = 0; i < ucode_size; i++) {
- if (rdev->new_fw)
- WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
- else
- WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
- }
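+	/* the raw firmware image is big-endian; convert each word while streaming it in */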
+ fw_data = (const __be32 *)rdev->mc_fw;
+ for (i = 0; i < ucode_size; i++)
+ WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
/* put the engine back into the active state */
WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
@@ -1644,262 +278,128 @@ int si_mc_load_microcode(struct radeon_device *rdev)
static int si_init_microcode(struct radeon_device *rdev)
{
const char *chip_name;
- const char *new_chip_name;
+ const char *rlc_chip_name;
size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
- size_t smc_req_size, mc2_req_size;
char fw_name[30];
int err;
- int new_fw = 0;
DRM_DEBUG("\n");
switch (rdev->family) {
case CHIP_TAHITI:
- chip_name = "TAHITI";
- new_chip_name = "tahiti";
+ chip_name = "tahiti";
+ rlc_chip_name = "tahiti";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = SI_MC_UCODE_SIZE * 4;
- mc2_req_size = TAHITI_MC_UCODE_SIZE * 4;
- smc_req_size = roundup2(TAHITI_SMC_UCODE_SIZE, 4);
break;
case CHIP_PITCAIRN:
- chip_name = "PITCAIRN";
- new_chip_name = "pitcairn";
+ chip_name = "pitcairn";
+ rlc_chip_name = "pitcairn";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = SI_MC_UCODE_SIZE * 4;
- mc2_req_size = PITCAIRN_MC_UCODE_SIZE * 4;
- smc_req_size = roundup2(PITCAIRN_SMC_UCODE_SIZE, 4);
break;
case CHIP_VERDE:
- chip_name = "VERDE";
- new_chip_name = "verde";
+ chip_name = "verde";
+ rlc_chip_name = "verde";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = SI_MC_UCODE_SIZE * 4;
- mc2_req_size = VERDE_MC_UCODE_SIZE * 4;
- smc_req_size = roundup2(VERDE_SMC_UCODE_SIZE, 4);
- break;
- case CHIP_OLAND:
- chip_name = "OLAND";
- new_chip_name = "oland";
- pfp_req_size = SI_PFP_UCODE_SIZE * 4;
- me_req_size = SI_PM4_UCODE_SIZE * 4;
- ce_req_size = SI_CE_UCODE_SIZE * 4;
- rlc_req_size = SI_RLC_UCODE_SIZE * 4;
- mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
- smc_req_size = roundup2(OLAND_SMC_UCODE_SIZE, 4);
- break;
- case CHIP_HAINAN:
- chip_name = "HAINAN";
- new_chip_name = "hainan";
- pfp_req_size = SI_PFP_UCODE_SIZE * 4;
- me_req_size = SI_PM4_UCODE_SIZE * 4;
- ce_req_size = SI_CE_UCODE_SIZE * 4;
- rlc_req_size = SI_RLC_UCODE_SIZE * 4;
- mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
- smc_req_size = roundup2(HAINAN_SMC_UCODE_SIZE, 4);
break;
default: BUG();
}
- DRM_INFO("Loading %s Microcode\n", new_chip_name);
-
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
- err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
- if (err) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
- err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->pfp_fw->size != pfp_req_size) {
- printk(KERN_ERR
- "si_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->pfp_fw->size, fw_name);
- err = -EINVAL;
- goto out;
- }
- } else {
- err = radeon_ucode_validate(rdev->pfp_fw);
- if (err) {
- printk(KERN_ERR
- "si_cp: validation failed for firmware \"%s\"\n",
- fw_name);
- goto out;
- } else {
- new_fw++;
- }
- }
-
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name);
- err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
- if (err) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
- err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->me_fw->size != me_req_size) {
- printk(KERN_ERR
- "si_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->me_fw->size, fw_name);
- err = -EINVAL;
- }
- } else {
- err = radeon_ucode_validate(rdev->me_fw);
- if (err) {
- printk(KERN_ERR
- "si_cp: validation failed for firmware \"%s\"\n",
- fw_name);
- goto out;
- } else {
- new_fw++;
- }
- }
+ DRM_INFO("Loading %s Microcode\n", chip_name);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name);
- err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
- if (err) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
- err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->ce_fw->size != ce_req_size) {
- printk(KERN_ERR
- "si_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->ce_fw->size, fw_name);
- err = -EINVAL;
- }
- } else {
- err = radeon_ucode_validate(rdev->ce_fw);
- if (err) {
- printk(KERN_ERR
- "si_cp: validation failed for firmware \"%s\"\n",
- fw_name);
- goto out;
- } else {
- new_fw++;
- }
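+	/* fetch each ucode image via loadfirmware(9); names follow the "radeon-<chip>_<block>" scheme */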
+ snprintf(fw_name, sizeof(fw_name), "radeon-%s_pfp", chip_name);
+ err = loadfirmware(fw_name, &rdev->pfp_fw, &rdev->pfp_fw_size);
+ if (err)
+ goto out;
+ if (rdev->pfp_fw_size != pfp_req_size) {
+ DRM_ERROR(
+ "si_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->pfp_fw_size, fw_name);
+ err = -EINVAL;
+ goto out;
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name);
- err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
- if (err) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
- err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->rlc_fw->size != rlc_req_size) {
- printk(KERN_ERR
- "si_rlc: Bogus length %zu in firmware \"%s\"\n",
- rdev->rlc_fw->size, fw_name);
- err = -EINVAL;
- }
- } else {
- err = radeon_ucode_validate(rdev->rlc_fw);
- if (err) {
- printk(KERN_ERR
- "si_cp: validation failed for firmware \"%s\"\n",
- fw_name);
- goto out;
- } else {
- new_fw++;
- }
+ snprintf(fw_name, sizeof(fw_name), "radeon-%s_me", chip_name);
+ err = loadfirmware(fw_name, &rdev->me_fw, &rdev->me_fw_size);
+ if (err)
+ goto out;
+ if (rdev->me_fw_size != me_req_size) {
+ DRM_ERROR(
+ "si_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->me_fw_size, fw_name);
+ err = -EINVAL;
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
- err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
- if (err) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
- err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
- if (err) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
- err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- }
- if ((rdev->mc_fw->size != mc_req_size) &&
- (rdev->mc_fw->size != mc2_req_size)) {
- printk(KERN_ERR
- "si_mc: Bogus length %zu in firmware \"%s\"\n",
- rdev->mc_fw->size, fw_name);
- err = -EINVAL;
- }
- DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
- } else {
- err = radeon_ucode_validate(rdev->mc_fw);
- if (err) {
- printk(KERN_ERR
- "si_cp: validation failed for firmware \"%s\"\n",
- fw_name);
- goto out;
- } else {
- new_fw++;
- }
+ snprintf(fw_name, sizeof(fw_name), "radeon-%s_ce", chip_name);
+ err = loadfirmware(fw_name, &rdev->ce_fw, &rdev->ce_fw_size);
+ if (err)
+ goto out;
+ if (rdev->ce_fw_size != ce_req_size) {
+ DRM_ERROR(
+ "si_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->ce_fw_size, fw_name);
+ err = -EINVAL;
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
- err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
- if (err) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
- err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
- if (err) {
- printk(KERN_ERR
- "smc: error loading firmware \"%s\"\n",
- fw_name);
- release_firmware(rdev->smc_fw);
- rdev->smc_fw = NULL;
- err = 0;
- } else if (rdev->smc_fw->size != smc_req_size) {
- printk(KERN_ERR
- "si_smc: Bogus length %zu in firmware \"%s\"\n",
- rdev->smc_fw->size, fw_name);
- err = -EINVAL;
- }
- } else {
- err = radeon_ucode_validate(rdev->smc_fw);
- if (err) {
- printk(KERN_ERR
- "si_cp: validation failed for firmware \"%s\"\n",
- fw_name);
- goto out;
- } else {
- new_fw++;
- }
+ snprintf(fw_name, sizeof(fw_name), "radeon-%s_rlc", rlc_chip_name);
+ err = loadfirmware(fw_name, &rdev->rlc_fw, &rdev->rlc_fw_size);
+ if (err)
+ goto out;
+ if (rdev->rlc_fw_size != rlc_req_size) {
+ DRM_ERROR(
+ "si_rlc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->rlc_fw_size, fw_name);
+ err = -EINVAL;
}
- if (new_fw == 0) {
- rdev->new_fw = false;
- } else if (new_fw < 6) {
- printk(KERN_ERR "si_fw: mixing new and old firmware!\n");
+ snprintf(fw_name, sizeof(fw_name), "radeon-%s_mc", chip_name);
+ err = loadfirmware(fw_name, &rdev->mc_fw, &rdev->mc_fw_size);
+ if (err)
+ goto out;
+ if (rdev->mc_fw_size != mc_req_size) {
+ DRM_ERROR(
+ "si_mc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->mc_fw_size, fw_name);
err = -EINVAL;
- } else {
- rdev->new_fw = true;
}
+
out:
if (err) {
if (err != -EINVAL)
printk(KERN_ERR
"si_cp: Failed to load firmware \"%s\"\n",
fw_name);
- release_firmware(rdev->pfp_fw);
- rdev->pfp_fw = NULL;
- release_firmware(rdev->me_fw);
- rdev->me_fw = NULL;
- release_firmware(rdev->ce_fw);
- rdev->ce_fw = NULL;
- release_firmware(rdev->rlc_fw);
- rdev->rlc_fw = NULL;
- release_firmware(rdev->mc_fw);
- rdev->mc_fw = NULL;
- release_firmware(rdev->smc_fw);
- rdev->smc_fw = NULL;
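+	/* unwind: free whichever firmware images were already loaded */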
+ if (rdev->pfp_fw) {
+ free(rdev->pfp_fw, M_DEVBUF, 0);
+ rdev->pfp_fw = NULL;
+ }
+ if (rdev->me_fw) {
+ free(rdev->me_fw, M_DEVBUF, 0);
+ rdev->me_fw = NULL;
+ }
+ if (rdev->ce_fw) {
+ free(rdev->ce_fw, M_DEVBUF, 0);
+ rdev->ce_fw = NULL;
+ }
+ if (rdev->rlc_fw) {
+ free(rdev->rlc_fw, M_DEVBUF, 0);
+ rdev->rlc_fw = NULL;
+ }
+ if (rdev->mc_fw) {
+ free(rdev->mc_fw, M_DEVBUF, 0);
+ rdev->mc_fw = NULL;
+ }
}
return err;
}
@@ -2250,8 +750,7 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
u32 lb_size, u32 num_heads)
{
struct drm_display_mode *mode = &radeon_crtc->base.mode;
- struct dce6_wm_params wm_low, wm_high;
- u32 dram_channels;
+ struct dce6_wm_params wm;
u32 pixel_period;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
@@ -2267,83 +766,38 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
priority_a_cnt = 0;
priority_b_cnt = 0;
- if (rdev->family == CHIP_ARUBA)
- dram_channels = evergreen_get_number_of_dram_channels(rdev);
- else
- dram_channels = si_get_number_of_dram_channels(rdev);
-
- /* watermark for high clocks */
- if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
- wm_high.yclk =
- radeon_dpm_get_mclk(rdev, false) * 10;
- wm_high.sclk =
- radeon_dpm_get_sclk(rdev, false) * 10;
- } else {
- wm_high.yclk = rdev->pm.current_mclk * 10;
- wm_high.sclk = rdev->pm.current_sclk * 10;
- }
-
- wm_high.disp_clk = mode->clock;
- wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
- wm_high.blank_time = line_time - wm_high.active_time;
- wm_high.interlaced = false;
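+	/* one shared watermark set, derived from the current memory and engine clocks */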
+ wm.yclk = rdev->pm.current_mclk * 10;
+ wm.sclk = rdev->pm.current_sclk * 10;
+ wm.disp_clk = mode->clock;
+ wm.src_width = mode->crtc_hdisplay;
+ wm.active_time = mode->crtc_hdisplay * pixel_period;
+ wm.blank_time = line_time - wm.active_time;
+ wm.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- wm_high.interlaced = true;
- wm_high.vsc = radeon_crtc->vsc;
- wm_high.vtaps = 1;
+ wm.interlaced = true;
+ wm.vsc = radeon_crtc->vsc;
+ wm.vtaps = 1;
if (radeon_crtc->rmx_type != RMX_OFF)
- wm_high.vtaps = 2;
- wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
- wm_high.lb_size = lb_size;
- wm_high.dram_channels = dram_channels;
- wm_high.num_heads = num_heads;
-
- /* watermark for low clocks */
- if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
- wm_low.yclk =
- radeon_dpm_get_mclk(rdev, true) * 10;
- wm_low.sclk =
- radeon_dpm_get_sclk(rdev, true) * 10;
- } else {
- wm_low.yclk = rdev->pm.current_mclk * 10;
- wm_low.sclk = rdev->pm.current_sclk * 10;
- }
-
- wm_low.disp_clk = mode->clock;
- wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
- wm_low.blank_time = line_time - wm_low.active_time;
- wm_low.interlaced = false;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- wm_low.interlaced = true;
- wm_low.vsc = radeon_crtc->vsc;
- wm_low.vtaps = 1;
- if (radeon_crtc->rmx_type != RMX_OFF)
- wm_low.vtaps = 2;
- wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
- wm_low.lb_size = lb_size;
- wm_low.dram_channels = dram_channels;
- wm_low.num_heads = num_heads;
+ wm.vtaps = 2;
+ wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
+ wm.lb_size = lb_size;
+ if (rdev->family == CHIP_ARUBA)
+ wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
+ else
+ wm.dram_channels = si_get_number_of_dram_channels(rdev);
+ wm.num_heads = num_heads;
/* set for high clocks */
- latency_watermark_a = min(dce6_latency_watermark(&wm_high), (u32)65535);
+ latency_watermark_a = min(dce6_latency_watermark(&wm), (u32)65535);
/* set for low clocks */
- latency_watermark_b = min(dce6_latency_watermark(&wm_low), (u32)65535);
+ /* wm.yclk = low clk; wm.sclk = low clk */
+ latency_watermark_b = min(dce6_latency_watermark(&wm), (u32)65535);
/* possibly force display priority to high */
/* should really do this at mode validation time... */
- if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
- !dce6_average_bandwidth_vs_available_bandwidth(&wm_high) ||
- !dce6_check_latency_hiding(&wm_high) ||
- (rdev->disp_priority == 2)) {
- DRM_DEBUG_KMS("force priority to high\n");
- priority_a_cnt |= PRIORITY_ALWAYS_ON;
- priority_b_cnt |= PRIORITY_ALWAYS_ON;
- }
- if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
- !dce6_average_bandwidth_vs_available_bandwidth(&wm_low) ||
- !dce6_check_latency_hiding(&wm_low) ||
+ if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
+ !dce6_average_bandwidth_vs_available_bandwidth(&wm) ||
+ !dce6_check_latency_hiding(&wm) ||
(rdev->disp_priority == 2)) {
DRM_DEBUG_KMS("force priority to high\n");
priority_a_cnt |= PRIORITY_ALWAYS_ON;
@@ -2373,9 +827,6 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
c.full = dfixed_div(c, a);
priority_b_mark = dfixed_trunc(c);
priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
-
- /* Save number of lines the linebuffer leads before the scanout */
- radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
}
/* select wm A */
@@ -2402,10 +853,6 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
- /* save values for DPM */
- radeon_crtc->line_time = line_time;
- radeon_crtc->wm_high = latency_watermark_a;
- radeon_crtc->wm_low = latency_watermark_b;
}
void dce6_bandwidth_update(struct radeon_device *rdev)
@@ -2415,9 +862,6 @@ void dce6_bandwidth_update(struct radeon_device *rdev)
u32 num_heads = 0, lb_size;
int i;
- if (!rdev->mode_info.mode_config_initialized)
- return;
-
radeon_update_display_priority(rdev);
for (i = 0; i < rdev->num_crtc; i++) {
@@ -2693,12 +1137,9 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
- rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
- } else if ((rdev->family == CHIP_VERDE) ||
- (rdev->family == CHIP_OLAND) ||
- (rdev->family == CHIP_HAINAN)) {
+ } else if (rdev->family == CHIP_VERDE) {
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
switch (reg_offset) {
case 0: /* non-AA compressed depth or any compressed stencil */
@@ -2935,7 +1376,6 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
- rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else
@@ -3146,40 +1586,6 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
break;
- case CHIP_OLAND:
- rdev->config.si.max_shader_engines = 1;
- rdev->config.si.max_tile_pipes = 4;
- rdev->config.si.max_cu_per_sh = 6;
- rdev->config.si.max_sh_per_se = 1;
- rdev->config.si.max_backends_per_se = 2;
- rdev->config.si.max_texture_channel_caches = 4;
- rdev->config.si.max_gprs = 256;
- rdev->config.si.max_gs_threads = 16;
- rdev->config.si.max_hw_contexts = 8;
-
- rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
- rdev->config.si.sc_prim_fifo_size_backend = 0x40;
- rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
- rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
- gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
- break;
- case CHIP_HAINAN:
- rdev->config.si.max_shader_engines = 1;
- rdev->config.si.max_tile_pipes = 4;
- rdev->config.si.max_cu_per_sh = 5;
- rdev->config.si.max_sh_per_se = 1;
- rdev->config.si.max_backends_per_se = 1;
- rdev->config.si.max_texture_channel_caches = 2;
- rdev->config.si.max_gprs = 256;
- rdev->config.si.max_gs_threads = 16;
- rdev->config.si.max_hw_contexts = 8;
-
- rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
- rdev->config.si.sc_prim_fifo_size_backend = 0x40;
- rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
- rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
- gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN;
- break;
}
/* Initialize HDP */
@@ -3192,8 +1598,6 @@ static void si_gpu_init(struct radeon_device *rdev)
}
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
- WREG32(SRBM_INT_CNTL, 1);
- WREG32(SRBM_INT_ACK, 1);
evergreen_fix_pci_max_read_req_size(rdev);
@@ -3275,11 +1679,6 @@ static void si_gpu_init(struct radeon_device *rdev)
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
- if (rdev->has_uvd) {
- WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
- WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
- WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
- }
si_tiling_mode_table_init(rdev);
@@ -3291,13 +1690,6 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.max_sh_per_se,
rdev->config.si.max_cu_per_sh);
- rdev->config.si.active_cus = 0;
- for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
- for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
- rdev->config.si.active_cus +=
- hweight32(si_get_cu_active_bitmap(rdev, i, j));
- }
- }
/* set HW defaults for 3D engine */
WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
@@ -3386,7 +1778,7 @@ void si_fence_ring_emit(struct radeon_device *rdev,
/* EVENT_WRITE_EOP - flush caches, send int */
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
- radeon_ring_write(ring, lower_32_bits(addr));
+ radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
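Both spellings of the low-dword write above are equivalent: in Linux, lower_32_bits(n) is defined as ((u32)((n) & 0xffffffff)), so the revert only changes the spelling. A minimal standalone sketch of the split the EVENT_WRITE_EOP packet expects, assuming SI's 40-bit MC addressing; addr_lo/addr_hi are illustrative names, not driver functions:

	typedef unsigned int u32;
	typedef unsigned long long u64;

	static inline u32 addr_lo(u64 addr)
	{
		return (u32)(addr & 0xffffffff);	/* low dword of the fence address */
	}

	static inline u32 addr_hi(u64 addr)
	{
		return (u32)(addr >> 32) & 0xff;	/* bits 39:32, as masked above */
	}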
@@ -3398,7 +1790,6 @@ void si_fence_ring_emit(struct radeon_device *rdev,
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
- unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
u32 header;
if (ib->is_const_ib) {
@@ -3420,7 +1811,7 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
radeon_ring_write(ring, (1 << 8));
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
radeon_ring_write(ring, next_rptr);
}
@@ -3434,13 +1825,14 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
#endif
(ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
- radeon_ring_write(ring, ib->length_dw | (vm_id << 24));
+ radeon_ring_write(ring, ib->length_dw |
+ (ib->vm ? (ib->vm->id << 24) : 0));
if (!ib->is_const_ib) {
/* flush read cache over gart for this vmid */
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, vm_id);
+ radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
PACKET3_TC_ACTION_ENA |
@@ -3460,8 +1852,7 @@ static void si_cp_enable(struct radeon_device *rdev, bool enable)
if (enable)
WREG32(CP_ME_CNTL, 0);
else {
- if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
WREG32(SCRATCH_UMSK, 0);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
@@ -3473,77 +1864,34 @@ static void si_cp_enable(struct radeon_device *rdev, bool enable)
static int si_cp_load_microcode(struct radeon_device *rdev)
{
+ const __be32 *fw_data;
int i;
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
+ if (!rdev->me_fw || !rdev->pfp_fw)
return -EINVAL;
si_cp_enable(rdev, false);
- if (rdev->new_fw) {
- const struct gfx_firmware_header_v1_0 *pfp_hdr =
- (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
- const struct gfx_firmware_header_v1_0 *ce_hdr =
- (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
- const struct gfx_firmware_header_v1_0 *me_hdr =
- (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
- const __le32 *fw_data;
- u32 fw_size;
-
- radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
- radeon_ucode_print_gfx_hdr(&ce_hdr->header);
- radeon_ucode_print_gfx_hdr(&me_hdr->header);
-
- /* PFP */
- fw_data = (const __le32 *)
- (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
- fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
- WREG32(CP_PFP_UCODE_ADDR, 0);
- for (i = 0; i < fw_size; i++)
- WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
- WREG32(CP_PFP_UCODE_ADDR, 0);
-
- /* CE */
- fw_data = (const __le32 *)
- (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
- fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
- WREG32(CP_CE_UCODE_ADDR, 0);
- for (i = 0; i < fw_size; i++)
- WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
- WREG32(CP_CE_UCODE_ADDR, 0);
-
- /* ME */
-		fw_data = (const __le32 *)
- (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
- fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
- WREG32(CP_ME_RAM_WADDR, 0);
- for (i = 0; i < fw_size; i++)
- WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
- WREG32(CP_ME_RAM_WADDR, 0);
- } else {
- const __be32 *fw_data;
-
- /* PFP */
- fw_data = (const __be32 *)rdev->pfp_fw->data;
- WREG32(CP_PFP_UCODE_ADDR, 0);
- for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
- WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_PFP_UCODE_ADDR, 0);
-
- /* CE */
- fw_data = (const __be32 *)rdev->ce_fw->data;
- WREG32(CP_CE_UCODE_ADDR, 0);
- for (i = 0; i < SI_CE_UCODE_SIZE; i++)
- WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_CE_UCODE_ADDR, 0);
-
- /* ME */
- fw_data = (const __be32 *)rdev->me_fw->data;
- WREG32(CP_ME_RAM_WADDR, 0);
- for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
- WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_ME_RAM_WADDR, 0);
- }
+ /* PFP */
+ fw_data = (const __be32 *)rdev->pfp_fw;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ /* CE */
+ fw_data = (const __be32 *)rdev->ce_fw;
+ WREG32(CP_CE_UCODE_ADDR, 0);
+ for (i = 0; i < SI_CE_UCODE_SIZE; i++)
+ WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_CE_UCODE_ADDR, 0);
+
+ /* ME */
+ fw_data = (const __be32 *)rdev->me_fw;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_ME_RAM_WADDR, 0);
WREG32(CP_PFP_UCODE_ADDR, 0);
WREG32(CP_CE_UCODE_ADDR, 0);
@@ -3576,7 +1924,7 @@ static int si_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
radeon_ring_write(ring, 0xc000);
radeon_ring_write(ring, 0xe000);
- radeon_ring_unlock_commit(rdev, ring, false);
+ radeon_ring_unlock_commit(rdev, ring);
si_cp_enable(rdev, true);
@@ -3605,7 +1953,7 @@ static int si_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
- radeon_ring_unlock_commit(rdev, ring, false);
+ radeon_ring_unlock_commit(rdev, ring);
for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
ring = &rdev->ring[i];
@@ -3615,7 +1963,7 @@ static int si_cp_start(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(ring, 0);
- radeon_ring_unlock_commit(rdev, ring, false);
+ radeon_ring_unlock_commit(rdev, ring);
}
return 0;
@@ -3646,7 +1994,16 @@ static int si_cp_resume(struct radeon_device *rdev)
u32 rb_bufsz;
int r;
- si_enable_gui_idle_interrupt(rdev, false);
+ /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+ WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+ SOFT_RESET_PA |
+ SOFT_RESET_VGT |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SX));
+ RREG32(GRBM_SOFT_RESET);
+ mdelay(15);
+ WREG32(GRBM_SOFT_RESET, 0);
+ RREG32(GRBM_SOFT_RESET);
WREG32(CP_SEM_WAIT_TIMER, 0x0);
WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
@@ -3660,8 +2017,8 @@ static int si_cp_resume(struct radeon_device *rdev)
/* ring 0 - compute and gfx */
/* Set ring buffer size */
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- rb_bufsz = order_base_2(ring->ring_size / 8);
- tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = drm_order(ring->ring_size / 8);
+ tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
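order_base_2() and the older drm_order() both evaluate to ceil(log2(size)), so the substitutions in this function are pure renames. A standalone sketch of that computation (ceil_log2 is an illustrative name); for a 256 KiB ring, ring_size / 8 = 32768 and the result is 15:

	static unsigned int ceil_log2(unsigned long size)
	{
		unsigned int order = 0;
		unsigned long tmp;

		/* floor(log2(size)): count how often size halves */
		for (tmp = size >> 1; tmp != 0; tmp >>= 1)
			order++;
		/* round up when size is not a power of two */
		if (size & (size - 1))
			order++;
		return order;
	}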
@@ -3688,11 +2045,13 @@ static int si_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
+ ring->rptr = RREG32(CP_RB0_RPTR);
+
/* ring1 - compute only */
/* Set ring buffer size */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
- rb_bufsz = order_base_2(ring->ring_size / 8);
- tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = drm_order(ring->ring_size / 8);
+ tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -3712,11 +2071,13 @@ static int si_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
+ ring->rptr = RREG32(CP_RB1_RPTR);
+
/* ring2 - compute only */
/* Set ring buffer size */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
- rb_bufsz = order_base_2(ring->ring_size / 8);
- tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = drm_order(ring->ring_size / 8);
+ tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -3736,6 +2097,8 @@ static int si_cp_resume(struct radeon_device *rdev)
WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
+ ring->rptr = RREG32(CP_RB2_RPTR);
+
/* start the rings */
si_cp_start(rdev);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
@@ -3757,373 +2120,157 @@ static int si_cp_resume(struct radeon_device *rdev)
rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
}
- si_enable_gui_idle_interrupt(rdev, true);
-
- if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
-
return 0;
}
-u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
+bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
- u32 reset_mask = 0;
- u32 tmp;
-
- /* GRBM_STATUS */
- tmp = RREG32(GRBM_STATUS);
- if (tmp & (PA_BUSY | SC_BUSY |
- BCI_BUSY | SX_BUSY |
- TA_BUSY | VGT_BUSY |
- DB_BUSY | CB_BUSY |
- GDS_BUSY | SPI_BUSY |
- IA_BUSY | IA_BUSY_NO_DMA))
- reset_mask |= RADEON_RESET_GFX;
-
- if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
- CP_BUSY | CP_COHERENCY_BUSY))
- reset_mask |= RADEON_RESET_CP;
-
- if (tmp & GRBM_EE_BUSY)
- reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
-
- /* GRBM_STATUS2 */
- tmp = RREG32(GRBM_STATUS2);
- if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
- reset_mask |= RADEON_RESET_RLC;
-
- /* DMA_STATUS_REG 0 */
- tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
- if (!(tmp & DMA_IDLE))
- reset_mask |= RADEON_RESET_DMA;
-
- /* DMA_STATUS_REG 1 */
- tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
- if (!(tmp & DMA_IDLE))
- reset_mask |= RADEON_RESET_DMA1;
-
- /* SRBM_STATUS2 */
- tmp = RREG32(SRBM_STATUS2);
- if (tmp & DMA_BUSY)
- reset_mask |= RADEON_RESET_DMA;
-
- if (tmp & DMA1_BUSY)
- reset_mask |= RADEON_RESET_DMA1;
+ u32 srbm_status;
+ u32 grbm_status, grbm_status2;
+ u32 grbm_status_se0, grbm_status_se1;
- /* SRBM_STATUS */
- tmp = RREG32(SRBM_STATUS);
-
- if (tmp & IH_BUSY)
- reset_mask |= RADEON_RESET_IH;
-
- if (tmp & SEM_BUSY)
- reset_mask |= RADEON_RESET_SEM;
-
- if (tmp & GRBM_RQ_PENDING)
- reset_mask |= RADEON_RESET_GRBM;
-
- if (tmp & VMC_BUSY)
- reset_mask |= RADEON_RESET_VMC;
-
- if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
- MCC_BUSY | MCD_BUSY))
- reset_mask |= RADEON_RESET_MC;
-
- if (evergreen_is_display_hung(rdev))
- reset_mask |= RADEON_RESET_DISPLAY;
-
- /* VM_L2_STATUS */
- tmp = RREG32(VM_L2_STATUS);
- if (tmp & L2_BUSY)
- reset_mask |= RADEON_RESET_VMC;
-
-	/* Skip MC reset as it's most likely not hung, just busy */
- if (reset_mask & RADEON_RESET_MC) {
- DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
- reset_mask &= ~RADEON_RESET_MC;
+ srbm_status = RREG32(SRBM_STATUS);
+ grbm_status = RREG32(GRBM_STATUS);
+ grbm_status2 = RREG32(GRBM_STATUS2);
+ grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
+ grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
+ if (!(grbm_status & GUI_ACTIVE)) {
+ radeon_ring_lockup_update(ring);
+ return false;
}
-
- return reset_mask;
+ /* force CP activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
}
-static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+static void si_gpu_soft_reset_gfx(struct radeon_device *rdev)
{
- struct evergreen_mc_save save;
- u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
- u32 tmp;
+ u32 grbm_reset = 0;
- if (reset_mask == 0)
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
return;
- dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
-
- evergreen_print_gpu_status_regs(rdev);
- dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
- RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
- dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
- RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
-
- /* disable PG/CG */
- si_fini_pg(rdev);
- si_fini_cg(rdev);
-
- /* stop the rlc */
- si_rlc_stop(rdev);
+ dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ RREG32(GRBM_STATUS));
+ dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
+ RREG32(GRBM_STATUS2));
+ dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ RREG32(GRBM_STATUS_SE0));
+ dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ RREG32(GRBM_STATUS_SE1));
+ dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ RREG32(SRBM_STATUS));
/* Disable CP parsing/prefetching */
WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
- if (reset_mask & RADEON_RESET_DMA) {
- /* dma0 */
- tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
- }
- if (reset_mask & RADEON_RESET_DMA1) {
- /* dma1 */
- tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
- tmp &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
- }
-
+ /* reset all the gfx blocks */
+ grbm_reset = (SOFT_RESET_CP |
+ SOFT_RESET_CB |
+ SOFT_RESET_DB |
+ SOFT_RESET_GDS |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_BCI |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SX |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VGT |
+ SOFT_RESET_IA);
+
+ dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
+ WREG32(GRBM_SOFT_RESET, grbm_reset);
+ (void)RREG32(GRBM_SOFT_RESET);
udelay(50);
-
- evergreen_mc_stop(rdev, &save);
- if (evergreen_mc_wait_for_idle(rdev)) {
-		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
- }
-
- if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) {
- grbm_soft_reset = SOFT_RESET_CB |
- SOFT_RESET_DB |
- SOFT_RESET_GDS |
- SOFT_RESET_PA |
- SOFT_RESET_SC |
- SOFT_RESET_BCI |
- SOFT_RESET_SPI |
- SOFT_RESET_SX |
- SOFT_RESET_TC |
- SOFT_RESET_TA |
- SOFT_RESET_VGT |
- SOFT_RESET_IA;
- }
-
- if (reset_mask & RADEON_RESET_CP) {
- grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
-
- srbm_soft_reset |= SOFT_RESET_GRBM;
- }
-
- if (reset_mask & RADEON_RESET_DMA)
- srbm_soft_reset |= SOFT_RESET_DMA;
-
- if (reset_mask & RADEON_RESET_DMA1)
- srbm_soft_reset |= SOFT_RESET_DMA1;
-
- if (reset_mask & RADEON_RESET_DISPLAY)
- srbm_soft_reset |= SOFT_RESET_DC;
-
- if (reset_mask & RADEON_RESET_RLC)
- grbm_soft_reset |= SOFT_RESET_RLC;
-
- if (reset_mask & RADEON_RESET_SEM)
- srbm_soft_reset |= SOFT_RESET_SEM;
-
- if (reset_mask & RADEON_RESET_IH)
- srbm_soft_reset |= SOFT_RESET_IH;
-
- if (reset_mask & RADEON_RESET_GRBM)
- srbm_soft_reset |= SOFT_RESET_GRBM;
-
- if (reset_mask & RADEON_RESET_VMC)
- srbm_soft_reset |= SOFT_RESET_VMC;
-
- if (reset_mask & RADEON_RESET_MC)
- srbm_soft_reset |= SOFT_RESET_MC;
-
- if (grbm_soft_reset) {
- tmp = RREG32(GRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(GRBM_SOFT_RESET, tmp);
- tmp = RREG32(GRBM_SOFT_RESET);
-
- udelay(50);
-
- tmp &= ~grbm_soft_reset;
- WREG32(GRBM_SOFT_RESET, tmp);
- tmp = RREG32(GRBM_SOFT_RESET);
- }
-
- if (srbm_soft_reset) {
- tmp = RREG32(SRBM_SOFT_RESET);
- tmp |= srbm_soft_reset;
- dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
-
- udelay(50);
-
- tmp &= ~srbm_soft_reset;
- WREG32(SRBM_SOFT_RESET, tmp);
- tmp = RREG32(SRBM_SOFT_RESET);
- }
-
- /* Wait a little for things to settle down */
- udelay(50);
-
- evergreen_mc_resume(rdev, &save);
- udelay(50);
-
- evergreen_print_gpu_status_regs(rdev);
+ WREG32(GRBM_SOFT_RESET, 0);
+ (void)RREG32(GRBM_SOFT_RESET);
+
+ dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ RREG32(GRBM_STATUS));
+ dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
+ RREG32(GRBM_STATUS2));
+ dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ RREG32(GRBM_STATUS_SE0));
+ dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ RREG32(GRBM_STATUS_SE1));
+ dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ RREG32(SRBM_STATUS));
}
-static void si_set_clk_bypass_mode(struct radeon_device *rdev)
-{
- u32 tmp, i;
-
- tmp = RREG32(CG_SPLL_FUNC_CNTL);
- tmp |= SPLL_BYPASS_EN;
- WREG32(CG_SPLL_FUNC_CNTL, tmp);
-
- tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
- tmp |= SPLL_CTLREQ_CHG;
- WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
-
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
- break;
- udelay(1);
- }
-
- tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
- tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
- WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
-
- tmp = RREG32(MPLL_CNTL_MODE);
- tmp &= ~MPLL_MCLK_SEL;
- WREG32(MPLL_CNTL_MODE, tmp);
-}
-
-static void si_spll_powerdown(struct radeon_device *rdev)
+static void si_gpu_soft_reset_dma(struct radeon_device *rdev)
{
u32 tmp;
- tmp = RREG32(SPLL_CNTL_MODE);
- tmp |= SPLL_SW_DIR_CONTROL;
- WREG32(SPLL_CNTL_MODE, tmp);
-
- tmp = RREG32(CG_SPLL_FUNC_CNTL);
- tmp |= SPLL_RESET;
- WREG32(CG_SPLL_FUNC_CNTL, tmp);
-
- tmp = RREG32(CG_SPLL_FUNC_CNTL);
- tmp |= SPLL_SLEEP;
- WREG32(CG_SPLL_FUNC_CNTL, tmp);
-
- tmp = RREG32(SPLL_CNTL_MODE);
- tmp &= ~SPLL_SW_DIR_CONTROL;
- WREG32(SPLL_CNTL_MODE, tmp);
-}
-
-static void si_gpu_pci_config_reset(struct radeon_device *rdev)
-{
- struct evergreen_mc_save save;
- u32 tmp, i;
-
- dev_info(rdev->dev, "GPU pci config reset\n");
-
- /* disable dpm? */
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ return;
- /* disable cg/pg */
- si_fini_pg(rdev);
- si_fini_cg(rdev);
+ dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
- /* Disable CP parsing/prefetching */
- WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
/* dma0 */
tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
tmp &= ~DMA_RB_ENABLE;
WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+
/* dma1 */
tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
tmp &= ~DMA_RB_ENABLE;
WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
- /* XXX other engines? */
-
- /* halt the rlc, disable cp internal ints */
- si_rlc_stop(rdev);
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+ RREG32(SRBM_SOFT_RESET);
udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
- /* disable mem access */
- evergreen_mc_stop(rdev, &save);
- if (evergreen_mc_wait_for_idle(rdev)) {
-		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
- }
-
- /* set mclk/sclk to bypass */
- si_set_clk_bypass_mode(rdev);
- /* powerdown spll */
- si_spll_powerdown(rdev);
- /* disable BM */
- pci_clear_master(rdev->pdev);
- /* reset */
- radeon_pci_config_reset(rdev);
- /* wait for asic to come out of reset */
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
- break;
- udelay(1);
- }
+ dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n",
+ RREG32(DMA_STATUS_REG));
}
-int si_asic_reset(struct radeon_device *rdev)
+static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
- u32 reset_mask;
+ struct evergreen_mc_save save;
- reset_mask = si_gpu_check_soft_reset(rdev);
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
- if (reset_mask)
- r600_set_bios_scratch_engine_hung(rdev, true);
+ if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+ reset_mask &= ~RADEON_RESET_DMA;
- /* try soft reset */
- si_gpu_soft_reset(rdev, reset_mask);
+ if (reset_mask == 0)
+ return 0;
+
+ dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
- reset_mask = si_gpu_check_soft_reset(rdev);
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+
+ evergreen_mc_stop(rdev, &save);
+ if (radeon_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
- /* try pci config reset */
- if (reset_mask && radeon_hard_reset)
- si_gpu_pci_config_reset(rdev);
+ if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+ si_gpu_soft_reset_gfx(rdev);
- reset_mask = si_gpu_check_soft_reset(rdev);
+ if (reset_mask & RADEON_RESET_DMA)
+ si_gpu_soft_reset_dma(rdev);
- if (!reset_mask)
- r600_set_bios_scratch_engine_hung(rdev, false);
+ /* Wait a little for things to settle down */
+ udelay(50);
+ evergreen_mc_resume(rdev, &save);
return 0;
}
-/**
- * si_gfx_is_lockup - Check if the GFX engine is locked up
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Check if the GFX engine is locked up.
- * Returns true if the engine appears to be locked up, false if not.
- */
-bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+int si_asic_reset(struct radeon_device *rdev)
{
- u32 reset_mask = si_gpu_check_soft_reset(rdev);
-
- if (!(reset_mask & (RADEON_RESET_GFX |
- RADEON_RESET_COMPUTE |
- RADEON_RESET_CP))) {
- radeon_ring_lockup_update(rdev, ring);
- return false;
- }
- return radeon_ring_test_lockup(rdev, ring);
+ return si_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+ RADEON_RESET_COMPUTE |
+ RADEON_RESET_DMA));
}
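si_gpu_soft_reset() trims the requested mask before acting: an idle GFX block (GUI_ACTIVE clear) drops the GFX and COMPUTE bits, and an idle DMA engine drops the DMA bit, so only engines that are actually busy get reset. The same trimming in isolation, as a sketch; trim_reset_mask is an illustrative name, the RADEON_RESET_* flags are the driver's own:

	static u32 trim_reset_mask(u32 mask, bool gui_active, bool dma_idle)
	{
		if (!gui_active)
			mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
		if (dma_idle)
			mask &= ~RADEON_RESET_DMA;
		return mask;
	}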
/* MC */
@@ -4147,9 +2294,8 @@ static void si_mc_program(struct radeon_device *rdev)
if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
}
- if (!ASIC_IS_NODCE(rdev))
-		/* Lock out access through the VGA aperture */
- WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+	/* Lock out access through the VGA aperture */
+ WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
/* Update configuration */
WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
rdev->mc.vram_start >> 12);
@@ -4171,15 +2317,53 @@ static void si_mc_program(struct radeon_device *rdev)
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
}
evergreen_mc_resume(rdev, &save);
- if (!ASIC_IS_NODCE(rdev)) {
- /* we need to own VRAM, so turn off the VGA renderer here
- * to stop it overwriting our objects */
- rv515_vga_render_disable(rdev);
+ /* we need to own VRAM, so turn off the VGA renderer here
+ * to stop it overwriting our objects */
+ rv515_vga_render_disable(rdev);
+}
+
+/* SI MC address space is 40 bits */
+static void si_vram_location(struct radeon_device *rdev,
+ struct radeon_mc *mc, u64 base)
+{
+ mc->vram_start = base;
+ if (mc->mc_vram_size > (0xFFFFFFFFFFULL - base + 1)) {
+ dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
+ mc->real_vram_size = mc->aper_size;
+ mc->mc_vram_size = mc->aper_size;
+ }
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
+ mc->mc_vram_size >> 20, mc->vram_start,
+ mc->vram_end, mc->real_vram_size >> 20);
+}
+
+static void si_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+ u64 size_af, size_bf;
+
+ size_af = ((0xFFFFFFFFFFULL - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
+ size_bf = mc->vram_start & ~mc->gtt_base_align;
+ if (size_bf > size_af) {
+ if (mc->gtt_size > size_bf) {
+ dev_warn(rdev->dev, "limiting GTT\n");
+ mc->gtt_size = size_bf;
+ }
+ mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
+ } else {
+ if (mc->gtt_size > size_af) {
+ dev_warn(rdev->dev, "limiting GTT\n");
+ mc->gtt_size = size_af;
+ }
+ mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
}
+ mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
+ dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
+ mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
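si_gtt_location() puts the GTT in whichever gap around VRAM is larger within the 40-bit MC space, shrinking the GTT if neither gap can hold it. A standalone sketch of the placement math under assumed sizes (2 GiB of VRAM at base 0, 1 GiB of GTT, a gtt_base_align of 0, treated as a mask exactly as above):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint64_t mc_top = 0xFFFFFFFFFFULL;	/* top of the 40-bit MC space */
		uint64_t vram_start = 0;
		uint64_t vram_end = (2ULL << 30) - 1;		/* 2 GiB of VRAM */
		uint64_t gtt_size = 1ULL << 30;			/* 1 GiB of GTT */
		uint64_t align = 0;				/* mc->gtt_base_align */
		uint64_t size_af = ((mc_top - vram_end) + align) & ~align;
		uint64_t size_bf = vram_start & ~align;
		uint64_t gtt_start;

		if (size_bf > size_af)
			gtt_start = (vram_start & ~align) - gtt_size;
		else
			gtt_start = (vram_end + 1 + align) & ~align;

		/* prints 0x0080000000: the GTT lands directly above VRAM */
		printf("GTT at 0x%010llx\n", (unsigned long long)gtt_start);
		return 0;
	}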
-void si_vram_gtt_location(struct radeon_device *rdev,
- struct radeon_mc *mc)
+static void si_vram_gtt_location(struct radeon_device *rdev,
+ struct radeon_mc *mc)
{
if (mc->mc_vram_size > 0xFFC0000000ULL) {
/* leave room for at least 1024M GTT */
@@ -4187,9 +2371,9 @@ void si_vram_gtt_location(struct radeon_device *rdev,
mc->real_vram_size = 0xFFC0000000ULL;
mc->mc_vram_size = 0xFFC0000000ULL;
}
- radeon_vram_location(rdev, &rdev->mc, 0);
+ si_vram_location(rdev, &rdev->mc, 0);
rdev->mc.gtt_base_align = 0;
- radeon_gtt_location(rdev, mc);
+ si_gtt_location(rdev, mc);
}
static int si_mc_init(struct radeon_device *rdev)
@@ -4282,25 +2466,23 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
+ radeon_gart_restore(rdev);
/* Setup TLB control */
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
ENABLE_L1_TLB |
- ENABLE_L1_FRAGMENT_PROCESSING |
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
ENABLE_ADVANCED_DRIVER_MODEL |
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
- ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
EFFECTIVE_L2_QUEUE_SIZE(7) |
CONTEXT1_IDENTITY_ACCESS_MODE(1));
WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
- BANK_SELECT(4) |
- L2_CACHE_BIGK_FRAGMENT_SIZE(4));
+ L2_CACHE_BIGK_FRAGMENT_SIZE(0));
/* setup context0 */
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
@@ -4318,7 +2500,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
/* empty context1-15 */
/* set vm size, must be a multiple of 4 */
WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
- WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
+ WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
/* Assign the pt base to something valid for now; the pts used for
 * the VMs are determined by the application and set up and assigned
* on the fly in the vm part of radeon_gart.c
@@ -4326,10 +2508,10 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
for (i = 1; i < 16; i++) {
if (i < 8)
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
- rdev->vm_manager.saved_table_addr[i]);
+ rdev->gart.table_addr >> 12);
else
WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
- rdev->vm_manager.saved_table_addr[i]);
+ rdev->gart.table_addr >> 12);
}
/* enable context1-15 */
@@ -4337,7 +2519,6 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
(u32)(rdev->dummy_page.addr >> 12));
WREG32(VM_CONTEXT1_CNTL2, 4);
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
- PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
@@ -4361,17 +2542,6 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
static void si_pcie_gart_disable(struct radeon_device *rdev)
{
- unsigned i;
-
- for (i = 1; i < 16; ++i) {
- uint32_t reg;
- if (i < 8)
- reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2);
- else
- reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2);
- rdev->vm_manager.saved_table_addr[i] = RREG32(reg);
- }
-
/* Disable all tables */
WREG32(VM_CONTEXT0_CNTL, 0);
WREG32(VM_CONTEXT1_CNTL, 0);
@@ -4720,24 +2890,24 @@ static int si_vm_packet3_compute_check(struct radeon_device *rdev,
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
int ret = 0;
- u32 idx = 0, i;
+ u32 idx = 0;
struct radeon_cs_packet pkt;
do {
pkt.idx = idx;
- pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
- pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
+ pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
+ pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
pkt.one_reg_wr = 0;
switch (pkt.type) {
- case RADEON_PACKET_TYPE0:
+ case PACKET_TYPE0:
dev_err(rdev->dev, "Packet0 not allowed!\n");
ret = -EINVAL;
break;
- case RADEON_PACKET_TYPE2:
+ case PACKET_TYPE2:
idx += 1;
break;
- case RADEON_PACKET_TYPE3:
- pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+ case PACKET_TYPE3:
+ pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
if (ib->is_const_ib)
ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
else {
@@ -4762,15 +2932,8 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
ret = -EINVAL;
break;
}
- if (ret) {
- for (i = 0; i < ib->length_dw; i++) {
- if (i == idx)
- printk("\t0x%08x <---\n", ib->ptr[i]);
- else
- printk("\t0x%08x\n", ib->ptr[i]);
- }
+ if (ret)
break;
- }
} while (idx < ib->length_dw);
return ret;
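The CP_PACKET_GET_* macros above decode the standard PM4 command header: packet type in bits 31:30, dword count (N - 1 encoded) in bits 29:16, and, for type-3 packets, the opcode in bits 15:8. A standalone sketch of the same decoding; the pm4_* names are illustrative:

	typedef unsigned int u32;

	static inline unsigned int pm4_type(u32 header)
	{
		return (header >> 30) & 0x3;		/* 0, 2 or 3 on SI */
	}

	static inline unsigned int pm4_count(u32 header)
	{
		return (header >> 16) & 0x3fff;		/* payload dwords minus one */
	}

	static inline unsigned int pm4_opcode(u32 header)
	{
		return (header >> 8) & 0xff;		/* meaningful for type 3 only */
	}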
@@ -4794,290 +2957,132 @@ void si_vm_fini(struct radeon_device *rdev)
}
/**
- * si_vm_decode_fault - print human readable fault info
+ * si_vm_set_page - update the page tables using the CP
*
* @rdev: radeon_device pointer
- * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
- * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
*
- * Print human readable fault information (SI).
+ * Update the page tables using the CP (cayman-si).
*/
-static void si_vm_decode_fault(struct radeon_device *rdev,
- u32 status, u32 addr)
-{
- u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
- u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
- u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
- char *block;
-
- if (rdev->family == CHIP_TAHITI) {
- switch (mc_id) {
- case 160:
- case 144:
- case 96:
- case 80:
- case 224:
- case 208:
- case 32:
- case 16:
- block = "CB";
- break;
- case 161:
- case 145:
- case 97:
- case 81:
- case 225:
- case 209:
- case 33:
- case 17:
- block = "CB_FMASK";
- break;
- case 162:
- case 146:
- case 98:
- case 82:
- case 226:
- case 210:
- case 34:
- case 18:
- block = "CB_CMASK";
- break;
- case 163:
- case 147:
- case 99:
- case 83:
- case 227:
- case 211:
- case 35:
- case 19:
- block = "CB_IMMED";
- break;
- case 164:
- case 148:
- case 100:
- case 84:
- case 228:
- case 212:
- case 36:
- case 20:
- block = "DB";
- break;
- case 165:
- case 149:
- case 101:
- case 85:
- case 229:
- case 213:
- case 37:
- case 21:
- block = "DB_HTILE";
- break;
- case 167:
- case 151:
- case 103:
- case 87:
- case 231:
- case 215:
- case 39:
- case 23:
- block = "DB_STEN";
- break;
- case 72:
- case 68:
- case 64:
- case 8:
- case 4:
- case 0:
- case 136:
- case 132:
- case 128:
- case 200:
- case 196:
- case 192:
- block = "TC";
- break;
- case 112:
- case 48:
- block = "CP";
- break;
- case 49:
- case 177:
- case 50:
- case 178:
- block = "SH";
- break;
- case 53:
- case 190:
- block = "VGT";
- break;
- case 117:
- block = "IH";
- break;
- case 51:
- case 115:
- block = "RLC";
- break;
- case 119:
- case 183:
- block = "DMA0";
- break;
- case 61:
- block = "DMA1";
- break;
- case 248:
- case 120:
- block = "HDP";
- break;
- default:
- block = "unknown";
- break;
+void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
+ uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+ uint64_t value;
+ unsigned ndw;
+
+ if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
+ while (count) {
+ ndw = 2 + count * 2;
+ if (ndw > 0x3FFE)
+ ndw = 0x3FFE;
+
+ radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ WRITE_DATA_DST_SEL(1)));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe));
+ for (; ndw > 2; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
+ }
}
} else {
- switch (mc_id) {
- case 32:
- case 16:
- case 96:
- case 80:
- case 160:
- case 144:
- case 224:
- case 208:
- block = "CB";
- break;
- case 33:
- case 17:
- case 97:
- case 81:
- case 161:
- case 145:
- case 225:
- case 209:
- block = "CB_FMASK";
- break;
- case 34:
- case 18:
- case 98:
- case 82:
- case 162:
- case 146:
- case 226:
- case 210:
- block = "CB_CMASK";
- break;
- case 35:
- case 19:
- case 99:
- case 83:
- case 163:
- case 147:
- case 227:
- case 211:
- block = "CB_IMMED";
- break;
- case 36:
- case 20:
- case 100:
- case 84:
- case 164:
- case 148:
- case 228:
- case 212:
- block = "DB";
- break;
- case 37:
- case 21:
- case 101:
- case 85:
- case 165:
- case 149:
- case 229:
- case 213:
- block = "DB_HTILE";
- break;
- case 39:
- case 23:
- case 103:
- case 87:
- case 167:
- case 151:
- case 231:
- case 215:
- block = "DB_STEN";
- break;
- case 72:
- case 68:
- case 8:
- case 4:
- case 136:
- case 132:
- case 200:
- case 196:
- block = "TC";
- break;
- case 112:
- case 48:
- block = "CP";
- break;
- case 49:
- case 177:
- case 50:
- case 178:
- block = "SH";
- break;
- case 53:
- block = "VGT";
- break;
- case 117:
- block = "IH";
- break;
- case 51:
- case 115:
- block = "RLC";
- break;
- case 119:
- case 183:
- block = "DMA0";
- break;
- case 61:
- block = "DMA1";
- break;
- case 248:
- case 120:
- block = "HDP";
- break;
- default:
- block = "unknown";
- break;
+ /* DMA */
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
+ }
+ }
+ } else {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ if (flags & RADEON_VM_PAGE_VALID)
+ value = addr;
+ else
+ value = 0;
+ /* for physically contiguous pages (vram) */
+ radeon_ring_write(ring, DMA_PTE_PDE_PACKET(ndw));
+ radeon_ring_write(ring, pe); /* dst addr */
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ radeon_ring_write(ring, r600_flags); /* mask */
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, value); /* value */
+ radeon_ring_write(ring, upper_32_bits(value));
+ radeon_ring_write(ring, incr); /* increment size */
+ radeon_ring_write(ring, 0);
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
+ }
}
}
-
- printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
- protections, vmid, addr,
- (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
- block, mc_id);
}
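Every entry si_vm_set_page() emits is a 64-bit PTE: a 4 KiB-aligned address (raw for VRAM, GART-translated for system pages) with the access flags OR'd into the low bits, exactly the 0xFFFFFFFFFFFFF000ULL masking above. Composing one entry in isolation, as a sketch; make_pte is an illustrative name:

	typedef unsigned long long u64;

	static u64 make_pte(u64 phys_addr, u64 flags)
	{
		/* keep bits 63:12 of the address, put flags in bits 11:0 */
		return (phys_addr & 0xFFFFFFFFFFFFF000ULL) | flags;
	}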
-void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ if (vm == NULL)
+ return;
+
/* write new base address */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
WRITE_DATA_DST_SEL(0)));
- if (vm_id < 8) {
+ if (vm->id < 8) {
radeon_ring_write(ring,
- (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
+ (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
} else {
radeon_ring_write(ring,
- (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
+ (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
}
radeon_ring_write(ring, 0);
- radeon_ring_write(ring, pd_addr >> 12);
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
/* flush hdp cache */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
radeon_ring_write(ring, 0);
@@ -5085,817 +3090,171 @@ void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
/* bits 0-15 are the VM contexts0-15 */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 1 << vm_id);
-
- /* wait for the invalidate to complete */
- radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
- radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */
- WAIT_REG_MEM_ENGINE(0))); /* me */
- radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0); /* ref */
- radeon_ring_write(ring, 0); /* mask */
- radeon_ring_write(ring, 0x20); /* poll interval */
+ radeon_ring_write(ring, 1 << vm->id);
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
}
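Each step of the flush above is the same 5-dword WRITE_DATA framing; between the two versions only the engine select changes (1 selects the PFP, 0 the ME in the convention assumed here), which is also why the function ends with PFP_SYNC_ME. The repeated pattern factored out as a sketch; pm4_write_reg is an illustrative name, the packet macros are the driver's own:

	static void pm4_write_reg(struct radeon_ring *ring, u32 reg, u32 val)
	{
		radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		radeon_ring_write(ring, WRITE_DATA_ENGINE_SEL(0) |
					WRITE_DATA_DST_SEL(0));
		radeon_ring_write(ring, reg >> 2);	/* dword register offset */
		radeon_ring_write(ring, 0);		/* upper address bits, unused for registers */
		radeon_ring_write(ring, val);
	}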
-/*
- * Power and clock gating
- */
-static void si_wait_for_rlc_serdes(struct radeon_device *rdev)
-{
- int i;
-
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(RLC_SERDES_MASTER_BUSY_0) == 0)
- break;
- udelay(1);
- }
-
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(RLC_SERDES_MASTER_BUSY_1) == 0)
- break;
- udelay(1);
- }
-}
-
-static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
- bool enable)
-{
- u32 tmp = RREG32(CP_INT_CNTL_RING0);
- u32 mask;
- int i;
-
- if (enable)
- tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
- else
- tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
- WREG32(CP_INT_CNTL_RING0, tmp);
-
- if (!enable) {
- /* read a gfx register */
- tmp = RREG32(DB_DEPTH_INFO);
-
- mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS;
- for (i = 0; i < rdev->usec_timeout; i++) {
- if ((RREG32(RLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
- break;
- udelay(1);
- }
- }
-}
-
-static void si_set_uvd_dcm(struct radeon_device *rdev,
- bool sw_mode)
-{
- u32 tmp, tmp2;
-
- tmp = RREG32(UVD_CGC_CTRL);
- tmp &= ~(CLK_OD_MASK | CG_DT_MASK);
- tmp |= DCM | CG_DT(1) | CLK_OD(4);
-
- if (sw_mode) {
- tmp &= ~0x7ffff800;
- tmp2 = DYN_OR_EN | DYN_RR_EN | G_DIV_ID(7);
- } else {
- tmp |= 0x7ffff800;
- tmp2 = 0;
- }
-
- WREG32(UVD_CGC_CTRL, tmp);
- WREG32_UVD_CTX(UVD_CGC_CTRL2, tmp2);
-}
-
-void si_init_uvd_internal_cg(struct radeon_device *rdev)
-{
- bool hw_mode = true;
-
- if (hw_mode) {
- si_set_uvd_dcm(rdev, false);
- } else {
- u32 tmp = RREG32(UVD_CGC_CTRL);
- tmp &= ~DCM;
- WREG32(UVD_CGC_CTRL, tmp);
- }
-}
-
-static u32 si_halt_rlc(struct radeon_device *rdev)
-{
- u32 data, orig;
-
- orig = data = RREG32(RLC_CNTL);
-
- if (data & RLC_ENABLE) {
- data &= ~RLC_ENABLE;
- WREG32(RLC_CNTL, data);
-
- si_wait_for_rlc_serdes(rdev);
- }
-
- return orig;
-}
-
-static void si_update_rlc(struct radeon_device *rdev, u32 rlc)
-{
- u32 tmp;
-
- tmp = RREG32(RLC_CNTL);
- if (tmp != rlc)
- WREG32(RLC_CNTL, rlc);
-}
-
-static void si_enable_dma_pg(struct radeon_device *rdev, bool enable)
-{
- u32 data, orig;
-
- orig = data = RREG32(DMA_PG);
- if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA))
- data |= PG_CNTL_ENABLE;
- else
- data &= ~PG_CNTL_ENABLE;
- if (orig != data)
- WREG32(DMA_PG, data);
-}
-
-static void si_init_dma_pg(struct radeon_device *rdev)
-{
- u32 tmp;
-
- WREG32(DMA_PGFSM_WRITE, 0x00002000);
- WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
-
- for (tmp = 0; tmp < 5; tmp++)
- WREG32(DMA_PGFSM_WRITE, 0);
-}
-
-static void si_enable_gfx_cgpg(struct radeon_device *rdev,
- bool enable)
-{
- u32 tmp;
-
- if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
- tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
- WREG32(RLC_TTOP_D, tmp);
-
- tmp = RREG32(RLC_PG_CNTL);
- tmp |= GFX_PG_ENABLE;
- WREG32(RLC_PG_CNTL, tmp);
-
- tmp = RREG32(RLC_AUTO_PG_CTRL);
- tmp |= AUTO_PG_EN;
- WREG32(RLC_AUTO_PG_CTRL, tmp);
- } else {
- tmp = RREG32(RLC_AUTO_PG_CTRL);
- tmp &= ~AUTO_PG_EN;
- WREG32(RLC_AUTO_PG_CTRL, tmp);
-
- tmp = RREG32(DB_RENDER_CONTROL);
- }
-}
-
-static void si_init_gfx_cgpg(struct radeon_device *rdev)
-{
- u32 tmp;
-
- WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
-
- tmp = RREG32(RLC_PG_CNTL);
- tmp |= GFX_PG_SRC;
- WREG32(RLC_PG_CNTL, tmp);
-
- WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
-
- tmp = RREG32(RLC_AUTO_PG_CTRL);
-
- tmp &= ~GRBM_REG_SGIT_MASK;
- tmp |= GRBM_REG_SGIT(0x700);
- tmp &= ~PG_AFTER_GRBM_REG_ST_MASK;
- WREG32(RLC_AUTO_PG_CTRL, tmp);
-}
-
-static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
-{
- u32 mask = 0, tmp, tmp1;
- int i;
-
- si_select_se_sh(rdev, se, sh);
- tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
- tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
- si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
-
- tmp &= 0xffff0000;
-
- tmp |= tmp1;
- tmp >>= 16;
-
- for (i = 0; i < rdev->config.si.max_cu_per_sh; i ++) {
- mask <<= 1;
- mask |= 1;
- }
-
- return (~tmp) & mask;
-}
-
-static void si_init_ao_cu_mask(struct radeon_device *rdev)
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
- u32 i, j, k, active_cu_number = 0;
- u32 mask, counter, cu_bitmap;
- u32 tmp = 0;
+ struct radeon_ring *ring = &rdev->ring[ridx];
- for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
- for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
- mask = 1;
- cu_bitmap = 0;
- counter = 0;
- for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
- if (si_get_cu_active_bitmap(rdev, i, j) & mask) {
- if (counter < 2)
- cu_bitmap |= mask;
- counter++;
- }
- mask <<= 1;
- }
-
- active_cu_number += counter;
- tmp |= (cu_bitmap << (i * 16 + j * 8));
- }
- }
-
- WREG32(RLC_PG_AO_CU_MASK, tmp);
-
- tmp = RREG32(RLC_MAX_PG_CU);
- tmp &= ~MAX_PU_CU_MASK;
- tmp |= MAX_PU_CU(active_cu_number);
- WREG32(RLC_MAX_PG_CU, tmp);
-}
-
-static void si_enable_cgcg(struct radeon_device *rdev,
- bool enable)
-{
- u32 data, orig, tmp;
-
- orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
-
- if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
- si_enable_gui_idle_interrupt(rdev, true);
-
- WREG32(RLC_GCPM_GENERAL_3, 0x00000080);
-
- tmp = si_halt_rlc(rdev);
-
- WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
- WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
- WREG32(RLC_SERDES_WR_CTRL, 0x00b000ff);
-
- si_wait_for_rlc_serdes(rdev);
-
- si_update_rlc(rdev, tmp);
-
- WREG32(RLC_SERDES_WR_CTRL, 0x007000ff);
-
- data |= CGCG_EN | CGLS_EN;
- } else {
- si_enable_gui_idle_interrupt(rdev, false);
-
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
-
- data &= ~(CGCG_EN | CGLS_EN);
- }
-
- if (orig != data)
- WREG32(RLC_CGCG_CGLS_CTRL, data);
-}
-
-static void si_enable_mgcg(struct radeon_device *rdev,
- bool enable)
-{
- u32 data, orig, tmp = 0;
-
- if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
- orig = data = RREG32(CGTS_SM_CTRL_REG);
- data = 0x96940200;
- if (orig != data)
- WREG32(CGTS_SM_CTRL_REG, data);
-
- if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
- orig = data = RREG32(CP_MEM_SLP_CNTL);
- data |= CP_MEM_LS_EN;
- if (orig != data)
- WREG32(CP_MEM_SLP_CNTL, data);
- }
-
- orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
- data &= 0xffffffc0;
- if (orig != data)
- WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
-
- tmp = si_halt_rlc(rdev);
-
- WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
- WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
- WREG32(RLC_SERDES_WR_CTRL, 0x00d000ff);
+ if (vm == NULL)
+ return;
- si_update_rlc(rdev, tmp);
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ if (vm->id < 8) {
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
} else {
- orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
- data |= 0x00000003;
- if (orig != data)
- WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
-
- data = RREG32(CP_MEM_SLP_CNTL);
- if (data & CP_MEM_LS_EN) {
- data &= ~CP_MEM_LS_EN;
- WREG32(CP_MEM_SLP_CNTL, data);
- }
- orig = data = RREG32(CGTS_SM_CTRL_REG);
- data |= LS_OVERRIDE | OVERRIDE;
- if (orig != data)
- WREG32(CGTS_SM_CTRL_REG, data);
-
- tmp = si_halt_rlc(rdev);
-
- WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
- WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
- WREG32(RLC_SERDES_WR_CTRL, 0x00e000ff);
-
- si_update_rlc(rdev, tmp);
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
}
-}
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
-static void si_enable_uvd_mgcg(struct radeon_device *rdev,
- bool enable)
-{
- u32 orig, data, tmp;
-
- if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
- tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
- tmp |= 0x3fff;
- WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);
-
- orig = data = RREG32(UVD_CGC_CTRL);
- data |= DCM;
- if (orig != data)
- WREG32(UVD_CGC_CTRL, data);
-
- WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0);
- WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0);
- } else {
- tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
- tmp &= ~0x3fff;
- WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);
-
- orig = data = RREG32(UVD_CGC_CTRL);
- data &= ~DCM;
- if (orig != data)
- WREG32(UVD_CGC_CTRL, data);
-
- WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0xffffffff);
- WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0xffffffff);
- }
+ /* flush hdp cache */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+
+ /* bits 0-7 are the VM contexts0-7 */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+ radeon_ring_write(ring, 1 << vm->id);
}
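The DMA ring reaches the same registers through 3-dword SRBM_WRITE packets; in the encoding assumed here the second dword carries a byte-enable mask in bits 19:16 (0xf enables all four bytes) above the dword register offset. Factored out as a sketch; dma_srbm_write is an illustrative name:

	static void dma_srbm_write(struct radeon_ring *ring, u32 reg, u32 val)
	{
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
		radeon_ring_write(ring, (0xf << 16) | (reg >> 2));
		radeon_ring_write(ring, val);
	}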
-static const u32 mc_cg_registers[] =
-{
- MC_HUB_MISC_HUB_CG,
- MC_HUB_MISC_SIP_CG,
- MC_HUB_MISC_VM_CG,
- MC_XPB_CLK_GAT,
- ATC_MISC_CG,
- MC_CITF_MISC_WR_CG,
- MC_CITF_MISC_RD_CG,
- MC_CITF_MISC_VM_CG,
- VM_L2_CG,
-};
-
-static void si_enable_mc_ls(struct radeon_device *rdev,
- bool enable)
+/*
+ * RLC
+ */
+void si_rlc_fini(struct radeon_device *rdev)
{
- int i;
- u32 orig, data;
-
- for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
- orig = data = RREG32(mc_cg_registers[i]);
- if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
- data |= MC_LS_ENABLE;
- else
- data &= ~MC_LS_ENABLE;
- if (data != orig)
- WREG32(mc_cg_registers[i], data);
- }
-}
+ int r;
-static void si_enable_mc_mgcg(struct radeon_device *rdev,
- bool enable)
-{
- int i;
- u32 orig, data;
+ /* save restore block */
+ if (rdev->rlc.save_restore_obj) {
+ r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r);
+ radeon_bo_unpin(rdev->rlc.save_restore_obj);
+ radeon_bo_unreserve(rdev->rlc.save_restore_obj);
- for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
- orig = data = RREG32(mc_cg_registers[i]);
- if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
- data |= MC_CG_ENABLE;
- else
- data &= ~MC_CG_ENABLE;
- if (data != orig)
- WREG32(mc_cg_registers[i], data);
+ radeon_bo_unref(&rdev->rlc.save_restore_obj);
+ rdev->rlc.save_restore_obj = NULL;
}
-}
-static void si_enable_dma_mgcg(struct radeon_device *rdev,
- bool enable)
-{
- u32 orig, data, offset;
- int i;
+ /* clear state block */
+ if (rdev->rlc.clear_state_obj) {
+ r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r);
+ radeon_bo_unpin(rdev->rlc.clear_state_obj);
+ radeon_bo_unreserve(rdev->rlc.clear_state_obj);
- if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
- for (i = 0; i < 2; i++) {
- if (i == 0)
- offset = DMA0_REGISTER_OFFSET;
- else
- offset = DMA1_REGISTER_OFFSET;
- orig = data = RREG32(DMA_POWER_CNTL + offset);
- data &= ~MEM_POWER_OVERRIDE;
- if (data != orig)
- WREG32(DMA_POWER_CNTL + offset, data);
- WREG32(DMA_CLK_CTRL + offset, 0x00000100);
- }
- } else {
- for (i = 0; i < 2; i++) {
- if (i == 0)
- offset = DMA0_REGISTER_OFFSET;
- else
- offset = DMA1_REGISTER_OFFSET;
- orig = data = RREG32(DMA_POWER_CNTL + offset);
- data |= MEM_POWER_OVERRIDE;
- if (data != orig)
- WREG32(DMA_POWER_CNTL + offset, data);
-
- orig = data = RREG32(DMA_CLK_CTRL + offset);
- data = 0xff000000;
- if (data != orig)
- WREG32(DMA_CLK_CTRL + offset, data);
- }
+ radeon_bo_unref(&rdev->rlc.clear_state_obj);
+ rdev->rlc.clear_state_obj = NULL;
}
}
-static void si_enable_bif_mgls(struct radeon_device *rdev,
- bool enable)
+int si_rlc_init(struct radeon_device *rdev)
{
- u32 orig, data;
-
- orig = data = RREG32_PCIE(PCIE_CNTL2);
-
- if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
- data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
- REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
- else
- data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
- REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
-
- if (orig != data)
- WREG32_PCIE(PCIE_CNTL2, data);
-}
-
-static void si_enable_hdp_mgcg(struct radeon_device *rdev,
- bool enable)
-{
- u32 orig, data;
-
- orig = data = RREG32(HDP_HOST_PATH_CNTL);
-
- if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
- data &= ~CLOCK_GATING_DIS;
- else
- data |= CLOCK_GATING_DIS;
-
- if (orig != data)
- WREG32(HDP_HOST_PATH_CNTL, data);
-}
-
-static void si_enable_hdp_ls(struct radeon_device *rdev,
- bool enable)
-{
- u32 orig, data;
-
- orig = data = RREG32(HDP_MEM_POWER_LS);
-
- if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
- data |= HDP_LS_ENABLE;
- else
- data &= ~HDP_LS_ENABLE;
-
- if (orig != data)
- WREG32(HDP_MEM_POWER_LS, data);
-}
-
-static void si_update_cg(struct radeon_device *rdev,
- u32 block, bool enable)
-{
- if (block & RADEON_CG_BLOCK_GFX) {
- si_enable_gui_idle_interrupt(rdev, false);
- /* order matters! */
- if (enable) {
- si_enable_mgcg(rdev, true);
- si_enable_cgcg(rdev, true);
- } else {
- si_enable_cgcg(rdev, false);
- si_enable_mgcg(rdev, false);
- }
- si_enable_gui_idle_interrupt(rdev, true);
- }
-
- if (block & RADEON_CG_BLOCK_MC) {
- si_enable_mc_mgcg(rdev, enable);
- si_enable_mc_ls(rdev, enable);
- }
-
- if (block & RADEON_CG_BLOCK_SDMA) {
- si_enable_dma_mgcg(rdev, enable);
- }
-
- if (block & RADEON_CG_BLOCK_BIF) {
- si_enable_bif_mgls(rdev, enable);
- }
+ int r;
- if (block & RADEON_CG_BLOCK_UVD) {
- if (rdev->has_uvd) {
- si_enable_uvd_mgcg(rdev, enable);
+ /* save restore block */
+ if (rdev->rlc.save_restore_obj == NULL) {
+ r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL,
+ &rdev->rlc.save_restore_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
+ return r;
}
}
- if (block & RADEON_CG_BLOCK_HDP) {
- si_enable_hdp_mgcg(rdev, enable);
- si_enable_hdp_ls(rdev, enable);
- }
-}
-
-static void si_init_cg(struct radeon_device *rdev)
-{
- si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_MC |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_HDP), true);
- if (rdev->has_uvd) {
- si_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
- si_init_uvd_internal_cg(rdev);
+ r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
+ if (unlikely(r != 0)) {
+ si_rlc_fini(rdev);
+ return r;
}
-}
-
-static void si_fini_cg(struct radeon_device *rdev)
-{
- if (rdev->has_uvd) {
- si_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
- }
- si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
- RADEON_CG_BLOCK_MC |
- RADEON_CG_BLOCK_SDMA |
- RADEON_CG_BLOCK_BIF |
- RADEON_CG_BLOCK_HDP), false);
-}
-
-u32 si_get_csb_size(struct radeon_device *rdev)
-{
- u32 count = 0;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
-
- if (rdev->rlc.cs_data == NULL)
- return 0;
-
- /* begin clear state */
- count += 2;
- /* context control state */
- count += 3;
-
- for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT)
- count += 2 + ext->reg_count;
- else
- return 0;
- }
+ r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->rlc.save_restore_gpu_addr);
+ radeon_bo_unreserve(rdev->rlc.save_restore_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
+ si_rlc_fini(rdev);
+ return r;
}
- /* pa_sc_raster_config */
- count += 3;
- /* end clear state */
- count += 2;
- /* clear state */
- count += 2;
- return count;
-}
-
-void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
-{
- u32 count = 0, i;
- const struct cs_section_def *sect = NULL;
- const struct cs_extent_def *ext = NULL;
-
- if (rdev->rlc.cs_data == NULL)
- return;
- if (buffer == NULL)
- return;
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
- buffer[count++] = cpu_to_le32(0x80000000);
- buffer[count++] = cpu_to_le32(0x80000000);
-
- for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
- for (ext = sect->section; ext->extent != NULL; ++ext) {
- if (sect->id == SECT_CONTEXT) {
- buffer[count++] =
- cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
- buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
- for (i = 0; i < ext->reg_count; i++)
- buffer[count++] = cpu_to_le32(ext->extent[i]);
- } else {
- return;
- }
+ /* clear state block */
+ if (rdev->rlc.clear_state_obj == NULL) {
+ r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL,
+ &rdev->rlc.clear_state_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
+ si_rlc_fini(rdev);
+ return r;
}
}
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
- switch (rdev->family) {
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- buffer[count++] = cpu_to_le32(0x2a00126a);
- break;
- case CHIP_VERDE:
- buffer[count++] = cpu_to_le32(0x0000124a);
- break;
- case CHIP_OLAND:
- buffer[count++] = cpu_to_le32(0x00000082);
- break;
- case CHIP_HAINAN:
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- default:
- buffer[count++] = cpu_to_le32(0x00000000);
- break;
- }
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
-
- buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
- buffer[count++] = cpu_to_le32(0);
-}
-
-static void si_init_pg(struct radeon_device *rdev)
-{
- if (rdev->pg_flags) {
- if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) {
- si_init_dma_pg(rdev);
- }
- si_init_ao_cu_mask(rdev);
- if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
- si_init_gfx_cgpg(rdev);
- } else {
- WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
- WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
- }
- si_enable_dma_pg(rdev, true);
- si_enable_gfx_cgpg(rdev, true);
- } else {
- WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
- WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
+ r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
+ if (unlikely(r != 0)) {
+ si_rlc_fini(rdev);
+ return r;
}
-}
-
-static void si_fini_pg(struct radeon_device *rdev)
-{
- if (rdev->pg_flags) {
- si_enable_dma_pg(rdev, false);
- si_enable_gfx_cgpg(rdev, false);
+ r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->rlc.clear_state_gpu_addr);
+ radeon_bo_unreserve(rdev->rlc.clear_state_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
+ si_rlc_fini(rdev);
+ return r;
}
-}
-/*
- * RLC
- */
-void si_rlc_reset(struct radeon_device *rdev)
-{
- u32 tmp = RREG32(GRBM_SOFT_RESET);
-
- tmp |= SOFT_RESET_RLC;
- WREG32(GRBM_SOFT_RESET, tmp);
- udelay(50);
- tmp &= ~SOFT_RESET_RLC;
- WREG32(GRBM_SOFT_RESET, tmp);
- udelay(50);
+ return 0;
}
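The restored init path repeats one buffer-object protocol for both RLC buffers: create the BO once, reserve it, pin it into VRAM to get a stable GPU address, then drop the reservation. A minimal sketch of that protocol as a helper — the helper name is hypothetical; the radeon_bo_* calls and signatures are the ones used in the hunk above, and failure is unwound by the caller via si_rlc_fini():

	static int si_rlc_pin_bo(struct radeon_device *rdev,
				 struct radeon_bo **bo, u64 *gpu_addr)
	{
		int r;

		if (*bo == NULL) {
			r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
					     PAGE_SIZE, true,
					     RADEON_GEM_DOMAIN_VRAM, NULL, bo);
			if (r)
				return r;
		}
		r = radeon_bo_reserve(*bo, false);
		if (unlikely(r != 0))
			return r;
		/* pinning keeps the BO resident and fills in its GPU address;
		 * the reservation lock is only held while pinning */
		r = radeon_bo_pin(*bo, RADEON_GEM_DOMAIN_VRAM, gpu_addr);
		radeon_bo_unreserve(*bo);
		return r;
	}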
static void si_rlc_stop(struct radeon_device *rdev)
{
WREG32(RLC_CNTL, 0);
-
- si_enable_gui_idle_interrupt(rdev, false);
-
- si_wait_for_rlc_serdes(rdev);
}
static void si_rlc_start(struct radeon_device *rdev)
{
WREG32(RLC_CNTL, RLC_ENABLE);
-
- si_enable_gui_idle_interrupt(rdev, true);
-
- udelay(50);
-}
-
-static bool si_lbpw_supported(struct radeon_device *rdev)
-{
- u32 tmp;
-
- /* Enable LBPW only for DDR3 */
- tmp = RREG32(MC_SEQ_MISC0);
- if ((tmp & 0xF0000000) == 0xB0000000)
- return true;
- return false;
-}
-
-static void si_enable_lbpw(struct radeon_device *rdev, bool enable)
-{
- u32 tmp;
-
- tmp = RREG32(RLC_LB_CNTL);
- if (enable)
- tmp |= LOAD_BALANCE_ENABLE;
- else
- tmp &= ~LOAD_BALANCE_ENABLE;
- WREG32(RLC_LB_CNTL, tmp);
-
- if (!enable) {
- si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
- WREG32(SPI_LB_CU_MASK, 0x00ff);
- }
}
static int si_rlc_resume(struct radeon_device *rdev)
{
u32 i;
+ const __be32 *fw_data;
if (!rdev->rlc_fw)
return -EINVAL;
si_rlc_stop(rdev);
- si_rlc_reset(rdev);
-
- si_init_pg(rdev);
-
- si_init_cg(rdev);
-
WREG32(RLC_RL_BASE, 0);
WREG32(RLC_RL_SIZE, 0);
WREG32(RLC_LB_CNTL, 0);
WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
WREG32(RLC_LB_CNTR_INIT, 0);
- WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
+
+ WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+ WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
- if (rdev->new_fw) {
- const struct rlc_firmware_header_v1_0 *hdr =
- (const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
- u32 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
- const __le32 *fw_data = (const __le32 *)
- (rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-
- radeon_ucode_print_rlc_hdr(&hdr->header);
-
- for (i = 0; i < fw_size; i++) {
- WREG32(RLC_UCODE_ADDR, i);
- WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++));
- }
- } else {
- const __be32 *fw_data =
- (const __be32 *)rdev->rlc_fw->data;
- for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
- WREG32(RLC_UCODE_ADDR, i);
- WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
- }
+ fw_data = (const __be32 *)rdev->rlc_fw;
+ for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
}
WREG32(RLC_UCODE_ADDR, 0);
- si_enable_lbpw(rdev, si_lbpw_supported(rdev));
-
si_rlc_start(rdev);
return 0;
@@ -5933,9 +3292,7 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
{
u32 tmp;
- tmp = RREG32(CP_INT_CNTL_RING0) &
- (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
- WREG32(CP_INT_CNTL_RING0, tmp);
+ WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(CP_INT_CNTL_RING1, 0);
WREG32(CP_INT_CNTL_RING2, 0);
tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
@@ -5943,11 +3300,8 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
WREG32(GRBM_INT_CNTL, 0);
- WREG32(SRBM_INT_CNTL, 0);
- if (rdev->num_crtc >= 2) {
- WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
- WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- }
+ WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
if (rdev->num_crtc >= 4) {
WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
@@ -5957,10 +3311,8 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
- if (rdev->num_crtc >= 2) {
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- }
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
if (rdev->num_crtc >= 4) {
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
@@ -5970,22 +3322,21 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
- if (!ASIC_IS_NODCE(rdev)) {
- WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
+ WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
+
+ tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD1_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD2_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD3_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD4_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
+ tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
- tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD1_INT_CONTROL, tmp);
- tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD2_INT_CONTROL, tmp);
- tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD3_INT_CONTROL, tmp);
- tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD4_INT_CONTROL, tmp);
- tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD5_INT_CONTROL, tmp);
- tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD6_INT_CONTROL, tmp);
- }
}
static int si_irq_init(struct radeon_device *rdev)
@@ -6022,7 +3373,7 @@ static int si_irq_init(struct radeon_device *rdev)
WREG32(INTERRUPT_CNTL, interrupt_cntl);
WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
- rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
+ rb_bufsz = drm_order(rdev->ih.ring_size / 4);
ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
IH_WPTR_OVERFLOW_CLEAR |
@@ -6051,7 +3402,9 @@ static int si_irq_init(struct radeon_device *rdev)
/* force the active interrupt state to all disabled */
si_disable_interrupt_state(rdev);
+#ifdef notyet
pci_set_master(rdev->pdev);
+#endif
/* enable irqs */
si_enable_interrupts(rdev);
@@ -6061,13 +3414,13 @@ static int si_irq_init(struct radeon_device *rdev)
int si_irq_set(struct radeon_device *rdev)
{
- u32 cp_int_cntl;
+ u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
- u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+ u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
+ u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
u32 dma_cntl, dma_cntl1;
- u32 thermal_int = 0;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -6081,24 +3434,16 @@ int si_irq_set(struct radeon_device *rdev)
return 0;
}
- cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
- (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
-
- if (!ASIC_IS_NODCE(rdev)) {
- hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
- hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
- hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
- hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
- hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
- hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
- }
+ hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
+ hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
- thermal_int = RREG32(CG_THERMAL_INT) &
- ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
-
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("si_irq_set: sw int gfx\n");
@@ -6153,27 +3498,27 @@ int si_irq_set(struct radeon_device *rdev)
}
if (rdev->irq.hpd[0]) {
DRM_DEBUG("si_irq_set: hpd 1\n");
- hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
+ hpd1 |= DC_HPDx_INT_EN;
}
if (rdev->irq.hpd[1]) {
DRM_DEBUG("si_irq_set: hpd 2\n");
- hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
+ hpd2 |= DC_HPDx_INT_EN;
}
if (rdev->irq.hpd[2]) {
DRM_DEBUG("si_irq_set: hpd 3\n");
- hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
+ hpd3 |= DC_HPDx_INT_EN;
}
if (rdev->irq.hpd[3]) {
DRM_DEBUG("si_irq_set: hpd 4\n");
- hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
+ hpd4 |= DC_HPDx_INT_EN;
}
if (rdev->irq.hpd[4]) {
DRM_DEBUG("si_irq_set: hpd 5\n");
- hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
+ hpd5 |= DC_HPDx_INT_EN;
}
if (rdev->irq.hpd[5]) {
DRM_DEBUG("si_irq_set: hpd 6\n");
- hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
+ hpd6 |= DC_HPDx_INT_EN;
}
WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
@@ -6185,15 +3530,8 @@ int si_irq_set(struct radeon_device *rdev)
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
- if (rdev->irq.dpm_thermal) {
- DRM_DEBUG("dpm thermal\n");
- thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
- }
-
- if (rdev->num_crtc >= 2) {
- WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
- WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
- }
+ WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
+ WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
if (rdev->num_crtc >= 4) {
WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
@@ -6203,38 +3541,23 @@ int si_irq_set(struct radeon_device *rdev)
WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
}
- if (rdev->num_crtc >= 2) {
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
- GRPH_PFLIP_INT_MASK);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
- GRPH_PFLIP_INT_MASK);
- }
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
if (rdev->num_crtc >= 4) {
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
- GRPH_PFLIP_INT_MASK);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
- GRPH_PFLIP_INT_MASK);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
}
if (rdev->num_crtc >= 6) {
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
- GRPH_PFLIP_INT_MASK);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
- GRPH_PFLIP_INT_MASK);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
}
- if (!ASIC_IS_NODCE(rdev)) {
- WREG32(DC_HPD1_INT_CONTROL, hpd1);
- WREG32(DC_HPD2_INT_CONTROL, hpd2);
- WREG32(DC_HPD3_INT_CONTROL, hpd3);
- WREG32(DC_HPD4_INT_CONTROL, hpd4);
- WREG32(DC_HPD5_INT_CONTROL, hpd5);
- WREG32(DC_HPD6_INT_CONTROL, hpd6);
- }
-
- WREG32(CG_THERMAL_INT, thermal_int);
-
- /* posting read */
- RREG32(SRBM_STATUS);
+ WREG32(DC_HPD1_INT_CONTROL, hpd1);
+ WREG32(DC_HPD2_INT_CONTROL, hpd2);
+ WREG32(DC_HPD3_INT_CONTROL, hpd3);
+ WREG32(DC_HPD4_INT_CONTROL, hpd4);
+ WREG32(DC_HPD5_INT_CONTROL, hpd5);
+ WREG32(DC_HPD6_INT_CONTROL, hpd6);
return 0;
}
@@ -6243,9 +3566,6 @@ static inline void si_irq_ack(struct radeon_device *rdev)
{
u32 tmp;
- if (ASIC_IS_NODCE(rdev))
- return;
-
rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
@@ -6332,39 +3652,8 @@ static inline void si_irq_ack(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- tmp = RREG32(DC_HPD6_INT_CONTROL);
- tmp |= DC_HPDx_INT_ACK;
- WREG32(DC_HPD6_INT_CONTROL, tmp);
- }
-
- if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
- tmp = RREG32(DC_HPD1_INT_CONTROL);
- tmp |= DC_HPDx_RX_INT_ACK;
- WREG32(DC_HPD1_INT_CONTROL, tmp);
- }
- if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
- tmp = RREG32(DC_HPD2_INT_CONTROL);
- tmp |= DC_HPDx_RX_INT_ACK;
- WREG32(DC_HPD2_INT_CONTROL, tmp);
- }
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
- tmp = RREG32(DC_HPD3_INT_CONTROL);
- tmp |= DC_HPDx_RX_INT_ACK;
- WREG32(DC_HPD3_INT_CONTROL, tmp);
- }
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
- tmp = RREG32(DC_HPD4_INT_CONTROL);
- tmp |= DC_HPDx_RX_INT_ACK;
- WREG32(DC_HPD4_INT_CONTROL, tmp);
- }
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
tmp = RREG32(DC_HPD5_INT_CONTROL);
- tmp |= DC_HPDx_RX_INT_ACK;
- WREG32(DC_HPD5_INT_CONTROL, tmp);
- }
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- tmp = RREG32(DC_HPD6_INT_CONTROL);
- tmp |= DC_HPDx_RX_INT_ACK;
+ tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
}
@@ -6400,13 +3689,12 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
wptr = RREG32(IH_RB_WPTR);
if (wptr & RB_OVERFLOW) {
- wptr &= ~RB_OVERFLOW;
/* When a ring buffer overflow happens, start parsing interrupts
* from the last non-overwritten vector (wptr + 16). Hopefully
* this should allow us to catch up.
*/
- dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
- wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
+ dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+ wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
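The recovery comment above is easiest to see with concrete numbers; a worked example, assuming a 64 KB IH ring (so rdev->ih.ptr_mask == 0xffff) and the 16-byte vectors used throughout this file:

	/*
	 * If the hardware reports wptr == 0xfff0 with RB_OVERFLOW set,
	 * the writer has lapped the reader, and the oldest vector still
	 * intact begins one 16-byte slot past the write pointer:
	 *
	 *	rptr = (0xfff0 + 16) & 0xffff = 0x0000
	 *
	 * so parsing resumes at the start of the ring, just past the
	 * last overwritten entry.
	 */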
@@ -6432,15 +3720,14 @@ int si_irq_process(struct radeon_device *rdev)
u32 src_id, src_data, ring_id;
u32 ring_index;
bool queue_hotplug = false;
- bool queue_dp = false;
- bool queue_thermal = false;
- u32 status, addr;
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
wptr = si_get_ih_wptr(rdev);
+ if (wptr == rdev->ih.rptr)
+ return IRQ_NONE;
restart_ih:
/* is somebody else already processing irqs? */
if (atomic_xchg(&rdev->ih.lock, 1))
@@ -6466,27 +3753,23 @@ restart_ih:
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
- if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- if (rdev->irq.crtc_vblank_int[0]) {
- drm_handle_vblank(rdev->ddev, 0);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[0]) {
+ drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[0]))
+ radeon_crtc_handle_flip(rdev, 0);
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D1 vblank\n");
}
- if (atomic_read(&rdev->irq.pflip[0]))
- radeon_crtc_handle_vblank(rdev, 0);
- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D1 vblank\n");
-
break;
case 1: /* D1 vline */
- if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D1 vline\n");
-
+ if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D1 vline\n");
+ }
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6496,27 +3779,23 @@ restart_ih:
case 2: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- if (rdev->irq.crtc_vblank_int[1]) {
- drm_handle_vblank(rdev->ddev, 1);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[1]) {
+ drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[1]))
+ radeon_crtc_handle_flip(rdev, 1);
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D2 vblank\n");
}
- if (atomic_read(&rdev->irq.pflip[1]))
- radeon_crtc_handle_vblank(rdev, 1);
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D2 vblank\n");
-
break;
case 1: /* D2 vline */
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D2 vline\n");
-
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D2 vline\n");
+ }
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6526,27 +3805,23 @@ restart_ih:
case 3: /* D3 vblank/vline */
switch (src_data) {
case 0: /* D3 vblank */
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- if (rdev->irq.crtc_vblank_int[2]) {
- drm_handle_vblank(rdev->ddev, 2);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[2]) {
+ drm_handle_vblank(rdev->ddev, 2);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[2]))
+ radeon_crtc_handle_flip(rdev, 2);
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D3 vblank\n");
}
- if (atomic_read(&rdev->irq.pflip[2]))
- radeon_crtc_handle_vblank(rdev, 2);
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D3 vblank\n");
-
break;
case 1: /* D3 vline */
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D3 vline\n");
-
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D3 vline\n");
+ }
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6556,27 +3831,23 @@ restart_ih:
case 4: /* D4 vblank/vline */
switch (src_data) {
case 0: /* D4 vblank */
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- if (rdev->irq.crtc_vblank_int[3]) {
- drm_handle_vblank(rdev->ddev, 3);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[3]) {
+ drm_handle_vblank(rdev->ddev, 3);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[3]))
+ radeon_crtc_handle_flip(rdev, 3);
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D4 vblank\n");
}
- if (atomic_read(&rdev->irq.pflip[3]))
- radeon_crtc_handle_vblank(rdev, 3);
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D4 vblank\n");
-
break;
case 1: /* D4 vline */
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D4 vline\n");
-
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D4 vline\n");
+ }
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6586,27 +3857,23 @@ restart_ih:
case 5: /* D5 vblank/vline */
switch (src_data) {
case 0: /* D5 vblank */
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- if (rdev->irq.crtc_vblank_int[4]) {
- drm_handle_vblank(rdev->ddev, 4);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[4]) {
+ drm_handle_vblank(rdev->ddev, 4);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[4]))
+ radeon_crtc_handle_flip(rdev, 4);
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D5 vblank\n");
}
- if (atomic_read(&rdev->irq.pflip[4]))
- radeon_crtc_handle_vblank(rdev, 4);
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D5 vblank\n");
-
break;
case 1: /* D5 vline */
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D5 vline\n");
-
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D5 vline\n");
+ }
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6616,180 +3883,87 @@ restart_ih:
case 6: /* D6 vblank/vline */
switch (src_data) {
case 0: /* D6 vblank */
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- if (rdev->irq.crtc_vblank_int[5]) {
- drm_handle_vblank(rdev->ddev, 5);
- rdev->pm.vblank_sync = true;
- wake_up(&rdev->irq.vblank_queue);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+ if (rdev->irq.crtc_vblank_int[5]) {
+ drm_handle_vblank(rdev->ddev, 5);
+ rdev->pm.vblank_sync = true;
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (atomic_read(&rdev->irq.pflip[5]))
+ radeon_crtc_handle_flip(rdev, 5);
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+ DRM_DEBUG("IH: D6 vblank\n");
}
- if (atomic_read(&rdev->irq.pflip[5]))
- radeon_crtc_handle_vblank(rdev, 5);
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
- DRM_DEBUG("IH: D6 vblank\n");
-
break;
case 1: /* D6 vline */
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
- DRM_DEBUG("IH: D6 vline\n");
-
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+ DRM_DEBUG("IH: D6 vline\n");
+ }
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
- case 8: /* D1 page flip */
- case 10: /* D2 page flip */
- case 12: /* D3 page flip */
- case 14: /* D4 page flip */
- case 16: /* D5 page flip */
- case 18: /* D6 page flip */
- DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
- if (radeon_use_pflipirq > 0)
- radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
- break;
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
- if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD1\n");
-
+ if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD1\n");
+ }
break;
case 1:
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD2\n");
-
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD2\n");
+ }
break;
case 2:
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD3\n");
-
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD3\n");
+ }
break;
case 3:
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD4\n");
-
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD4\n");
+ }
break;
case 4:
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD5\n");
-
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD5\n");
+ }
break;
case 5:
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
- queue_hotplug = true;
- DRM_DEBUG("IH: HPD6\n");
-
- break;
- case 6:
- if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 1\n");
-
- break;
- case 7:
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 2\n");
-
- break;
- case 8:
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 3\n");
-
- break;
- case 9:
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 4\n");
-
- break;
- case 10:
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 5\n");
-
- break;
- case 11:
- if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
- queue_dp = true;
- DRM_DEBUG("IH: HPD_RX 6\n");
-
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+ rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+ queue_hotplug = true;
+ DRM_DEBUG("IH: HPD6\n");
+ }
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
- case 96:
- DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
- WREG32(SRBM_INT_ACK, 0x1);
- break;
- case 124: /* UVD */
- DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
- radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
- break;
case 146:
case 147:
- addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
- status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
- /* reset addr and status */
- WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
- if (addr == 0x0 && status == 0x0)
- break;
dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
- addr);
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
- status);
- si_vm_decode_fault(rdev, status, addr);
+ RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+ /* reset addr and status */
+ WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
break;
case 176: /* RINGID0 CP_INT */
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
@@ -6818,16 +3992,6 @@ restart_ih:
DRM_DEBUG("IH: DMA trap\n");
radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
break;
- case 230: /* thermal low to high */
- DRM_DEBUG("IH: thermal low to high\n");
- rdev->pm.dpm.thermal.high_to_low = false;
- queue_thermal = true;
- break;
- case 231: /* thermal high to low */
- DRM_DEBUG("IH: thermal high to low\n");
- rdev->pm.dpm.thermal.high_to_low = true;
- queue_thermal = true;
- break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
break;
@@ -6843,15 +4007,11 @@ restart_ih:
/* wptr/rptr are in bytes! */
rptr += 16;
rptr &= rdev->ih.ptr_mask;
- WREG32(IH_RB_RPTR, rptr);
}
- if (queue_dp)
- schedule_work(&rdev->dp_work);
if (queue_hotplug)
- schedule_delayed_work(&rdev->hotplug_work, 0);
- if (queue_thermal && rdev->pm.dpm_enabled)
- schedule_work(&rdev->pm.dpm.thermal.work);
+ task_add(systq, &rdev->hotplug_task);
rdev->ih.rptr = rptr;
+ WREG32(IH_RB_RPTR, rdev->ih.rptr);
atomic_set(&rdev->ih.lock, 0);
/* make sure wptr hasn't changed while processing */
@@ -6862,6 +4022,80 @@ restart_ih:
return IRQ_HANDLED;
}
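For reference while reading the large switch above: each IH vector is four little-endian dwords, and the loop peels src_id, src_data and ring_id out of the first three before advancing rptr. A sketch of one iteration of that decode, assuming the usual radeon IH field masks (the reads themselves sit just above the switch and are not shown in this hunk):

	ring_index = rptr / 4;					/* bytes -> dwords */
	src_id   = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
	src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
	ring_id  = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
	/* ... dispatch on src_id/src_data ... */
	rptr = (rptr + 16) & rdev->ih.ptr_mask;			/* next vector */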
+/**
+ * si_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (SI).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int si_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_bytes, cur_size_in_bytes;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+ num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_bytes = size_in_bytes;
+ if (cur_size_in_bytes > 0xFFFFF)
+ cur_size_in_bytes = 0xFFFFF;
+ size_in_bytes -= cur_size_in_bytes;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
+ radeon_ring_write(ring, dst_offset & 0xffffffff);
+ radeon_ring_write(ring, src_offset & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_bytes;
+ dst_offset += cur_size_in_bytes;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
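Two things about the new copy path are worth spelling out. Each DMA_PACKET_COPY moves at most 0xFFFFF bytes and costs five ring dwords, so the lock reserves num_loops * 5 plus 11 dwords of fence/semaphore overhead: copying 1024 GPU pages (1024 << 12 = 0x400000 bytes) takes DIV_ROUND_UP(0x400000, 0xfffff) = 5 chunks and therefore 36 dwords. A hypothetical caller, with the radeon_fence_* API as the only real names (src_gpu_addr, dst_gpu_addr and npages are illustrative):

	/* si_copy_dma() emits the fence itself; the caller waits and unrefs. */
	struct radeon_fence *fence = NULL;
	int r;

	r = si_copy_dma(rdev, src_gpu_addr, dst_gpu_addr, npages, &fence);
	if (!r)
		r = radeon_fence_wait(fence, false);	/* block until the copy retires */
	radeon_fence_unref(&fence);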
/*
* startup/shutdown callbacks
*/
@@ -6870,39 +4104,42 @@ static int si_startup(struct radeon_device *rdev)
struct radeon_ring *ring;
int r;
- /* enable pcie gen2/3 link */
- si_pcie_gen3_enable(rdev);
- /* enable aspm */
- si_program_aspm(rdev);
-
- /* scratch needs to be initialized before MC */
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
si_mc_program(rdev);
- if (!rdev->pm.dpm_enabled) {
- r = si_mc_load_microcode(rdev);
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
+ !rdev->rlc_fw || !rdev->mc_fw) {
+ r = si_init_microcode(rdev);
if (r) {
- DRM_ERROR("Failed to load MC firmware!\n");
+ DRM_ERROR("Failed to load firmware!\n");
return r;
}
}
+ r = si_mc_load_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load MC firmware!\n");
+ return r;
+ }
+
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
r = si_pcie_gart_enable(rdev);
if (r)
return r;
si_gpu_init(rdev);
- /* allocate rlc buffers */
- if (rdev->family == CHIP_VERDE) {
- rdev->rlc.reg_list = verde_rlc_save_restore_register_list;
- rdev->rlc.reg_list_size =
- (u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);
+#if 0
+ r = evergreen_blit_init(rdev);
+ if (r) {
+ r600_blit_fini(rdev);
+ rdev->asic->copy = NULL;
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
- rdev->rlc.cs_data = si_cs_data;
- r = sumo_rlc_init(rdev);
+#endif
+ /* allocate rlc buffers */
+ r = si_rlc_init(rdev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
@@ -6943,34 +4180,6 @@ static int si_startup(struct radeon_device *rdev)
return r;
}
- if (rdev->has_uvd) {
- r = uvd_v2_2_resume(rdev);
- if (!r) {
- r = radeon_fence_driver_start_ring(rdev,
- R600_RING_TYPE_UVD_INDEX);
- if (r)
- dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
- }
- if (r)
- rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
- }
-
- r = radeon_vce_resume(rdev);
- if (!r) {
- r = vce_v1_0_resume(rdev);
- if (!r)
- r = radeon_fence_driver_start_ring(rdev,
- TN_RING_TYPE_VCE1_INDEX);
- if (!r)
- r = radeon_fence_driver_start_ring(rdev,
- TN_RING_TYPE_VCE2_INDEX);
- }
- if (r) {
- dev_err(rdev->dev, "VCE init error (%d).\n", r);
- rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
- rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
- }
-
/* Enable IRQ */
if (!rdev->irq.installed) {
r = radeon_irq_kms_init(rdev);
@@ -6988,31 +4197,38 @@ static int si_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- RADEON_CP_PACKET2);
+ CP_RB0_RPTR, CP_RB0_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
- RADEON_CP_PACKET2);
+ CP_RB1_RPTR, CP_RB1_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
- RADEON_CP_PACKET2);
+ CP_RB2_RPTR, CP_RB2_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
- DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+ DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
- DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+ DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
+ DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
+ 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
if (r)
return r;
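The extra arguments restore the older radeon_ring_init() signature, which takes the hardware rptr/wptr registers plus a pointer shift and mask alongside the nop filler. An annotated version of the GFX-ring call above (the comments are descriptive glosses, not parameter names from this tree):

	r = radeon_ring_init(rdev, ring, ring->ring_size,
			     RADEON_WB_CP_RPTR_OFFSET,	/* writeback slot */
			     CP_RB0_RPTR, CP_RB0_WPTR,	/* hw rptr/wptr registers */
			     0, 0xfffff,		/* ptr shift and mask */
			     RADEON_CP_PACKET2);	/* nop used to pad the ring */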
@@ -7027,35 +4243,6 @@ static int si_startup(struct radeon_device *rdev)
if (r)
return r;
- if (rdev->has_uvd) {
- ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- RADEON_CP_PACKET2);
- if (!r)
- r = uvd_v1_0_init(rdev);
- if (r)
- DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
- }
- }
-
- r = -ENOENT;
-
- ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
- if (ring->ring_size)
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- VCE_CMD_NO_OP);
-
- ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
- if (ring->ring_size)
- r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
- VCE_CMD_NO_OP);
-
- if (!r)
- r = vce_v1_0_init(rdev);
- else if (r != -ENOENT)
- DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
-
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -7068,10 +4255,6 @@ static int si_startup(struct radeon_device *rdev)
return r;
}
- r = radeon_audio_init(rdev);
- if (r)
- return r;
-
return 0;
}
@@ -7086,12 +4269,6 @@ int si_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
- /* init golden registers */
- si_init_golden_registers(rdev);
-
- if (rdev->pm.pm_method == PM_METHOD_DPM)
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = si_startup(rdev);
if (r) {
@@ -7106,18 +4283,9 @@ int si_resume(struct radeon_device *rdev)
int si_suspend(struct radeon_device *rdev)
{
- radeon_pm_suspend(rdev);
- radeon_audio_fini(rdev);
radeon_vm_manager_fini(rdev);
si_cp_enable(rdev, false);
cayman_dma_stop(rdev);
- if (rdev->has_uvd) {
- uvd_v1_0_fini(rdev);
- radeon_uvd_suspend(rdev);
- radeon_vce_suspend(rdev);
- }
- si_fini_pg(rdev);
- si_fini_cg(rdev);
si_irq_suspend(rdev);
radeon_wb_disable(rdev);
si_pcie_gart_disable(rdev);
@@ -7158,8 +4326,6 @@ int si_init(struct radeon_device *rdev)
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
- /* init golden registers */
- si_init_golden_registers(rdev);
/* Initialize scratch registers */
si_scratch_init(rdev);
/* Initialize surface registers */
@@ -7181,18 +4347,6 @@ int si_init(struct radeon_device *rdev)
if (r)
return r;
- if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
- !rdev->rlc_fw || !rdev->mc_fw) {
- r = si_init_microcode(rdev);
- if (r) {
- DRM_ERROR("Failed to load firmware!\n");
- return r;
- }
- }
-
- /* Initialize power management */
- radeon_pm_init(rdev);
-
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
@@ -7213,26 +4367,6 @@ int si_init(struct radeon_device *rdev)
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 64 * 1024);
- if (rdev->has_uvd) {
- r = radeon_uvd_init(rdev);
- if (!r) {
- ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 4096);
- }
- }
-
- r = radeon_vce_init(rdev);
- if (!r) {
- ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 4096);
-
- ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
- ring->ring_obj = NULL;
- r600_ring_init(rdev, ring, 4096);
- }
-
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -7247,7 +4381,7 @@ int si_init(struct radeon_device *rdev)
si_cp_fini(rdev);
cayman_dma_fini(rdev);
si_irq_fini(rdev);
- sumo_rlc_fini(rdev);
+ si_rlc_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_vm_manager_fini(rdev);
@@ -7270,22 +4404,17 @@ int si_init(struct radeon_device *rdev)
void si_fini(struct radeon_device *rdev)
{
- radeon_pm_fini(rdev);
+#if 0
+ r600_blit_fini(rdev);
+#endif
si_cp_fini(rdev);
cayman_dma_fini(rdev);
- si_fini_pg(rdev);
- si_fini_cg(rdev);
si_irq_fini(rdev);
- sumo_rlc_fini(rdev);
+ si_rlc_fini(rdev);
radeon_wb_fini(rdev);
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
- if (rdev->has_uvd) {
- uvd_v1_0_fini(rdev);
- radeon_uvd_fini(rdev);
- radeon_vce_fini(rdev);
- }
si_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
@@ -7297,14 +4426,14 @@ void si_fini(struct radeon_device *rdev)
}
/**
- * si_get_gpu_clock_counter - return GPU clock counter snapshot
+ * si_get_gpu_clock - return GPU clock counter snapshot
*
* @rdev: radeon_device pointer
*
* Fetches a GPU clock counter snapshot (SI).
* Returns the 64 bit clock counter snapshot.
*/
-uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
+uint64_t si_get_gpu_clock(struct radeon_device *rdev)
{
uint64_t clock;
@@ -7315,588 +4444,3 @@ uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
mutex_unlock(&rdev->gpu_clock_mutex);
return clock;
}
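A quick illustration of the renamed helper: the counter is free-running and 64-bit, so two snapshots bracket an interval in GPU clocks. The ~100 us delay below is arbitrary and the snippet is illustrative only:

	uint64_t t0, t1;

	t0 = si_get_gpu_clock(rdev);
	udelay(100);
	t1 = si_get_gpu_clock(rdev);
	DRM_INFO("GPU clocks in ~100us: %llu\n", (unsigned long long)(t1 - t0));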
-
-int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
-{
- unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
- int r;
-
- /* bypass vclk and dclk with bclk */
- WREG32_P(CG_UPLL_FUNC_CNTL_2,
- VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
- ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
-
- /* put PLL in bypass mode */
- WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
-
- if (!vclk || !dclk) {
- /* keep the Bypass mode */
- return 0;
- }
-
- r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
- 16384, 0x03FFFFFF, 0, 128, 5,
- &fb_div, &vclk_div, &dclk_div);
- if (r)
- return r;
-
- /* set RESET_ANTI_MUX to 0 */
- WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
-
- /* set VCO_MODE to 1 */
- WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
-
- /* disable sleep mode */
- WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
-
- /* deassert UPLL_RESET */
- WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
-
- mdelay(1);
-
- r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
- if (r)
- return r;
-
- /* assert UPLL_RESET again */
- WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
-
- /* disable spread spectrum. */
- WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
-
- /* set feedback divider */
- WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
-
- /* set ref divider to 0 */
- WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
-
- if (fb_div < 307200)
- WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
- else
- WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
-
- /* set PDIV_A and PDIV_B */
- WREG32_P(CG_UPLL_FUNC_CNTL_2,
- UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
- ~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
-
- /* give the PLL some time to settle */
- mdelay(15);
-
- /* deassert PLL_RESET */
- WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
-
- mdelay(15);
-
- /* switch from bypass mode to normal mode */
- WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
-
- r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
- if (r)
- return r;
-
- /* switch VCLK and DCLK selection */
- WREG32_P(CG_UPLL_FUNC_CNTL_2,
- VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
- ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
-
- mdelay(100);
-
- return 0;
-}
-
-static void si_pcie_gen3_enable(struct radeon_device *rdev)
-{
- struct pci_dev _root;
- struct pci_dev *root;
- int bridge_pos, gpu_pos;
- u32 speed_cntl, mask, current_data_rate;
- int ret, i;
- u16 tmp16;
-
- root = &_root;
- root->pc = rdev->pdev->pc;
- root->tag = *rdev->ddev->bridgetag;
-
- if (pci_is_root_bus(rdev->pdev->bus))
- return;
-
- if (radeon_pcie_gen2 == 0)
- return;
-
- if (rdev->flags & RADEON_IS_IGP)
- return;
-
- if (!(rdev->flags & RADEON_IS_PCIE))
- return;
-
- ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
- if (ret != 0)
- return;
-
- if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
- return;
-
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
- current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
- LC_CURRENT_DATA_RATE_SHIFT;
- if (mask & DRM_PCIE_SPEED_80) {
- if (current_data_rate == 2) {
- DRM_INFO("PCIE gen 3 link speeds already enabled\n");
- return;
- }
- DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
- } else if (mask & DRM_PCIE_SPEED_50) {
- if (current_data_rate == 1) {
- DRM_INFO("PCIE gen 2 link speeds already enabled\n");
- return;
- }
- DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
- }
-
- bridge_pos = pci_pcie_cap(root);
- if (!bridge_pos)
- return;
-
- gpu_pos = pci_pcie_cap(rdev->pdev);
- if (!gpu_pos)
- return;
-
- if (mask & DRM_PCIE_SPEED_80) {
- /* re-try equalization if gen3 is not already enabled */
- if (current_data_rate != 2) {
- u16 bridge_cfg, gpu_cfg;
- u16 bridge_cfg2, gpu_cfg2;
- u32 max_lw, current_lw, tmp;
-
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
-
- tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
-
- tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
-
- tmp = RREG32_PCIE(PCIE_LC_STATUS1);
- max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
- current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
-
- if (current_lw < max_lw) {
- tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
- if (tmp & LC_RENEGOTIATION_SUPPORT) {
- tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
- tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
- tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
- }
- }
-
- for (i = 0; i < 10; i++) {
- /* check status */
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
- if (tmp16 & PCI_EXP_DEVSTA_TRPND)
- break;
-
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
-
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
-
- tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
- tmp |= LC_SET_QUIESCE;
- WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
-
- tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
- tmp |= LC_REDO_EQ;
- WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
-
- mdelay(100);
-
- /* linkctl */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
- tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
-
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
- tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
-
- /* linkctl2 */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
-
- tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
- tmp &= ~LC_SET_QUIESCE;
- WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
- }
- }
- }
-
- /* set the link speed */
- speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
- speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
- WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
-
- pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~0xf;
- if (mask & DRM_PCIE_SPEED_80)
- tmp16 |= 3; /* gen3 */
- else if (mask & DRM_PCIE_SPEED_50)
- tmp16 |= 2; /* gen2 */
- else
- tmp16 |= 1; /* gen1 */
- pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
-
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
- speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
- WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
-
- for (i = 0; i < rdev->usec_timeout; i++) {
- speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
- if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
- break;
- udelay(1);
- }
-}
-
-static void si_program_aspm(struct radeon_device *rdev)
-{
- u32 data, orig;
- bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
- bool disable_clkreq = false;
-
- if (radeon_aspm == 0)
- return;
-
- if (!(rdev->flags & RADEON_IS_PCIE))
- return;
-
- orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
- data &= ~LC_XMIT_N_FTS_MASK;
- data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
- if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
-
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
- data |= LC_GO_TO_RECOVERY;
- if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
-
- orig = data = RREG32_PCIE(PCIE_P_CNTL);
- data |= P_IGNORE_EDB_ERR;
- if (orig != data)
- WREG32_PCIE(PCIE_P_CNTL, data);
-
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
- data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
- data |= LC_PMI_TO_L1_DIS;
- if (!disable_l0s)
- data |= LC_L0S_INACTIVITY(7);
-
- if (!disable_l1) {
- data |= LC_L1_INACTIVITY(7);
- data &= ~LC_PMI_TO_L1_DIS;
- if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
-
- if (!disable_plloff_in_l1) {
- bool clk_req_support;
-
- orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
- data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
- data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
- if (orig != data)
- WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
-
- orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
- data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
- data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
- if (orig != data)
- WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
-
- orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
- data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
- data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
- if (orig != data)
- WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
-
- orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
- data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
- data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
- if (orig != data)
- WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
-
- if ((rdev->family != CHIP_OLAND) && (rdev->family != CHIP_HAINAN)) {
- orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
- data &= ~PLL_RAMP_UP_TIME_0_MASK;
- if (orig != data)
- WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
-
- orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
- data &= ~PLL_RAMP_UP_TIME_1_MASK;
- if (orig != data)
- WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
-
- orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2);
- data &= ~PLL_RAMP_UP_TIME_2_MASK;
- if (orig != data)
- WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2, data);
-
- orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3);
- data &= ~PLL_RAMP_UP_TIME_3_MASK;
- if (orig != data)
- WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3, data);
-
- orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
- data &= ~PLL_RAMP_UP_TIME_0_MASK;
- if (orig != data)
- WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
-
- orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
- data &= ~PLL_RAMP_UP_TIME_1_MASK;
- if (orig != data)
- WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
-
- orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2);
- data &= ~PLL_RAMP_UP_TIME_2_MASK;
- if (orig != data)
- WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2, data);
-
- orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3);
- data &= ~PLL_RAMP_UP_TIME_3_MASK;
- if (orig != data)
- WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3, data);
- }
- orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
- data &= ~LC_DYN_LANES_PWR_STATE_MASK;
- data |= LC_DYN_LANES_PWR_STATE(3);
- if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
-
- orig = data = RREG32_PIF_PHY0(PB0_PIF_CNTL);
- data &= ~LS2_EXIT_TIME_MASK;
- if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
- data |= LS2_EXIT_TIME(5);
- if (orig != data)
- WREG32_PIF_PHY0(PB0_PIF_CNTL, data);
-
- orig = data = RREG32_PIF_PHY1(PB1_PIF_CNTL);
- data &= ~LS2_EXIT_TIME_MASK;
- if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
- data |= LS2_EXIT_TIME(5);
- if (orig != data)
- WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
-
- if (!disable_clkreq &&
- !pci_is_root_bus(rdev->pdev->bus)) {
- u32 lnkcap;
- struct pci_dev _root;
- struct pci_dev *root;
-
- root = &_root;
- root->pc = rdev->pdev->pc;
- root->tag = *rdev->ddev->bridgetag;
-
- clk_req_support = false;
- pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
- if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
- clk_req_support = true;
- } else {
- clk_req_support = false;
- }
-
- if (clk_req_support) {
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
- data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
- if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
-
- orig = data = RREG32(THM_CLK_CNTL);
- data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
- data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
- if (orig != data)
- WREG32(THM_CLK_CNTL, data);
-
- orig = data = RREG32(MISC_CLK_CNTL);
- data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
- data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
- if (orig != data)
- WREG32(MISC_CLK_CNTL, data);
-
- orig = data = RREG32(CG_CLKPIN_CNTL);
- data &= ~BCLK_AS_XCLK;
- if (orig != data)
- WREG32(CG_CLKPIN_CNTL, data);
-
- orig = data = RREG32(CG_CLKPIN_CNTL_2);
- data &= ~FORCE_BIF_REFCLK_EN;
- if (orig != data)
- WREG32(CG_CLKPIN_CNTL_2, data);
-
- orig = data = RREG32(MPLL_BYPASSCLK_SEL);
- data &= ~MPLL_CLKOUT_SEL_MASK;
- data |= MPLL_CLKOUT_SEL(4);
- if (orig != data)
- WREG32(MPLL_BYPASSCLK_SEL, data);
-
- orig = data = RREG32(SPLL_CNTL_MODE);
- data &= ~SPLL_REFCLK_SEL_MASK;
- if (orig != data)
- WREG32(SPLL_CNTL_MODE, data);
- }
- }
- } else {
- if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
- }
-
- orig = data = RREG32_PCIE(PCIE_CNTL2);
- data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
- if (orig != data)
- WREG32_PCIE(PCIE_CNTL2, data);
-
- if (!disable_l0s) {
- data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
- if((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
- data = RREG32_PCIE(PCIE_LC_STATUS1);
- if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
- orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
- data &= ~LC_L0S_INACTIVITY_MASK;
- if (orig != data)
- WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
- }
- }
- }
-}
-
-int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
-{
- unsigned i;
-
- /* make sure VCEPLL_CTLREQ is deasserted */
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
-
- mdelay(10);
-
- /* assert UPLL_CTLREQ */
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
-
- /* wait for CTLACK and CTLACK2 to get asserted */
- for (i = 0; i < 100; ++i) {
- uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
- if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL) & mask) == mask)
- break;
- mdelay(10);
- }
-
- /* deassert UPLL_CTLREQ */
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
-
- if (i == 100) {
- DRM_ERROR("Timeout setting UVD clocks!\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-int si_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
-{
- unsigned fb_div = 0, evclk_div = 0, ecclk_div = 0;
- int r;
-
- /* bypass evclk and ecclk with bclk */
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
- EVCLK_SRC_SEL(1) | ECCLK_SRC_SEL(1),
- ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));
-
- /* put PLL in bypass mode */
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_BYPASS_EN_MASK,
- ~VCEPLL_BYPASS_EN_MASK);
-
- if (!evclk || !ecclk) {
- /* keep the Bypass mode, put PLL to sleep */
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
- ~VCEPLL_SLEEP_MASK);
- return 0;
- }
-
- r = radeon_uvd_calc_upll_dividers(rdev, evclk, ecclk, 125000, 250000,
- 16384, 0x03FFFFFF, 0, 128, 5,
- &fb_div, &evclk_div, &ecclk_div);
- if (r)
- return r;
-
- /* set RESET_ANTI_MUX to 0 */
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
-
- /* set VCO_MODE to 1 */
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_VCO_MODE_MASK,
- ~VCEPLL_VCO_MODE_MASK);
-
- /* toggle VCEPLL_SLEEP to 1 then back to 0 */
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
- ~VCEPLL_SLEEP_MASK);
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_SLEEP_MASK);
-
- /* deassert VCEPLL_RESET */
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);
-
- mdelay(1);
-
- r = si_vce_send_vcepll_ctlreq(rdev);
- if (r)
- return r;
-
- /* assert VCEPLL_RESET again */
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_RESET_MASK, ~VCEPLL_RESET_MASK);
-
- /* disable spread spectrum. */
- WREG32_SMC_P(CG_VCEPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
-
- /* set feedback divider */
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_3, VCEPLL_FB_DIV(fb_div), ~VCEPLL_FB_DIV_MASK);
-
- /* set ref divider to 0 */
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_REF_DIV_MASK);
-
- /* set PDIV_A and PDIV_B */
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
- VCEPLL_PDIV_A(evclk_div) | VCEPLL_PDIV_B(ecclk_div),
- ~(VCEPLL_PDIV_A_MASK | VCEPLL_PDIV_B_MASK));
-
- /* give the PLL some time to settle */
- mdelay(15);
-
- /* deassert PLL_RESET */
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);
-
- mdelay(15);
-
- /* switch from bypass mode to normal mode */
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_BYPASS_EN_MASK);
-
- r = si_vce_send_vcepll_ctlreq(rdev);
- if (r)
- return r;
-
- /* switch VCLK and DCLK selection */
- WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
- EVCLK_SRC_SEL(16) | ECCLK_SRC_SEL(16),
- ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));
-
- mdelay(100);
-
- return 0;
-}
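
The removed si.c code above leans on two idioms that recur throughout this commit. Every clock-gating tweak is a read-modify-write that only touches the bus when the value actually changes (orig = data = RREG32(...); ...; if (orig != data) WREG32(...)), and the VCE PLL is driven through a request/acknowledge handshake on an indirect SMC register: assert CTLREQ, poll until both CTLACK bits come up, then drop the request. A minimal userland sketch of that handshake, where fake_reg and reg_write_masked() are hypothetical stand-ins for CG_VCEPLL_FUNC_CNTL and the WREG32_SMC_P(reg, val, mask) accessor (which preserves the bits in mask and replaces the rest with val):

#include <stdint.h>
#include <stdio.h>

#define CTLREQ_MASK	0x00000008
#define CTLACK_MASK	0x40000000
#define CTLACK2_MASK	0x80000000

/* hypothetical stand-in for the CG_VCEPLL_FUNC_CNTL indirect register */
static uint32_t fake_reg;

static uint32_t
reg_read(void)
{
	/* pretend the SMC acknowledges as soon as it sees the request */
	if (fake_reg & CTLREQ_MASK)
		fake_reg |= CTLACK_MASK | CTLACK2_MASK;
	return fake_reg;
}

static void
reg_write_masked(uint32_t val, uint32_t keep)
{
	/* WREG32_SMC_P semantics: preserve the 'keep' bits, OR in 'val' */
	fake_reg = (fake_reg & keep) | val;
}

static int
pll_ctlreq(void)
{
	const uint32_t ack = CTLACK_MASK | CTLACK2_MASK;
	unsigned i;

	/* assert the request, leaving every other bit alone */
	reg_write_masked(CTLREQ_MASK, ~CTLREQ_MASK);
	for (i = 0; i < 100; ++i) {
		if ((reg_read() & ack) == ack)
			break;
		/* the real driver sleeps with mdelay(10) between polls */
	}
	/* drop the request whether or not the PLL answered */
	reg_write_masked(0, ~CTLREQ_MASK);
	return (i == 100) ? -1 : 0;
}

int
main(void)
{
	printf("handshake %s\n", pll_ctlreq() ? "timed out" : "ok");
	return 0;
}
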
diff --git a/sys/dev/pci/drm/radeon/si_blit_shaders.c b/sys/dev/pci/drm/radeon/si_blit_shaders.c
index 7e1ff00997d..45f87fdb676 100644
--- a/sys/dev/pci/drm/radeon/si_blit_shaders.c
+++ b/sys/dev/pci/drm/radeon/si_blit_shaders.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: si_blit_shaders.c,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2011 Advanced Micro Devices, Inc.
*
@@ -24,7 +25,9 @@
* Alex Deucher <alexander.deucher@amd.com>
*/
-#include <dev/pci/drm/drm_linux.h>
+#include <sys/types.h>
+
+#include <dev/pci/drm/drmP.h>
const u32 si_default_state[] =
{
diff --git a/sys/dev/pci/drm/radeon/si_blit_shaders.h b/sys/dev/pci/drm/radeon/si_blit_shaders.h
index c739e51e396..ac2abf9bf50 100644
--- a/sys/dev/pci/drm/radeon/si_blit_shaders.h
+++ b/sys/dev/pci/drm/radeon/si_blit_shaders.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: si_blit_shaders.h,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2011 Advanced Micro Devices, Inc.
*
diff --git a/sys/dev/pci/drm/radeon/si_reg.h b/sys/dev/pci/drm/radeon/si_reg.h
index 501f9d431d5..ba6918a46cf 100644
--- a/sys/dev/pci/drm/radeon/si_reg.h
+++ b/sys/dev/pci/drm/radeon/si_reg.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: si_reg.h,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2010 Advanced Micro Devices, Inc.
*
diff --git a/sys/dev/pci/drm/radeon/sid.h b/sys/dev/pci/drm/radeon/sid.h
index d1a7b58dd29..132d48ff337 100644
--- a/sys/dev/pci/drm/radeon/sid.h
+++ b/sys/dev/pci/drm/radeon/sid.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: sid.h,v 1.6 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright 2011 Advanced Micro Devices, Inc.
*
@@ -28,7 +29,14 @@
#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
-#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001
+
+#define CG_MULT_THERMAL_STATUS 0x714
+#define ASIC_MAX_TEMP(x) ((x) << 0)
+#define ASIC_MAX_TEMP_MASK 0x000001ff
+#define ASIC_MAX_TEMP_SHIFT 0
+#define CTF_TEMP(x) ((x) << 9)
+#define CTF_TEMP_MASK 0x0003fe00
+#define CTF_TEMP_SHIFT 9
#define SI_MAX_SH_GPRS 256
#define SI_MAX_TEMP_GPRS 16
@@ -48,279 +56,9 @@
#define SI_MAX_TCC 16
#define SI_MAX_TCC_MASK 0xFFFF
-/* SMC IND accessor regs */
-#define SMC_IND_INDEX_0 0x200
-#define SMC_IND_DATA_0 0x204
-
-#define SMC_IND_ACCESS_CNTL 0x228
-# define AUTO_INCREMENT_IND_0 (1 << 0)
-#define SMC_MESSAGE_0 0x22c
-#define SMC_RESP_0 0x230
-
-/* CG IND registers are accessed via SMC indirect space + SMC_CG_IND_START */
-#define SMC_CG_IND_START 0xc0030000
-#define SMC_CG_IND_END 0xc0040000
-
-#define CG_CGTT_LOCAL_0 0x400
-#define CG_CGTT_LOCAL_1 0x401
-
-/* SMC IND registers */
-#define SMC_SYSCON_RESET_CNTL 0x80000000
-# define RST_REG (1 << 0)
-#define SMC_SYSCON_CLOCK_CNTL_0 0x80000004
-# define CK_DISABLE (1 << 0)
-# define CKEN (1 << 24)
-
#define VGA_HDP_CONTROL 0x328
#define VGA_MEMORY_DISABLE (1 << 4)
-#define DCCG_DISP_SLOW_SELECT_REG 0x4fc
-#define DCCG_DISP1_SLOW_SELECT(x) ((x) << 0)
-#define DCCG_DISP1_SLOW_SELECT_MASK (7 << 0)
-#define DCCG_DISP1_SLOW_SELECT_SHIFT 0
-#define DCCG_DISP2_SLOW_SELECT(x) ((x) << 4)
-#define DCCG_DISP2_SLOW_SELECT_MASK (7 << 4)
-#define DCCG_DISP2_SLOW_SELECT_SHIFT 4
-
-#define CG_SPLL_FUNC_CNTL 0x600
-#define SPLL_RESET (1 << 0)
-#define SPLL_SLEEP (1 << 1)
-#define SPLL_BYPASS_EN (1 << 3)
-#define SPLL_REF_DIV(x) ((x) << 4)
-#define SPLL_REF_DIV_MASK (0x3f << 4)
-#define SPLL_PDIV_A(x) ((x) << 20)
-#define SPLL_PDIV_A_MASK (0x7f << 20)
-#define SPLL_PDIV_A_SHIFT 20
-#define CG_SPLL_FUNC_CNTL_2 0x604
-#define SCLK_MUX_SEL(x) ((x) << 0)
-#define SCLK_MUX_SEL_MASK (0x1ff << 0)
-#define SPLL_CTLREQ_CHG (1 << 23)
-#define SCLK_MUX_UPDATE (1 << 26)
-#define CG_SPLL_FUNC_CNTL_3 0x608
-#define SPLL_FB_DIV(x) ((x) << 0)
-#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
-#define SPLL_FB_DIV_SHIFT 0
-#define SPLL_DITHEN (1 << 28)
-#define CG_SPLL_FUNC_CNTL_4 0x60c
-
-#define SPLL_STATUS 0x614
-#define SPLL_CHG_STATUS (1 << 1)
-#define SPLL_CNTL_MODE 0x618
-#define SPLL_SW_DIR_CONTROL (1 << 0)
-# define SPLL_REFCLK_SEL(x) ((x) << 26)
-# define SPLL_REFCLK_SEL_MASK (3 << 26)
-
-#define CG_SPLL_SPREAD_SPECTRUM 0x620
-#define SSEN (1 << 0)
-#define CLK_S(x) ((x) << 4)
-#define CLK_S_MASK (0xfff << 4)
-#define CLK_S_SHIFT 4
-#define CG_SPLL_SPREAD_SPECTRUM_2 0x624
-#define CLK_V(x) ((x) << 0)
-#define CLK_V_MASK (0x3ffffff << 0)
-#define CLK_V_SHIFT 0
-
-#define CG_SPLL_AUTOSCALE_CNTL 0x62c
-# define AUTOSCALE_ON_SS_CLEAR (1 << 9)
-
-/* discrete uvd clocks */
-#define CG_UPLL_FUNC_CNTL 0x634
-# define UPLL_RESET_MASK 0x00000001
-# define UPLL_SLEEP_MASK 0x00000002
-# define UPLL_BYPASS_EN_MASK 0x00000004
-# define UPLL_CTLREQ_MASK 0x00000008
-# define UPLL_VCO_MODE_MASK 0x00000600
-# define UPLL_REF_DIV_MASK 0x003F0000
-# define UPLL_CTLACK_MASK 0x40000000
-# define UPLL_CTLACK2_MASK 0x80000000
-#define CG_UPLL_FUNC_CNTL_2 0x638
-# define UPLL_PDIV_A(x) ((x) << 0)
-# define UPLL_PDIV_A_MASK 0x0000007F
-# define UPLL_PDIV_B(x) ((x) << 8)
-# define UPLL_PDIV_B_MASK 0x00007F00
-# define VCLK_SRC_SEL(x) ((x) << 20)
-# define VCLK_SRC_SEL_MASK 0x01F00000
-# define DCLK_SRC_SEL(x) ((x) << 25)
-# define DCLK_SRC_SEL_MASK 0x3E000000
-#define CG_UPLL_FUNC_CNTL_3 0x63C
-# define UPLL_FB_DIV(x) ((x) << 0)
-# define UPLL_FB_DIV_MASK 0x01FFFFFF
-#define CG_UPLL_FUNC_CNTL_4 0x644
-# define UPLL_SPARE_ISPARE9 0x00020000
-#define CG_UPLL_FUNC_CNTL_5 0x648
-# define RESET_ANTI_MUX_MASK 0x00000200
-#define CG_UPLL_SPREAD_SPECTRUM 0x650
-# define SSEN_MASK 0x00000001
-
-#define MPLL_BYPASSCLK_SEL 0x65c
-# define MPLL_CLKOUT_SEL(x) ((x) << 8)
-# define MPLL_CLKOUT_SEL_MASK 0xFF00
-
-#define CG_CLKPIN_CNTL 0x660
-# define XTALIN_DIVIDE (1 << 1)
-# define BCLK_AS_XCLK (1 << 2)
-#define CG_CLKPIN_CNTL_2 0x664
-# define FORCE_BIF_REFCLK_EN (1 << 3)
-# define MUX_TCLK_TO_XCLK (1 << 8)
-
-#define THM_CLK_CNTL 0x66c
-# define CMON_CLK_SEL(x) ((x) << 0)
-# define CMON_CLK_SEL_MASK 0xFF
-# define TMON_CLK_SEL(x) ((x) << 8)
-# define TMON_CLK_SEL_MASK 0xFF00
-#define MISC_CLK_CNTL 0x670
-# define DEEP_SLEEP_CLK_SEL(x) ((x) << 0)
-# define DEEP_SLEEP_CLK_SEL_MASK 0xFF
-# define ZCLK_SEL(x) ((x) << 8)
-# define ZCLK_SEL_MASK 0xFF00
-
-#define CG_THERMAL_CTRL 0x700
-#define DPM_EVENT_SRC(x) ((x) << 0)
-#define DPM_EVENT_SRC_MASK (7 << 0)
-#define DIG_THERM_DPM(x) ((x) << 14)
-#define DIG_THERM_DPM_MASK 0x003FC000
-#define DIG_THERM_DPM_SHIFT 14
-#define CG_THERMAL_STATUS 0x704
-#define FDO_PWM_DUTY(x) ((x) << 9)
-#define FDO_PWM_DUTY_MASK (0xff << 9)
-#define FDO_PWM_DUTY_SHIFT 9
-#define CG_THERMAL_INT 0x708
-#define DIG_THERM_INTH(x) ((x) << 8)
-#define DIG_THERM_INTH_MASK 0x0000FF00
-#define DIG_THERM_INTH_SHIFT 8
-#define DIG_THERM_INTL(x) ((x) << 16)
-#define DIG_THERM_INTL_MASK 0x00FF0000
-#define DIG_THERM_INTL_SHIFT 16
-#define THERM_INT_MASK_HIGH (1 << 24)
-#define THERM_INT_MASK_LOW (1 << 25)
-
-#define CG_MULT_THERMAL_CTRL 0x710
-#define TEMP_SEL(x) ((x) << 20)
-#define TEMP_SEL_MASK (0xff << 20)
-#define TEMP_SEL_SHIFT 20
-#define CG_MULT_THERMAL_STATUS 0x714
-#define ASIC_MAX_TEMP(x) ((x) << 0)
-#define ASIC_MAX_TEMP_MASK 0x000001ff
-#define ASIC_MAX_TEMP_SHIFT 0
-#define CTF_TEMP(x) ((x) << 9)
-#define CTF_TEMP_MASK 0x0003fe00
-#define CTF_TEMP_SHIFT 9
-
-#define CG_FDO_CTRL0 0x754
-#define FDO_STATIC_DUTY(x) ((x) << 0)
-#define FDO_STATIC_DUTY_MASK 0x000000FF
-#define FDO_STATIC_DUTY_SHIFT 0
-#define CG_FDO_CTRL1 0x758
-#define FMAX_DUTY100(x) ((x) << 0)
-#define FMAX_DUTY100_MASK 0x000000FF
-#define FMAX_DUTY100_SHIFT 0
-#define CG_FDO_CTRL2 0x75C
-#define TMIN(x) ((x) << 0)
-#define TMIN_MASK 0x000000FF
-#define TMIN_SHIFT 0
-#define FDO_PWM_MODE(x) ((x) << 11)
-#define FDO_PWM_MODE_MASK (7 << 11)
-#define FDO_PWM_MODE_SHIFT 11
-#define TACH_PWM_RESP_RATE(x) ((x) << 25)
-#define TACH_PWM_RESP_RATE_MASK (0x7f << 25)
-#define TACH_PWM_RESP_RATE_SHIFT 25
-
-#define CG_TACH_CTRL 0x770
-# define EDGE_PER_REV(x) ((x) << 0)
-# define EDGE_PER_REV_MASK (0x7 << 0)
-# define EDGE_PER_REV_SHIFT 0
-# define TARGET_PERIOD(x) ((x) << 3)
-# define TARGET_PERIOD_MASK 0xfffffff8
-# define TARGET_PERIOD_SHIFT 3
-#define CG_TACH_STATUS 0x774
-# define TACH_PERIOD(x) ((x) << 0)
-# define TACH_PERIOD_MASK 0xffffffff
-# define TACH_PERIOD_SHIFT 0
-
-#define GENERAL_PWRMGT 0x780
-# define GLOBAL_PWRMGT_EN (1 << 0)
-# define STATIC_PM_EN (1 << 1)
-# define THERMAL_PROTECTION_DIS (1 << 2)
-# define THERMAL_PROTECTION_TYPE (1 << 3)
-# define SW_SMIO_INDEX(x) ((x) << 6)
-# define SW_SMIO_INDEX_MASK (1 << 6)
-# define SW_SMIO_INDEX_SHIFT 6
-# define VOLT_PWRMGT_EN (1 << 10)
-# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
-#define CG_TPC 0x784
-#define SCLK_PWRMGT_CNTL 0x788
-# define SCLK_PWRMGT_OFF (1 << 0)
-# define SCLK_LOW_D1 (1 << 1)
-# define FIR_RESET (1 << 4)
-# define FIR_FORCE_TREND_SEL (1 << 5)
-# define FIR_TREND_MODE (1 << 6)
-# define DYN_GFX_CLK_OFF_EN (1 << 7)
-# define GFX_CLK_FORCE_ON (1 << 8)
-# define GFX_CLK_REQUEST_OFF (1 << 9)
-# define GFX_CLK_FORCE_OFF (1 << 10)
-# define GFX_CLK_OFF_ACPI_D1 (1 << 11)
-# define GFX_CLK_OFF_ACPI_D2 (1 << 12)
-# define GFX_CLK_OFF_ACPI_D3 (1 << 13)
-# define DYN_LIGHT_SLEEP_EN (1 << 14)
-
-#define TARGET_AND_CURRENT_PROFILE_INDEX 0x798
-# define CURRENT_STATE_INDEX_MASK (0xf << 4)
-# define CURRENT_STATE_INDEX_SHIFT 4
-
-#define CG_FTV 0x7bc
-
-#define CG_FFCT_0 0x7c0
-# define UTC_0(x) ((x) << 0)
-# define UTC_0_MASK (0x3ff << 0)
-# define DTC_0(x) ((x) << 10)
-# define DTC_0_MASK (0x3ff << 10)
-
-#define CG_BSP 0x7fc
-# define BSP(x) ((x) << 0)
-# define BSP_MASK (0xffff << 0)
-# define BSU(x) ((x) << 16)
-# define BSU_MASK (0xf << 16)
-#define CG_AT 0x800
-# define CG_R(x) ((x) << 0)
-# define CG_R_MASK (0xffff << 0)
-# define CG_L(x) ((x) << 16)
-# define CG_L_MASK (0xffff << 16)
-
-#define CG_GIT 0x804
-# define CG_GICST(x) ((x) << 0)
-# define CG_GICST_MASK (0xffff << 0)
-# define CG_GIPOT(x) ((x) << 16)
-# define CG_GIPOT_MASK (0xffff << 16)
-
-#define CG_SSP 0x80c
-# define SST(x) ((x) << 0)
-# define SST_MASK (0xffff << 0)
-# define SSTU(x) ((x) << 16)
-# define SSTU_MASK (0xf << 16)
-
-#define CG_DISPLAY_GAP_CNTL 0x828
-# define DISP1_GAP(x) ((x) << 0)
-# define DISP1_GAP_MASK (3 << 0)
-# define DISP2_GAP(x) ((x) << 2)
-# define DISP2_GAP_MASK (3 << 2)
-# define VBI_TIMER_COUNT(x) ((x) << 4)
-# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
-# define VBI_TIMER_UNIT(x) ((x) << 20)
-# define VBI_TIMER_UNIT_MASK (7 << 20)
-# define DISP1_GAP_MCHG(x) ((x) << 24)
-# define DISP1_GAP_MCHG_MASK (3 << 24)
-# define DISP2_GAP_MCHG(x) ((x) << 26)
-# define DISP2_GAP_MCHG_MASK (3 << 26)
-
-#define CG_ULV_CONTROL 0x878
-#define CG_ULV_PARAMETER 0x87c
-
-#define SMC_SCRATCH0 0x884
-
-#define CG_CAC_CTRL 0x8b8
-# define CAC_WINDOW(x) ((x) << 0)
-# define CAC_WINDOW_MASK 0x00ffffff
-
#define DMIF_ADDR_CONFIG 0xBD4
#define DMIF_ADDR_CALC 0xC00
@@ -330,14 +68,6 @@
# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
#define SRBM_STATUS 0xE50
-#define GRBM_RQ_PENDING (1 << 5)
-#define VMC_BUSY (1 << 8)
-#define MCB_BUSY (1 << 9)
-#define MCB_NON_DISPLAY_BUSY (1 << 10)
-#define MCC_BUSY (1 << 11)
-#define MCD_BUSY (1 << 12)
-#define SEM_BUSY (1 << 14)
-#define IH_BUSY (1 << 17)
#define SRBM_SOFT_RESET 0x0E60
#define SOFT_RESET_BIF (1 << 1)
@@ -358,14 +88,6 @@
#define CC_SYS_RB_BACKEND_DISABLE 0xe80
#define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84
-#define SRBM_READ_ERROR 0xE98
-#define SRBM_INT_CNTL 0xEA0
-#define SRBM_INT_ACK 0xEA8
-
-#define SRBM_STATUS2 0x0EC4
-#define DMA_BUSY (1 << 5)
-#define DMA1_BUSY (1 << 6)
-
#define VM_L2_CNTL 0x1400
#define ENABLE_L2_CACHE (1 << 0)
#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
@@ -404,7 +126,6 @@
#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
-#define PAGE_TABLE_BLOCK_SIZE(x) (((x) & 0xF) << 24)
#define VM_CONTEXT1_CNTL 0x1414
#define VM_CONTEXT0_CNTL2 0x1430
#define VM_CONTEXT1_CNTL2 0x1434
@@ -419,20 +140,6 @@
#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
-#define PROTECTIONS_MASK (0xf << 0)
-#define PROTECTIONS_SHIFT 0
- /* bit 0: range
- * bit 1: pde0
- * bit 2: valid
- * bit 3: read
- * bit 4: write
- */
-#define MEMORY_CLIENT_ID_MASK (0xff << 12)
-#define MEMORY_CLIENT_ID_SHIFT 12
-#define MEMORY_CLIENT_RW_MASK (1 << 24)
-#define MEMORY_CLIENT_RW_SHIFT 24
-#define FAULT_VMID_MASK (0xf << 25)
-#define FAULT_VMID_SHIFT 25
#define VM_INVALIDATE_REQUEST 0x1478
#define VM_INVALIDATE_RESPONSE 0x147c
@@ -454,10 +161,6 @@
#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x1580
-#define VM_L2_CG 0x15c0
-#define MC_CG_ENABLE (1 << 18)
-#define MC_LS_ENABLE (1 << 19)
-
#define MC_SHARED_CHMAP 0x2004
#define NOOFCHAN_SHIFT 12
#define NOOFCHAN_MASK 0x0000f000
@@ -483,17 +186,6 @@
#define MC_SHARED_BLACKOUT_CNTL 0x20ac
-#define MC_HUB_MISC_HUB_CG 0x20b8
-#define MC_HUB_MISC_VM_CG 0x20bc
-
-#define MC_HUB_MISC_SIP_CG 0x20c0
-
-#define MC_XPB_CLK_GAT 0x2478
-
-#define MC_CITF_MISC_RD_CG 0x2648
-#define MC_CITF_MISC_WR_CG 0x264c
-#define MC_CITF_MISC_VM_CG 0x2650
-
#define MC_ARB_RAMCFG 0x2760
#define NOOFBANK_SHIFT 0
#define NOOFBANK_MASK 0x00000003
@@ -509,23 +201,6 @@
#define NOOFGROUPS_SHIFT 12
#define NOOFGROUPS_MASK 0x00001000
-#define MC_ARB_DRAM_TIMING 0x2774
-#define MC_ARB_DRAM_TIMING2 0x2778
-
-#define MC_ARB_BURST_TIME 0x2808
-#define STATE0(x) ((x) << 0)
-#define STATE0_MASK (0x1f << 0)
-#define STATE0_SHIFT 0
-#define STATE1(x) ((x) << 5)
-#define STATE1_MASK (0x1f << 5)
-#define STATE1_SHIFT 5
-#define STATE2(x) ((x) << 10)
-#define STATE2_MASK (0x1f << 10)
-#define STATE2_SHIFT 10
-#define STATE3(x) ((x) << 15)
-#define STATE3_MASK (0x1f << 15)
-#define STATE3_SHIFT 15
-
#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x28e8
#define TRAIN_DONE_D0 (1 << 30)
#define TRAIN_DONE_D1 (1 << 31)
@@ -533,109 +208,14 @@
#define MC_SEQ_SUP_CNTL 0x28c8
#define RUN_MASK (1 << 0)
#define MC_SEQ_SUP_PGM 0x28cc
-#define MC_PMG_AUTO_CMD 0x28d0
#define MC_IO_PAD_CNTL_D0 0x29d0
#define MEM_FALL_OUT_CMD (1 << 8)
-#define MC_SEQ_RAS_TIMING 0x28a0
-#define MC_SEQ_CAS_TIMING 0x28a4
-#define MC_SEQ_MISC_TIMING 0x28a8
-#define MC_SEQ_MISC_TIMING2 0x28ac
-#define MC_SEQ_PMG_TIMING 0x28b0
-#define MC_SEQ_RD_CTL_D0 0x28b4
-#define MC_SEQ_RD_CTL_D1 0x28b8
-#define MC_SEQ_WR_CTL_D0 0x28bc
-#define MC_SEQ_WR_CTL_D1 0x28c0
-
-#define MC_SEQ_MISC0 0x2a00
-#define MC_SEQ_MISC0_VEN_ID_SHIFT 8
-#define MC_SEQ_MISC0_VEN_ID_MASK 0x00000f00
-#define MC_SEQ_MISC0_VEN_ID_VALUE 3
-#define MC_SEQ_MISC0_REV_ID_SHIFT 12
-#define MC_SEQ_MISC0_REV_ID_MASK 0x0000f000
-#define MC_SEQ_MISC0_REV_ID_VALUE 1
-#define MC_SEQ_MISC0_GDDR5_SHIFT 28
-#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
-#define MC_SEQ_MISC0_GDDR5_VALUE 5
-#define MC_SEQ_MISC1 0x2a04
-#define MC_SEQ_RESERVE_M 0x2a08
-#define MC_PMG_CMD_EMRS 0x2a0c
-
#define MC_SEQ_IO_DEBUG_INDEX 0x2a44
#define MC_SEQ_IO_DEBUG_DATA 0x2a48
-#define MC_SEQ_MISC5 0x2a54
-#define MC_SEQ_MISC6 0x2a58
-
-#define MC_SEQ_MISC7 0x2a64
-
-#define MC_SEQ_RAS_TIMING_LP 0x2a6c
-#define MC_SEQ_CAS_TIMING_LP 0x2a70
-#define MC_SEQ_MISC_TIMING_LP 0x2a74
-#define MC_SEQ_MISC_TIMING2_LP 0x2a78
-#define MC_SEQ_WR_CTL_D0_LP 0x2a7c
-#define MC_SEQ_WR_CTL_D1_LP 0x2a80
-#define MC_SEQ_PMG_CMD_EMRS_LP 0x2a84
-#define MC_SEQ_PMG_CMD_MRS_LP 0x2a88
-
-#define MC_PMG_CMD_MRS 0x2aac
-
-#define MC_SEQ_RD_CTL_D0_LP 0x2b1c
-#define MC_SEQ_RD_CTL_D1_LP 0x2b20
-
-#define MC_PMG_CMD_MRS1 0x2b44
-#define MC_SEQ_PMG_CMD_MRS1_LP 0x2b48
-#define MC_SEQ_PMG_TIMING_LP 0x2b4c
-
-#define MC_SEQ_WR_CTL_2 0x2b54
-#define MC_SEQ_WR_CTL_2_LP 0x2b58
-#define MC_PMG_CMD_MRS2 0x2b5c
-#define MC_SEQ_PMG_CMD_MRS2_LP 0x2b60
-
-#define MCLK_PWRMGT_CNTL 0x2ba0
-# define DLL_SPEED(x) ((x) << 0)
-# define DLL_SPEED_MASK (0x1f << 0)
-# define DLL_READY (1 << 6)
-# define MC_INT_CNTL (1 << 7)
-# define MRDCK0_PDNB (1 << 8)
-# define MRDCK1_PDNB (1 << 9)
-# define MRDCK0_RESET (1 << 16)
-# define MRDCK1_RESET (1 << 17)
-# define DLL_READY_READ (1 << 24)
-#define DLL_CNTL 0x2ba4
-# define MRDCK0_BYPASS (1 << 24)
-# define MRDCK1_BYPASS (1 << 25)
-
-#define MPLL_CNTL_MODE 0x2bb0
-# define MPLL_MCLK_SEL (1 << 11)
-#define MPLL_FUNC_CNTL 0x2bb4
-#define BWCTRL(x) ((x) << 20)
-#define BWCTRL_MASK (0xff << 20)
-#define MPLL_FUNC_CNTL_1 0x2bb8
-#define VCO_MODE(x) ((x) << 0)
-#define VCO_MODE_MASK (3 << 0)
-#define CLKFRAC(x) ((x) << 4)
-#define CLKFRAC_MASK (0xfff << 4)
-#define CLKF(x) ((x) << 16)
-#define CLKF_MASK (0xfff << 16)
-#define MPLL_FUNC_CNTL_2 0x2bbc
-#define MPLL_AD_FUNC_CNTL 0x2bc0
-#define YCLK_POST_DIV(x) ((x) << 0)
-#define YCLK_POST_DIV_MASK (7 << 0)
-#define MPLL_DQ_FUNC_CNTL 0x2bc4
-#define YCLK_SEL(x) ((x) << 4)
-#define YCLK_SEL_MASK (1 << 4)
-
-#define MPLL_SS1 0x2bcc
-#define CLKV(x) ((x) << 0)
-#define CLKV_MASK (0x3ffffff << 0)
-#define MPLL_SS2 0x2bd0
-#define CLKS(x) ((x) << 0)
-#define CLKS_MASK (0xfff << 0)
-
#define HDP_HOST_PATH_CNTL 0x2C00
-#define CLOCK_GATING_DIS (1 << 23)
#define HDP_NONSURFACE_BASE 0x2C04
#define HDP_NONSURFACE_INFO 0x2C08
#define HDP_NONSURFACE_SIZE 0x2C0C
@@ -643,10 +223,6 @@
#define HDP_ADDR_CONFIG 0x2F48
#define HDP_MISC_CNTL 0x2F4C
#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
-#define HDP_MEM_POWER_LS 0x2F50
-#define HDP_LS_ENABLE (1 << 0)
-
-#define ATC_MISC_CG 0x3350
#define IH_RB_CNTL 0x3e00
# define IH_RB_ENABLE (1 << 0)
@@ -692,99 +268,6 @@
#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
-/* DCE6 ELD audio interface */
-#define AZ_F0_CODEC_ENDPOINT_INDEX 0x5E00
-# define AZ_ENDPOINT_REG_INDEX(x) (((x) & 0xff) << 0)
-# define AZ_ENDPOINT_REG_WRITE_EN (1 << 8)
-#define AZ_F0_CODEC_ENDPOINT_DATA 0x5E04
-
-#define AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x25
-#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0)
-#define SPEAKER_ALLOCATION_MASK (0x7f << 0)
-#define SPEAKER_ALLOCATION_SHIFT 0
-#define HDMI_CONNECTION (1 << 16)
-#define DP_CONNECTION (1 << 17)
-
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x28 /* LPCM */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x29 /* AC3 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x2A /* MPEG1 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x2B /* MP3 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x2C /* MPEG2 */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x2D /* AAC */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x2E /* DTS */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x2F /* ATRAC */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x30 /* one bit audio - leave at 0 (default) */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x31 /* Dolby Digital */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x32 /* DTS-HD */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x33 /* MAT-MLP */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x34 /* DTS */
-#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x35 /* WMA Pro */
-# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
-/* max channels minus one. 7 = 8 channels */
-# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
-# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
-# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
-/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
- * bit0 = 32 kHz
- * bit1 = 44.1 kHz
- * bit2 = 48 kHz
- * bit3 = 88.2 kHz
- * bit4 = 96 kHz
- * bit5 = 176.4 kHz
- * bit6 = 192 kHz
- */
-
-#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x37
-# define VIDEO_LIPSYNC(x) (((x) & 0xff) << 0)
-# define AUDIO_LIPSYNC(x) (((x) & 0xff) << 8)
-/* VIDEO_LIPSYNC, AUDIO_LIPSYNC
- * 0 = invalid
- * x = legal delay value
- * 255 = sync not supported
- */
-#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x38
-# define HBR_CAPABLE (1 << 0) /* enabled by default */
-
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x3a
-# define MANUFACTURER_ID(x) (((x) & 0xffff) << 0)
-# define PRODUCT_ID(x) (((x) & 0xffff) << 16)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x3b
-# define SINK_DESCRIPTION_LEN(x) (((x) & 0xff) << 0)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x3c
-# define PORT_ID0(x) (((x) & 0xffffffff) << 0)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x3d
-# define PORT_ID1(x) (((x) & 0xffffffff) << 0)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x3e
-# define DESCRIPTION0(x) (((x) & 0xff) << 0)
-# define DESCRIPTION1(x) (((x) & 0xff) << 8)
-# define DESCRIPTION2(x) (((x) & 0xff) << 16)
-# define DESCRIPTION3(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x3f
-# define DESCRIPTION4(x) (((x) & 0xff) << 0)
-# define DESCRIPTION5(x) (((x) & 0xff) << 8)
-# define DESCRIPTION6(x) (((x) & 0xff) << 16)
-# define DESCRIPTION7(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x40
-# define DESCRIPTION8(x) (((x) & 0xff) << 0)
-# define DESCRIPTION9(x) (((x) & 0xff) << 8)
-# define DESCRIPTION10(x) (((x) & 0xff) << 16)
-# define DESCRIPTION11(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x41
-# define DESCRIPTION12(x) (((x) & 0xff) << 0)
-# define DESCRIPTION13(x) (((x) & 0xff) << 8)
-# define DESCRIPTION14(x) (((x) & 0xff) << 16)
-# define DESCRIPTION15(x) (((x) & 0xff) << 24)
-#define AZ_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x42
-# define DESCRIPTION16(x) (((x) & 0xff) << 0)
-# define DESCRIPTION17(x) (((x) & 0xff) << 8)
-
-#define AZ_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x54
-# define AUDIO_ENABLED (1 << 31)
-
-#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x56
-#define PORT_CONNECTIVITY_MASK (3 << 30)
-#define PORT_CONNECTIVITY_SHIFT 30
-
#define DC_LB_MEMORY_SPLIT 0x6b0c
#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
@@ -865,7 +348,7 @@
# define GRPH_PFLIP_INT_MASK (1 << 0)
# define GRPH_PFLIP_INT_TYPE (1 << 8)
-#define DAC_AUTODETECT_INT_CONTROL 0x67c8
+#define DACA_AUTODETECT_INT_CONTROL 0x66c8
#define DC_HPD1_INT_STATUS 0x601c
#define DC_HPD2_INT_STATUS 0x6028
@@ -899,38 +382,9 @@
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
# define DC_HPDx_EN (1 << 28)
-#define DPG_PIPE_STUTTER_CONTROL 0x6cd4
-# define STUTTER_ENABLE (1 << 0)
-
/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
#define CRTC_STATUS_FRAME_COUNT 0x6e98
-/* Audio clocks */
-#define DCCG_AUDIO_DTO_SOURCE 0x05ac
-# define DCCG_AUDIO_DTO0_SOURCE_SEL(x) ((x) << 0) /* crtc0 - crtc5 */
-# define DCCG_AUDIO_DTO_SEL (1 << 4) /* 0=dto0 1=dto1 */
-
-#define DCCG_AUDIO_DTO0_PHASE 0x05b0
-#define DCCG_AUDIO_DTO0_MODULE 0x05b4
-#define DCCG_AUDIO_DTO1_PHASE 0x05c0
-#define DCCG_AUDIO_DTO1_MODULE 0x05c4
-
-#define DENTIST_DISPCLK_CNTL 0x0490
-# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24)
-# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24)
-# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24
-
-#define AFMT_AUDIO_SRC_CONTROL 0x713c
-#define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)
-/* AFMT_AUDIO_SRC_SELECT
- * 0 = stream0
- * 1 = stream1
- * 2 = stream2
- * 3 = stream3
- * 4 = stream4
- * 5 = stream5
- */
-
#define GRBM_CNTL 0x8000
#define GRBM_READ_TIMEOUT(x) ((x) << 0)
@@ -1103,24 +557,6 @@
#define SQC_CACHES 0x8C08
-#define SQ_POWER_THROTTLE 0x8e58
-#define MIN_POWER(x) ((x) << 0)
-#define MIN_POWER_MASK (0x3fff << 0)
-#define MIN_POWER_SHIFT 0
-#define MAX_POWER(x) ((x) << 16)
-#define MAX_POWER_MASK (0x3fff << 16)
-#define MAX_POWER_SHIFT 0
-#define SQ_POWER_THROTTLE2 0x8e5c
-#define MAX_POWER_DELTA(x) ((x) << 0)
-#define MAX_POWER_DELTA_MASK (0x3fff << 0)
-#define MAX_POWER_DELTA_SHIFT 0
-#define STI_SIZE(x) ((x) << 16)
-#define STI_SIZE_MASK (0x3ff << 16)
-#define STI_SIZE_SHIFT 16
-#define LTI_RATIO(x) ((x) << 27)
-#define LTI_RATIO_MASK (0xf << 27)
-#define LTI_RATIO_SHIFT 27
-
#define SX_DEBUG_1 0x9060
#define SPI_STATIC_THREAD_MGMT_1 0x90E0
@@ -1138,11 +574,6 @@
#define CGTS_USER_TCC_DISABLE 0x914C
#define TCC_DISABLE_MASK 0xFFFF0000
#define TCC_DISABLE_SHIFT 16
-#define CGTS_SM_CTRL_REG 0x9150
-#define OVERRIDE (1 << 21)
-#define LS_OVERRIDE (1 << 22)
-
-#define SPI_LB_CU_MASK 0x9354
#define TA_CNTL_AUX 0x9508
@@ -1232,8 +663,6 @@
#define CB_PERFCOUNTER3_SELECT0 0x9a38
#define CB_PERFCOUNTER3_SELECT1 0x9a3c
-#define CB_CGTT_SCLK_CTRL 0x9a60
-
#define GC_USER_RB_BACKEND_DISABLE 0x9B7C
#define BACKEND_DISABLE_MASK 0x00FF0000
#define BACKEND_DISABLE_SHIFT 16
@@ -1291,9 +720,6 @@
# define CP_RINGID1_INT_STAT (1 << 30)
# define CP_RINGID0_INT_STAT (1 << 31)
-#define CP_MEM_SLP_CNTL 0xC1E4
-# define CP_MEM_LS_EN (1 << 0)
-
#define CP_DEBUG 0xC1FC
#define RLC_CNTL 0xC300
@@ -1301,7 +727,6 @@
#define RLC_RL_BASE 0xC304
#define RLC_RL_SIZE 0xC308
#define RLC_LB_CNTL 0xC30C
-# define LOAD_BALANCE_ENABLE (1 << 0)
#define RLC_SAVE_AND_RESTORE_BASE 0xC310
#define RLC_LB_CNTR_MAX 0xC314
#define RLC_LB_CNTR_INIT 0xC318
@@ -1316,56 +741,6 @@
#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC340
#define RLC_MC_CNTL 0xC344
#define RLC_UCODE_CNTL 0xC348
-#define RLC_STAT 0xC34C
-# define RLC_BUSY_STATUS (1 << 0)
-# define GFX_POWER_STATUS (1 << 1)
-# define GFX_CLOCK_STATUS (1 << 2)
-# define GFX_LS_STATUS (1 << 3)
-
-#define RLC_PG_CNTL 0xC35C
-# define GFX_PG_ENABLE (1 << 0)
-# define GFX_PG_SRC (1 << 1)
-
-#define RLC_CGTT_MGCG_OVERRIDE 0xC400
-#define RLC_CGCG_CGLS_CTRL 0xC404
-# define CGCG_EN (1 << 0)
-# define CGLS_EN (1 << 1)
-
-#define RLC_TTOP_D 0xC414
-# define RLC_PUD(x) ((x) << 0)
-# define RLC_PUD_MASK (0xff << 0)
-# define RLC_PDD(x) ((x) << 8)
-# define RLC_PDD_MASK (0xff << 8)
-# define RLC_TTPD(x) ((x) << 16)
-# define RLC_TTPD_MASK (0xff << 16)
-# define RLC_MSD(x) ((x) << 24)
-# define RLC_MSD_MASK (0xff << 24)
-
-#define RLC_LB_INIT_CU_MASK 0xC41C
-
-#define RLC_PG_AO_CU_MASK 0xC42C
-#define RLC_MAX_PG_CU 0xC430
-# define MAX_PU_CU(x) ((x) << 0)
-# define MAX_PU_CU_MASK (0xff << 0)
-#define RLC_AUTO_PG_CTRL 0xC434
-# define AUTO_PG_EN (1 << 0)
-# define GRBM_REG_SGIT(x) ((x) << 3)
-# define GRBM_REG_SGIT_MASK (0xffff << 3)
-# define PG_AFTER_GRBM_REG_ST(x) ((x) << 19)
-# define PG_AFTER_GRBM_REG_ST_MASK (0x1fff << 19)
-
-#define RLC_SERDES_WR_MASTER_MASK_0 0xC454
-#define RLC_SERDES_WR_MASTER_MASK_1 0xC458
-#define RLC_SERDES_WR_CTRL 0xC45C
-
-#define RLC_SERDES_MASTER_BUSY_0 0xC464
-#define RLC_SERDES_MASTER_BUSY_1 0xC468
-
-#define RLC_GCPM_GENERAL_3 0xC478
-
-#define DB_RENDER_CONTROL 0x28000
-
-#define DB_DEPTH_INFO 0x2803c
#define PA_SC_RASTER_CONFIG 0x28350
# define RASTER_CONFIG_RB_MAP_0 0
@@ -1412,176 +787,19 @@
# define THREAD_TRACE_FLUSH (54 << 0)
# define THREAD_TRACE_FINISH (55 << 0)
-/* PIF PHY0 registers idx/data 0x8/0xc */
-#define PB0_PIF_CNTL 0x10
-# define LS2_EXIT_TIME(x) ((x) << 17)
-# define LS2_EXIT_TIME_MASK (0x7 << 17)
-# define LS2_EXIT_TIME_SHIFT 17
-#define PB0_PIF_PAIRING 0x11
-# define MULTI_PIF (1 << 25)
-#define PB0_PIF_PWRDOWN_0 0x12
-# define PLL_POWER_STATE_IN_TXS2_0(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_0_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_0_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_0(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_0_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_0_SHIFT 10
-# define PLL_RAMP_UP_TIME_0(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_0_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_0_SHIFT 24
-#define PB0_PIF_PWRDOWN_1 0x13
-# define PLL_POWER_STATE_IN_TXS2_1(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_1_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_1_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_1(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_1_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_1_SHIFT 10
-# define PLL_RAMP_UP_TIME_1(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_1_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_1_SHIFT 24
-
-#define PB0_PIF_PWRDOWN_2 0x17
-# define PLL_POWER_STATE_IN_TXS2_2(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_2_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_2_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_2(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_2_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_2_SHIFT 10
-# define PLL_RAMP_UP_TIME_2(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_2_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_2_SHIFT 24
-#define PB0_PIF_PWRDOWN_3 0x18
-# define PLL_POWER_STATE_IN_TXS2_3(x) ((x) << 7)
-# define PLL_POWER_STATE_IN_TXS2_3_MASK (0x7 << 7)
-# define PLL_POWER_STATE_IN_TXS2_3_SHIFT 7
-# define PLL_POWER_STATE_IN_OFF_3(x) ((x) << 10)
-# define PLL_POWER_STATE_IN_OFF_3_MASK (0x7 << 10)
-# define PLL_POWER_STATE_IN_OFF_3_SHIFT 10
-# define PLL_RAMP_UP_TIME_3(x) ((x) << 24)
-# define PLL_RAMP_UP_TIME_3_MASK (0x7 << 24)
-# define PLL_RAMP_UP_TIME_3_SHIFT 24
-/* PIF PHY1 registers idx/data 0x10/0x14 */
-#define PB1_PIF_CNTL 0x10
-#define PB1_PIF_PAIRING 0x11
-#define PB1_PIF_PWRDOWN_0 0x12
-#define PB1_PIF_PWRDOWN_1 0x13
-
-#define PB1_PIF_PWRDOWN_2 0x17
-#define PB1_PIF_PWRDOWN_3 0x18
-/* PCIE registers idx/data 0x30/0x34 */
-#define PCIE_CNTL2 0x1c /* PCIE */
-# define SLV_MEM_LS_EN (1 << 16)
-# define SLV_MEM_AGGRESSIVE_LS_EN (1 << 17)
-# define MST_MEM_LS_EN (1 << 18)
-# define REPLAY_MEM_LS_EN (1 << 19)
-#define PCIE_LC_STATUS1 0x28 /* PCIE */
-# define LC_REVERSE_RCVR (1 << 0)
-# define LC_REVERSE_XMIT (1 << 1)
-# define LC_OPERATING_LINK_WIDTH_MASK (0x7 << 2)
-# define LC_OPERATING_LINK_WIDTH_SHIFT 2
-# define LC_DETECTED_LINK_WIDTH_MASK (0x7 << 5)
-# define LC_DETECTED_LINK_WIDTH_SHIFT 5
-
-#define PCIE_P_CNTL 0x40 /* PCIE */
-# define P_IGNORE_EDB_ERR (1 << 6)
-
-/* PCIE PORT registers idx/data 0x38/0x3c */
-#define PCIE_LC_CNTL 0xa0
-# define LC_L0S_INACTIVITY(x) ((x) << 8)
-# define LC_L0S_INACTIVITY_MASK (0xf << 8)
-# define LC_L0S_INACTIVITY_SHIFT 8
-# define LC_L1_INACTIVITY(x) ((x) << 12)
-# define LC_L1_INACTIVITY_MASK (0xf << 12)
-# define LC_L1_INACTIVITY_SHIFT 12
-# define LC_PMI_TO_L1_DIS (1 << 16)
-# define LC_ASPM_TO_L1_DIS (1 << 24)
-#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
-# define LC_LINK_WIDTH_SHIFT 0
-# define LC_LINK_WIDTH_MASK 0x7
-# define LC_LINK_WIDTH_X0 0
-# define LC_LINK_WIDTH_X1 1
-# define LC_LINK_WIDTH_X2 2
-# define LC_LINK_WIDTH_X4 3
-# define LC_LINK_WIDTH_X8 4
-# define LC_LINK_WIDTH_X16 6
-# define LC_LINK_WIDTH_RD_SHIFT 4
-# define LC_LINK_WIDTH_RD_MASK 0x70
-# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
-# define LC_RECONFIG_NOW (1 << 8)
-# define LC_RENEGOTIATION_SUPPORT (1 << 9)
-# define LC_RENEGOTIATE_EN (1 << 10)
-# define LC_SHORT_RECONFIG_EN (1 << 11)
-# define LC_UPCONFIGURE_SUPPORT (1 << 12)
-# define LC_UPCONFIGURE_DIS (1 << 13)
-# define LC_DYN_LANES_PWR_STATE(x) ((x) << 21)
-# define LC_DYN_LANES_PWR_STATE_MASK (0x3 << 21)
-# define LC_DYN_LANES_PWR_STATE_SHIFT 21
-#define PCIE_LC_N_FTS_CNTL 0xa3 /* PCIE_P */
-# define LC_XMIT_N_FTS(x) ((x) << 0)
-# define LC_XMIT_N_FTS_MASK (0xff << 0)
-# define LC_XMIT_N_FTS_SHIFT 0
-# define LC_XMIT_N_FTS_OVERRIDE_EN (1 << 8)
-# define LC_N_FTS_MASK (0xff << 24)
-#define PCIE_LC_SPEED_CNTL 0xa4 /* PCIE_P */
-# define LC_GEN2_EN_STRAP (1 << 0)
-# define LC_GEN3_EN_STRAP (1 << 1)
-# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 2)
-# define LC_TARGET_LINK_SPEED_OVERRIDE_MASK (0x3 << 3)
-# define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT 3
-# define LC_FORCE_EN_SW_SPEED_CHANGE (1 << 5)
-# define LC_FORCE_DIS_SW_SPEED_CHANGE (1 << 6)
-# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 7)
-# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 8)
-# define LC_INITIATE_LINK_SPEED_CHANGE (1 << 9)
-# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 10)
-# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 10
-# define LC_CURRENT_DATA_RATE_MASK (0x3 << 13) /* 0/1/2 = gen1/2/3 */
-# define LC_CURRENT_DATA_RATE_SHIFT 13
-# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 16)
-# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 18)
-# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 19)
-# define LC_OTHER_SIDE_EVER_SENT_GEN3 (1 << 20)
-# define LC_OTHER_SIDE_SUPPORTS_GEN3 (1 << 21)
-
-#define PCIE_LC_CNTL2 0xb1
-# define LC_ALLOW_PDWN_IN_L1 (1 << 17)
-# define LC_ALLOW_PDWN_IN_L23 (1 << 18)
-
-#define PCIE_LC_CNTL3 0xb5 /* PCIE_P */
-# define LC_GO_TO_RECOVERY (1 << 30)
-#define PCIE_LC_CNTL4 0xb6 /* PCIE_P */
-# define LC_REDO_EQ (1 << 5)
-# define LC_SET_QUIESCE (1 << 13)
-
-/*
- * UVD
- */
-#define UVD_UDEC_ADDR_CONFIG 0xEF4C
-#define UVD_UDEC_DB_ADDR_CONFIG 0xEF50
-#define UVD_UDEC_DBW_ADDR_CONFIG 0xEF54
-#define UVD_RBC_RB_RPTR 0xF690
-#define UVD_RBC_RB_WPTR 0xF694
-#define UVD_STATUS 0xf6bc
-
-#define UVD_CGC_CTRL 0xF4B0
-# define DCM (1 << 0)
-# define CG_DT(x) ((x) << 2)
-# define CG_DT_MASK (0xf << 2)
-# define CLK_OD(x) ((x) << 6)
-# define CLK_OD_MASK (0x1f << 6)
-
- /* UVD CTX indirect */
-#define UVD_CGC_MEM_CTRL 0xC0
-#define UVD_CGC_CTRL2 0xC1
-# define DYN_OR_EN (1 << 0)
-# define DYN_RR_EN (1 << 1)
-# define G_DIV_ID(x) ((x) << 2)
-# define G_DIV_ID_MASK (0x7 << 2)
-
/*
* PM4
*/
-#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
+#define PACKET_TYPE0 0
+#define PACKET_TYPE1 1
+#define PACKET_TYPE2 2
+#define PACKET_TYPE3 3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
#define CP_PACKET2 0x80000000
@@ -1590,7 +808,7 @@
#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
-#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
+#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
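
The hunk above swaps the RADEON_PACKET_TYPE* names for local PACKET_TYPE* constants and restores the header-parsing macros. A PM4 header packs the packet type into bits 31:30, a DWORD count into bits 29:16, and either a register offset (type 0, stored shifted right by 2) or an opcode (type 3) in the low bits. A standalone round-trip check of that encoding, using near-verbatim copies of the macros from this hunk (the type constants are made unsigned so the 30-bit shift is well defined; 0x8000 is GRBM_CNTL and 0x3D is PACKET3_MEM_WRITE elsewhere in this header):

#include <assert.h>
#include <stdint.h>

#define PACKET_TYPE0	0u
#define PACKET_TYPE3	3u

#define CP_PACKET_GET_TYPE(h)		(((h) >> 30) & 3)
#define CP_PACKET_GET_COUNT(h)		(((h) >> 16) & 0x3FFF)
#define CP_PACKET0_GET_REG(h)		(((h) & 0xFFFF) << 2)
#define CP_PACKET3_GET_OPCODE(h)	(((h) >> 8) & 0xFF)

#define PACKET0(reg, n)	((PACKET_TYPE0 << 30) | (((reg) >> 2) & 0xFFFF) | \
			 ((n) & 0x3FFF) << 16)
#define PACKET3(op, n)	((PACKET_TYPE3 << 30) | (((op) & 0xFF) << 8) | \
			 ((n) & 0x3FFF) << 16)

int
main(void)
{
	uint32_t h0 = PACKET0(0x8000, 0);	/* GRBM_CNTL, no payload */
	uint32_t h3 = PACKET3(0x3D, 3);		/* MEM_WRITE, count field 3 */

	assert(CP_PACKET_GET_TYPE(h0) == PACKET_TYPE0);
	assert(CP_PACKET0_GET_REG(h0) == 0x8000);
	assert(CP_PACKET_GET_TYPE(h3) == PACKET_TYPE3);
	assert(CP_PACKET3_GET_OPCODE(h3) == 0x3D);
	assert(CP_PACKET_GET_COUNT(h3) == 3);
	return 0;
}
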
@@ -1652,23 +870,6 @@
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_COPY_DW 0x3B
#define PACKET3_WAIT_REG_MEM 0x3C
-#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
- /* 0 - always
- * 1 - <
- * 2 - <=
- * 3 - ==
- * 4 - !=
- * 5 - >=
- * 6 - >
- */
-#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
- /* 0 - reg
- * 1 - mem
- */
-#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
- /* 0 - me
- * 1 - pfp
- */
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_COPY_DATA 0x40
#define PACKET3_CP_DMA 0x41
@@ -1839,15 +1040,6 @@
# define DMA_IDLE (1 << 0)
#define DMA_TILING_CONFIG 0xd0b8
-#define DMA_POWER_CNTL 0xd0bc
-# define MEM_POWER_OVERRIDE (1 << 8)
-#define DMA_CLK_CTRL 0xd0c0
-
-#define DMA_PG 0xd0d4
-# define PG_CNTL_ENABLE (1 << 0)
-#define DMA_PGFSM_CONFIG 0xd0d8
-#define DMA_PGFSM_WRITE 0xd0dc
-
#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \
(((b) & 0x1) << 26) | \
(((t) & 0x1) << 23) | \
@@ -1872,83 +1064,6 @@
#define DMA_PACKET_TRAP 0x7
#define DMA_PACKET_SRBM_WRITE 0x9
#define DMA_PACKET_CONSTANT_FILL 0xd
-#define DMA_PACKET_POLL_REG_MEM 0xe
#define DMA_PACKET_NOP 0xf
-#define VCE_STATUS 0x20004
-#define VCE_VCPU_CNTL 0x20014
-#define VCE_CLK_EN (1 << 0)
-#define VCE_VCPU_CACHE_OFFSET0 0x20024
-#define VCE_VCPU_CACHE_SIZE0 0x20028
-#define VCE_VCPU_CACHE_OFFSET1 0x2002c
-#define VCE_VCPU_CACHE_SIZE1 0x20030
-#define VCE_VCPU_CACHE_OFFSET2 0x20034
-#define VCE_VCPU_CACHE_SIZE2 0x20038
-#define VCE_VCPU_SCRATCH7 0x200dc
-#define VCE_SOFT_RESET 0x20120
-#define VCE_ECPU_SOFT_RESET (1 << 0)
-#define VCE_FME_SOFT_RESET (1 << 2)
-#define VCE_RB_BASE_LO2 0x2016c
-#define VCE_RB_BASE_HI2 0x20170
-#define VCE_RB_SIZE2 0x20174
-#define VCE_RB_RPTR2 0x20178
-#define VCE_RB_WPTR2 0x2017c
-#define VCE_RB_BASE_LO 0x20180
-#define VCE_RB_BASE_HI 0x20184
-#define VCE_RB_SIZE 0x20188
-#define VCE_RB_RPTR 0x2018c
-#define VCE_RB_WPTR 0x20190
-#define VCE_CLOCK_GATING_A 0x202f8
-# define CGC_DYN_CLOCK_MODE (1 << 16)
-#define VCE_CLOCK_GATING_B 0x202fc
-#define VCE_UENC_CLOCK_GATING 0x205bc
-#define VCE_UENC_REG_CLOCK_GATING 0x205c0
-#define VCE_FW_REG_STATUS 0x20e10
-# define VCE_FW_REG_STATUS_BUSY (1 << 0)
-# define VCE_FW_REG_STATUS_PASS (1 << 3)
-# define VCE_FW_REG_STATUS_DONE (1 << 11)
-#define VCE_LMI_FW_START_KEYSEL 0x20e18
-#define VCE_LMI_FW_PERIODIC_CTRL 0x20e20
-#define VCE_LMI_CTRL2 0x20e74
-#define VCE_LMI_CTRL 0x20e98
-#define VCE_LMI_VM_CTRL 0x20ea0
-#define VCE_LMI_SWAP_CNTL 0x20eb4
-#define VCE_LMI_SWAP_CNTL1 0x20eb8
-#define VCE_LMI_CACHE_CTRL 0x20ef4
-
-#define VCE_CMD_NO_OP 0x00000000
-#define VCE_CMD_END 0x00000001
-#define VCE_CMD_IB 0x00000002
-#define VCE_CMD_FENCE 0x00000003
-#define VCE_CMD_TRAP 0x00000004
-#define VCE_CMD_IB_AUTO 0x00000005
-#define VCE_CMD_SEMAPHORE 0x00000006
-
-/* discrete vce clocks */
-#define CG_VCEPLL_FUNC_CNTL 0xc0030600
-# define VCEPLL_RESET_MASK 0x00000001
-# define VCEPLL_SLEEP_MASK 0x00000002
-# define VCEPLL_BYPASS_EN_MASK 0x00000004
-# define VCEPLL_CTLREQ_MASK 0x00000008
-# define VCEPLL_VCO_MODE_MASK 0x00000600
-# define VCEPLL_REF_DIV_MASK 0x003F0000
-# define VCEPLL_CTLACK_MASK 0x40000000
-# define VCEPLL_CTLACK2_MASK 0x80000000
-#define CG_VCEPLL_FUNC_CNTL_2 0xc0030601
-# define VCEPLL_PDIV_A(x) ((x) << 0)
-# define VCEPLL_PDIV_A_MASK 0x0000007F
-# define VCEPLL_PDIV_B(x) ((x) << 8)
-# define VCEPLL_PDIV_B_MASK 0x00007F00
-# define EVCLK_SRC_SEL(x) ((x) << 20)
-# define EVCLK_SRC_SEL_MASK 0x01F00000
-# define ECCLK_SRC_SEL(x) ((x) << 25)
-# define ECCLK_SRC_SEL_MASK 0x3E000000
-#define CG_VCEPLL_FUNC_CNTL_3 0xc0030602
-# define VCEPLL_FB_DIV(x) ((x) << 0)
-# define VCEPLL_FB_DIV_MASK 0x01FFFFFF
-#define CG_VCEPLL_FUNC_CNTL_4 0xc0030603
-#define CG_VCEPLL_FUNC_CNTL_5 0xc0030604
-#define CG_VCEPLL_SPREAD_SPECTRUM 0xc0030606
-# define VCEPLL_SSEN_MASK 0x00000001
-
#endif
diff --git a/sys/dev/pci/drm/radeon_drm.h b/sys/dev/pci/drm/radeon_drm.h
index 7872a8d30bf..109e93d191d 100644
--- a/sys/dev/pci/drm/radeon_drm.h
+++ b/sys/dev/pci/drm/radeon_drm.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: radeon_drm.h,v 1.14 2018/04/20 16:09:36 deraadt Exp $ */
/* radeon_drm.h -- Public header for the radeon driver -*- linux-c -*-
*
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
@@ -511,7 +512,6 @@ typedef struct {
#define DRM_RADEON_GEM_BUSY 0x2a
#define DRM_RADEON_GEM_VA 0x2b
#define DRM_RADEON_GEM_OP 0x2c
-#define DRM_RADEON_GEM_USERPTR 0x2d
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
@@ -555,7 +555,6 @@ typedef struct {
#define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
#define DRM_IOCTL_RADEON_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va)
#define DRM_IOCTL_RADEON_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_OP, struct drm_radeon_gem_op)
-#define DRM_IOCTL_RADEON_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_USERPTR, struct drm_radeon_gem_userptr)
typedef struct drm_radeon_init {
enum {
@@ -716,6 +715,7 @@ typedef struct drm_radeon_indirect {
#define RADEON_PARAM_NUM_GB_PIPES 15 /* num GB pipes */
#define RADEON_PARAM_DEVICE_ID 16
#define RADEON_PARAM_NUM_Z_PIPES 17 /* num Z pipes */
+#define RADEON_PARAM_LAST_SWI 18
typedef struct drm_radeon_getparam {
int param;
@@ -798,13 +798,7 @@ struct drm_radeon_gem_info {
uint64_t vram_visible;
};
-#define RADEON_GEM_NO_BACKING_STORE (1 << 0)
-#define RADEON_GEM_GTT_UC (1 << 1)
-#define RADEON_GEM_GTT_WC (1 << 2)
-/* BO is expected to be accessed by the CPU */
-#define RADEON_GEM_CPU_ACCESS (1 << 3)
-/* CPU access is not expected to work for this BO */
-#define RADEON_GEM_NO_CPU_ACCESS (1 << 4)
+#define RADEON_GEM_NO_BACKING_STORE 1
struct drm_radeon_gem_create {
uint64_t size;
@@ -814,26 +808,10 @@ struct drm_radeon_gem_create {
uint32_t flags;
};
-/*
- * This is not a reliable API and you should expect it to fail for any
- * number of reasons and have fallback path that do not use userptr to
- * perform any operation.
- */
-#define RADEON_GEM_USERPTR_READONLY (1 << 0)
-#define RADEON_GEM_USERPTR_ANONONLY (1 << 1)
-#define RADEON_GEM_USERPTR_VALIDATE (1 << 2)
-#define RADEON_GEM_USERPTR_REGISTER (1 << 3)
-
-struct drm_radeon_gem_userptr {
- uint64_t addr;
- uint64_t size;
- uint32_t flags;
- uint32_t handle;
-};
-
#define RADEON_TILING_MACRO 0x1
#define RADEON_TILING_MICRO 0x2
#define RADEON_TILING_SWAP_16BIT 0x4
+#define RADEON_TILING_R600_NO_SCANOUT RADEON_TILING_SWAP_16BIT
#define RADEON_TILING_SWAP_32BIT 0x8
/* this object requires a surface when mapped - i.e. front buffer */
#define RADEON_TILING_SURFACE 0x10
@@ -946,6 +924,7 @@ struct drm_radeon_gem_va {
#define RADEON_CHUNK_ID_IB 0x02
#define RADEON_CHUNK_ID_FLAGS 0x03
#define RADEON_CHUNK_ID_CONST_IB 0x04
+#define RADEON_CHUNK_ID_OLD 0xff
/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
#define RADEON_CS_KEEP_TILING_FLAGS 0x01
@@ -967,7 +946,6 @@ struct drm_radeon_cs_chunk {
};
/* drm_radeon_cs_reloc.flags */
-#define RADEON_RELOC_PRIO_MASK (0xf << 0)
struct drm_radeon_cs_reloc {
uint32_t handle;
@@ -1033,13 +1011,7 @@ struct drm_radeon_cs {
#define RADEON_INFO_NUM_BYTES_MOVED 0x1d
#define RADEON_INFO_VRAM_USAGE 0x1e
#define RADEON_INFO_GTT_USAGE 0x1f
-#define RADEON_INFO_ACTIVE_CU_COUNT 0x20
-#define RADEON_INFO_CURRENT_GPU_TEMP 0x21
-#define RADEON_INFO_CURRENT_GPU_SCLK 0x22
-#define RADEON_INFO_CURRENT_GPU_MCLK 0x23
-#define RADEON_INFO_READ_REG 0x24
-#define RADEON_INFO_VA_UNMAP_WORKING 0x25
-#define RADEON_INFO_GPU_RESET_COUNTER 0x26
+
struct drm_radeon_info {
uint32_t request;
@@ -1065,6 +1037,13 @@ struct drm_radeon_info {
#define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA 3
#define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA 2
+#define CIK_TILE_MODE_COLOR_2D 14
+#define CIK_TILE_MODE_COLOR_2D_SCANOUT 10
+#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_64 0
+#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_128 1
+#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_256 2
+#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_512 3
+#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_ROW_SIZE 4
#define CIK_TILE_MODE_DEPTH_STENCIL_1D 5
#endif
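
One subtlety in the radeon_drm.h hunk above: the restored RADEON_TILING_R600_NO_SCANOUT is an alias for RADEON_TILING_SWAP_16BIT, so bit 2 of the tiling flags is overloaded, 16-bit byte swapping on older parts versus what the name suggests is a no-scanout hint on r600-class hardware; a given chip family reads the bit one way. A small decoder sketch to make the overload explicit (print_tiling() is illustrative only, not a driver function):

#include <stdint.h>
#include <stdio.h>

#define RADEON_TILING_MACRO		0x1
#define RADEON_TILING_MICRO		0x2
#define RADEON_TILING_SWAP_16BIT	0x4
#define RADEON_TILING_R600_NO_SCANOUT	RADEON_TILING_SWAP_16BIT
#define RADEON_TILING_SWAP_32BIT	0x8
#define RADEON_TILING_SURFACE		0x10

/* illustrative decoder: bit 2 is interpreted per GPU family */
static void
print_tiling(uint32_t flags, int is_r600)
{
	if (flags & RADEON_TILING_MACRO)
		printf("macro ");
	if (flags & RADEON_TILING_MICRO)
		printf("micro ");
	if (flags & RADEON_TILING_SWAP_16BIT)
		printf(is_r600 ? "no-scanout " : "swap16 ");
	if (flags & RADEON_TILING_SWAP_32BIT)
		printf("swap32 ");
	if (flags & RADEON_TILING_SURFACE)
		printf("surface ");
	printf("\n");
}

int
main(void)
{
	print_tiling(RADEON_TILING_MACRO | RADEON_TILING_R600_NO_SCANOUT, 1);
	return 0;
}
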
diff --git a/sys/dev/pci/drm/ttm/ttm_agp_backend.c b/sys/dev/pci/drm/ttm/ttm_agp_backend.c
index 571453e695d..9ece4dee0ec 100644
--- a/sys/dev/pci/drm/ttm/ttm_agp_backend.c
+++ b/sys/dev/pci/drm/ttm/ttm_agp_backend.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: ttm_agp_backend.c,v 1.5 2018/04/20 16:09:37 deraadt Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -31,7 +32,6 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/ttm/ttm_module.h>
#include <dev/pci/drm/ttm/ttm_bo_driver.h>
#include <dev/pci/drm/ttm/ttm_page_alloc.h>
diff --git a/sys/dev/pci/drm/ttm/ttm_bo.c b/sys/dev/pci/drm/ttm/ttm_bo.c
index 7f03a515f03..d0a49f6f715 100644
--- a/sys/dev/pci/drm/ttm/ttm_bo.c
+++ b/sys/dev/pci/drm/ttm/ttm_bo.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: ttm_bo.c,v 1.22 2018/04/20 16:09:37 deraadt Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -33,18 +34,18 @@
#include <dev/pci/drm/ttm/ttm_module.h>
#include <dev/pci/drm/ttm/ttm_bo_driver.h>
#include <dev/pci/drm/ttm/ttm_placement.h>
-#include <dev/pci/drm/drm_linux.h>
-#include <dev/pci/drm/drm_linux_atomic.h>
-#include <dev/pci/drm/linux_rcupdate.h>
-#include <dev/pci/drm/linux_ww_mutex.h>
#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13
+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);
+int ttm_bo_move_buffer(struct ttm_buffer_object *, struct ttm_placement *,
+ bool, bool);
+
#ifdef notyet
static struct attribute ttm_bo_count = {
.name = "bo_count",
@@ -58,13 +59,12 @@ ttm_get_kobj(void)
return (NULL);
}
-static inline int ttm_mem_type_from_place(const struct ttm_place *place,
- uint32_t *mem_type)
+static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
int i;
for (i = 0; i <= TTM_PL_PRIV5; i++)
- if (place->flags & (1 << i)) {
+ if (flags & (1 << i)) {
*mem_type = i;
return 0;
}
@@ -78,7 +78,7 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
pr_err(" has_type: %d\n", man->has_type);
pr_err(" use_type: %d\n", man->use_type);
pr_err(" flags: 0x%08X\n", man->flags);
- pr_err(" gpu_offset: 0x%08llX\n", man->gpu_offset);
+ pr_err(" gpu_offset: 0x%08lX\n", man->gpu_offset);
pr_err(" size: %llu\n", man->size);
pr_err(" available_caching: 0x%08X\n", man->available_caching);
pr_err(" default_caching: 0x%08X\n", man->default_caching);
@@ -95,12 +95,12 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
bo, bo->mem.num_pages, bo->mem.size >> 10,
bo->mem.size >> 20);
for (i = 0; i < placement->num_placement; i++) {
- ret = ttm_mem_type_from_place(&placement->placement[i],
+ ret = ttm_mem_type_from_flags(placement->placement[i],
&mem_type);
if (ret)
return;
pr_err(" placement[%d]=0x%08X (%d)\n",
- i, placement->placement[i].flags, mem_type);
+ i, placement->placement[i], mem_type);
ttm_mem_type_debug(bo->bdev, mem_type);
}
}
@@ -135,7 +135,6 @@ static struct kobj_type ttm_bo_glob_kobj_type = {
#endif
};
-
static inline uint32_t ttm_bo_type_flags(unsigned type)
{
return 1 << (type);
@@ -151,6 +150,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
BUG_ON(atomic_read(&bo->list_kref.refcount));
BUG_ON(atomic_read(&bo->kref.refcount));
BUG_ON(atomic_read(&bo->cpu_writers));
+ BUG_ON(bo->sync_obj != NULL);
BUG_ON(bo->mem.mm_node != NULL);
BUG_ON(!list_empty(&bo->lru));
BUG_ON(!list_empty(&bo->ddestroy));
@@ -158,9 +158,6 @@ static void ttm_bo_release_list(struct kref *list_kref)
if (bo->ttm)
ttm_tt_destroy(bo->ttm);
atomic_dec(&bo->glob->bo_count);
- if (bo->resv == &bo->ttm_resv)
- reservation_object_fini(&bo->ttm_resv);
- mutex_destroy(&bo->wu_mutex);
if (bo->destroy)
bo->destroy(bo);
else {
@@ -169,14 +166,28 @@ static void ttm_bo_release_list(struct kref *list_kref)
ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}
+int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
+{
+ int ret = 0;
+
+ while (ret == 0) {
+ if (!ttm_bo_is_reserved(bo))
+ break;
+ ret = -tsleep(&bo->event_queue,
+ PZERO | (interruptible ? PCATCH : 0), "ttmwt", 0);
+
+ }
+
+ return (ret);
+}
+EXPORT_SYMBOL(ttm_bo_wait_unreserved);
+
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
-#ifdef notyet
- lockdep_assert_held(&bo->resv->lock.base);
-#endif
+ BUG_ON(!ttm_bo_is_reserved(bo));
if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
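
The ttm_bo_wait_unreserved() added above is the OpenBSD-flavored replacement for the Linux wait-queue path: it sleeps on the buffer's event_queue channel with tsleep(9), re-checks the reservation flag after every wakeup (wakeups can be spurious or raced away), and PCATCH makes the sleep interruptible by signals, which is why the tsleep return value is negated into an errno-style result. The same re-check-in-a-loop shape, sketched in portable userland C with a pthread condition variable as an analogy for the event_queue/tsleep pair (an analogy, not the kernel primitive itself):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t event_queue = PTHREAD_COND_INITIALIZER;
static bool reserved = true;

static void
wait_unreserved(void)
{
	pthread_mutex_lock(&lock);
	while (reserved)	/* re-check the predicate after every wakeup */
		pthread_cond_wait(&event_queue, &lock);
	pthread_mutex_unlock(&lock);
}

static void
unreserve(void)
{
	pthread_mutex_lock(&lock);
	reserved = false;
	pthread_mutex_unlock(&lock);
	pthread_cond_broadcast(&event_queue);	/* wake_up_all() analogue */
}

int
main(void)
{
	unreserve();
	wait_unreserved();
	return 0;
}
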
@@ -192,7 +203,6 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
}
}
}
-EXPORT_SYMBOL(ttm_bo_add_to_lru);
int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
@@ -215,6 +225,62 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
return put_count;
}
+int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait, bool use_sequence, uint32_t sequence)
+{
+ struct ttm_bo_global *glob = bo->glob;
+ int ret;
+
+ while (unlikely(atomic_read(&bo->reserved) != 0)) {
+ /**
+ * Deadlock avoidance for multi-bo reserving.
+ */
+ if (use_sequence && bo->seq_valid) {
+ /**
+ * We've already reserved this one.
+ */
+ if (unlikely(sequence == bo->val_seq))
+ return -EDEADLK;
+ /**
+ * Already reserved by a thread that will not back
+ * off for us. We need to back off.
+ */
+ if (unlikely(sequence - bo->val_seq < (1 << 31)))
+ return -EAGAIN;
+ }
+
+ if (no_wait)
+ return -EBUSY;
+
+ spin_unlock(&glob->lru_lock);
+ ret = ttm_bo_wait_unreserved(bo, interruptible);
+ spin_lock(&glob->lru_lock);
+
+ if (unlikely(ret))
+ return ret;
+ }
+
+ atomic_set(&bo->reserved, 1);
+ if (use_sequence) {
+ /**
+ * Wake up waiters that may need to recheck for deadlock,
+ * if we decreased the sequence number.
+ */
+ if (unlikely((bo->val_seq - sequence < (1 << 31))
+ || !bo->seq_valid))
+ wake_up_all(&bo->event_queue);
+
+ bo->val_seq = sequence;
+ bo->seq_valid = true;
+ } else {
+ bo->seq_valid = false;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_bo_reserve);
+
static void ttm_bo_ref_bug(struct kref *list_kref)
{
BUG();
@@ -227,16 +293,42 @@ void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
(never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}
-void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
+int ttm_bo_reserve(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait, bool use_sequence, uint32_t sequence)
{
- int put_count;
+ struct ttm_bo_global *glob = bo->glob;
+ int put_count = 0;
+ int ret;
+
+ spin_lock(&glob->lru_lock);
+ ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
+ sequence);
+ if (likely(ret == 0))
+ put_count = ttm_bo_del_from_lru(bo);
+ spin_unlock(&glob->lru_lock);
- spin_lock(&bo->glob->lru_lock);
- put_count = ttm_bo_del_from_lru(bo);
- spin_unlock(&bo->glob->lru_lock);
ttm_bo_list_ref_sub(bo, put_count, true);
+
+ return ret;
+}
+
+void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
+{
+ ttm_bo_add_to_lru(bo);
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
+}
+
+void ttm_bo_unreserve(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_global *glob = bo->glob;
+
+ spin_lock(&glob->lru_lock);
+ ttm_bo_unreserve_locked(bo);
+ spin_unlock(&glob->lru_lock);
}
-EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
+EXPORT_SYMBOL(ttm_bo_unreserve);
/*
* Call bo->mutex locked.
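
The reserve/unreserve pair added in the two hunks above reverts TTM from the reservation_object/ww_mutex code visible on the removed lines to the older hand-rolled scheme: each multi-buffer validation run gets a fresh sequence number, and when two tasks collide on a buffer, the acquirer whose sequence is newer than the holder's val_seq backs off with -EAGAIN (or -EDEADLK if it meets its own sequence again) while the older one keeps waiting, so an AB-BA cycle cannot form. The test sequence - bo->val_seq < (1 << 31) is an ordering comparison modulo 2^32; a standalone check of that property:

#include <assert.h>
#include <stdint.h>

/*
 * Wrap-safe ordering test used by ttm_bo_reserve_locked() above:
 * "a is at or after b" iff (a - b) < 2^31 in 32-bit unsigned arithmetic.
 */
static int
seq_after_eq(uint32_t a, uint32_t b)
{
	return a - b < (1U << 31);
}

int
main(void)
{
	assert(seq_after_eq(5, 3));		/* newer: must back off */
	assert(!seq_after_eq(3, 5));		/* older: keeps waiting */
	assert(seq_after_eq(2, 0xfffffffeU));	/* still newer across wrap */
	return 0;
}
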
@@ -248,7 +340,9 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
int ret = 0;
uint32_t page_flags = 0;
- TTM_ASSERT_LOCKED(&bo->mutex);
+#ifdef notyet
+ rw_assert_wrlock(&bo->mutex);
+#endif
bo->ttm = NULL;
if (bdev->need_dma32)
@@ -410,69 +504,61 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
}
ttm_bo_mem_put(bo, &bo->mem);
- ww_mutex_unlock (&bo->resv->lock);
-}
-
-static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
-{
- struct reservation_object_list *fobj;
- struct fence *fence;
- int i;
-
- fobj = reservation_object_get_list(bo->resv);
- fence = reservation_object_get_excl(bo->resv);
- if (fence && !fence->ops->signaled)
- fence_enable_sw_signaling(fence);
-
- for (i = 0; fobj && i < fobj->shared_count; ++i) {
- fence = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(bo->resv));
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
- if (!fence->ops->signaled)
- fence_enable_sw_signaling(fence);
- }
+ /*
+ * Since the final reference to this bo may not be dropped by
+ * the current task we have to put a memory barrier here to make
+ * sure the changes done in this function are always visible.
+ *
+ * This function only needs protection against the final kref_put.
+ */
+ smp_mb__before_atomic_dec();
}
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
+ struct ttm_bo_driver *driver = bdev->driver;
+ void *sync_obj = NULL;
int put_count;
int ret;
spin_lock(&glob->lru_lock);
- ret = __ttm_bo_reserve(bo, false, true, false, NULL);
-
- if (!ret) {
- if (!ttm_bo_wait(bo, false, false, true)) {
- put_count = ttm_bo_del_from_lru(bo);
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
- spin_unlock(&glob->lru_lock);
- ttm_bo_cleanup_memtype_use(bo);
+ spin_lock(&bdev->fence_lock);
+ (void) ttm_bo_wait(bo, false, false, true);
+ if (!ret && !bo->sync_obj) {
+ spin_unlock(&bdev->fence_lock);
+ put_count = ttm_bo_del_from_lru(bo);
- ttm_bo_list_ref_sub(bo, put_count, true);
+ spin_unlock(&glob->lru_lock);
+ ttm_bo_cleanup_memtype_use(bo);
- return;
- } else
- ttm_bo_flush_all_fences(bo);
+ ttm_bo_list_ref_sub(bo, put_count, true);
- /*
- * Make NO_EVICT bos immediately available to
- * shrinkers, now that they are queued for
- * destruction.
- */
- if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
- bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
- ttm_bo_add_to_lru(bo);
- }
+ return;
+ }
+ if (bo->sync_obj)
+ sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ spin_unlock(&bdev->fence_lock);
- __ttm_bo_unreserve(bo);
+ if (!ret) {
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
}
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
spin_unlock(&glob->lru_lock);
+ if (sync_obj) {
+ driver->sync_obj_flush(sync_obj);
+ driver->sync_obj_unref(&sync_obj);
+ }
schedule_delayed_work(&bdev->wq,
((HZ / 100) < 1) ? 1 : HZ / 100);
}
@@ -493,29 +579,48 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
bool interruptible,
bool no_wait_gpu)
{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_driver *driver = bdev->driver;
struct ttm_bo_global *glob = bo->glob;
int put_count;
int ret;
+ spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, false, true);
if (ret && !no_wait_gpu) {
- long lret;
- ww_mutex_unlock(&bo->resv->lock);
+ void *sync_obj;
+
+ /*
+ * Take a reference to the fence and unreserve,
+ * at this point the buffer should be dead, so
+ * no new sync objects can be attached.
+ */
+ sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ spin_unlock(&bdev->fence_lock);
+
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
spin_unlock(&glob->lru_lock);
- lret = reservation_object_wait_timeout_rcu(bo->resv,
- true,
- interruptible,
- 30 * HZ);
+ ret = driver->sync_obj_wait(sync_obj, false, interruptible);
+ driver->sync_obj_unref(&sync_obj);
+ if (ret)
+ return ret;
- if (lret < 0)
- return lret;
- else if (lret == 0)
- return -EBUSY;
+ /*
+ * remove sync_obj with ttm_bo_wait, the wait should be
+ * finished, and no new wait object should have been added.
+ */
+ spin_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, false, true);
+ WARN_ON(ret);
+ spin_unlock(&bdev->fence_lock);
+ if (ret)
+ return ret;
spin_lock(&glob->lru_lock);
- ret = __ttm_bo_reserve(bo, false, true, false, NULL);
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
/*
* We raced, and lost, someone else holds the reservation now,
@@ -529,17 +634,12 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
spin_unlock(&glob->lru_lock);
return 0;
}
-
- /*
- * remove sync_obj with ttm_bo_wait, the wait should be
- * finished, and no new wait object should have been added.
- */
- ret = ttm_bo_wait(bo, false, false, true);
- WARN_ON(ret);
- }
+ } else
+ spin_unlock(&bdev->fence_lock);
if (ret || unlikely(list_empty(&bo->ddestroy))) {
- __ttm_bo_unreserve(bo);
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
spin_unlock(&glob->lru_lock);
return ret;
}
@@ -584,14 +684,7 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
kref_get(&nentry->list_kref);
}
- ret = __ttm_bo_reserve(entry, false, true, false, NULL);
- if (remove_all && ret) {
- spin_unlock(&glob->lru_lock);
- ret = __ttm_bo_reserve(entry, false, false,
- false, NULL);
- spin_lock(&glob->lru_lock);
- }
-
+ ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
if (!ret)
ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
!remove_all);
@@ -674,7 +767,9 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
struct ttm_placement placement;
int ret = 0;
+ spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+ spin_unlock(&bdev->fence_lock);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS) {
@@ -683,15 +778,15 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
goto out;
}
-#ifdef notyet
- lockdep_assert_held(&bo->resv->lock.base);
-#endif
+ BUG_ON(!ttm_bo_is_reserved(bo));
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
evict_mem.bus.io_reserved_vm = false;
evict_mem.bus.io_reserved_count = 0;
+ placement.fpfn = 0;
+ placement.lpfn = 0;
placement.num_placement = 0;
placement.num_busy_placement = 0;
bdev->driver->evict_flags(bo, &placement);
@@ -721,7 +816,6 @@ out:
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
uint32_t mem_type,
- const struct ttm_place *place,
bool interruptible,
bool no_wait_gpu)
{
@@ -732,22 +826,9 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &man->lru, lru) {
- ret = __ttm_bo_reserve(bo, false, true, false, NULL);
- if (!ret) {
- if (place && (place->fpfn || place->lpfn)) {
- /* Don't evict this BO if it's outside of the
- * requested placement range
- */
- if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
- (place->lpfn && place->lpfn <= bo->mem.start)) {
- __ttm_bo_unreserve(bo);
- ret = -EBUSY;
- continue;
- }
- }
-
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ if (!ret)
break;
- }
}
if (ret) {
@@ -793,7 +874,7 @@ EXPORT_SYMBOL(ttm_bo_mem_put);
*/
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
uint32_t mem_type,
- const struct ttm_place *place,
+ struct ttm_placement *placement,
struct ttm_mem_reg *mem,
bool interruptible,
bool no_wait_gpu)
@@ -803,12 +884,12 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
int ret;
do {
- ret = (*man->func->get_node)(man, bo, place, mem);
+ ret = (*man->func->get_node)(man, bo, placement, mem);
if (unlikely(ret != 0))
return ret;
if (mem->mm_node)
break;
- ret = ttm_mem_evict_first(bdev, mem_type, place,
+ ret = ttm_mem_evict_first(bdev, mem_type,
interruptible, no_wait_gpu);
if (unlikely(ret != 0))
return ret;
@@ -846,18 +927,18 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
uint32_t mem_type,
- const struct ttm_place *place,
+ uint32_t proposed_placement,
uint32_t *masked_placement)
{
uint32_t cur_flags = ttm_bo_type_flags(mem_type);
- if ((cur_flags & place->flags & TTM_PL_MASK_MEM) == 0)
+ if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
return false;
- if ((place->flags & man->available_caching) == 0)
+ if ((proposed_placement & man->available_caching) == 0)
return false;
- cur_flags |= (place->flags & man->available_caching);
+ cur_flags |= (proposed_placement & man->available_caching);
*masked_placement = cur_flags;
return true;
@@ -888,38 +969,38 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
mem->mm_node = NULL;
for (i = 0; i < placement->num_placement; ++i) {
- const struct ttm_place *place = &placement->placement[i];
-
- ret = ttm_mem_type_from_place(place, &mem_type);
+ ret = ttm_mem_type_from_flags(placement->placement[i],
+ &mem_type);
if (ret)
return ret;
man = &bdev->man[mem_type];
- if (!man->has_type || !man->use_type)
- continue;
- type_ok = ttm_bo_mt_compatible(man, mem_type, place,
+ type_ok = ttm_bo_mt_compatible(man,
+ mem_type,
+ placement->placement[i],
&cur_flags);
if (!type_ok)
continue;
- type_found = true;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
/*
* Use the access and other non-mapping-related flag bits from
* the memory placement flags to the current flags
*/
- ttm_flag_masked(&cur_flags, place->flags,
+ ttm_flag_masked(&cur_flags, placement->placement[i],
~TTM_PL_MASK_MEMTYPE);
if (mem_type == TTM_PL_SYSTEM)
break;
- ret = (*man->func->get_node)(man, bo, place, mem);
- if (unlikely(ret))
- return ret;
-
+ if (man->has_type && man->use_type) {
+ type_found = true;
+ ret = (*man->func->get_node)(man, bo, placement, mem);
+ if (unlikely(ret))
+ return ret;
+ }
if (mem->mm_node)
break;
}
@@ -930,28 +1011,33 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
return 0;
}
- for (i = 0; i < placement->num_busy_placement; ++i) {
- const struct ttm_place *place = &placement->busy_placement[i];
+ if (!type_found)
+ return -EINVAL;
- ret = ttm_mem_type_from_place(place, &mem_type);
+ for (i = 0; i < placement->num_busy_placement; ++i) {
+ ret = ttm_mem_type_from_flags(placement->busy_placement[i],
+ &mem_type);
if (ret)
return ret;
man = &bdev->man[mem_type];
- if (!man->has_type || !man->use_type)
+ if (!man->has_type)
continue;
- if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
+ if (!ttm_bo_mt_compatible(man,
+ mem_type,
+ placement->busy_placement[i],
+ &cur_flags))
continue;
- type_found = true;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
/*
* Use the access and other non-mapping-related flag bits from
* the memory placement flags to the current flags
*/
- ttm_flag_masked(&cur_flags, place->flags,
+ ttm_flag_masked(&cur_flags, placement->busy_placement[i],
~TTM_PL_MASK_MEMTYPE);
+
if (mem_type == TTM_PL_SYSTEM) {
mem->mem_type = mem_type;
mem->placement = cur_flags;
@@ -959,43 +1045,39 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
return 0;
}
- ret = ttm_bo_mem_force_space(bo, mem_type, place, mem,
+ ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
interruptible, no_wait_gpu);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
return 0;
}
- if (ret == -ERESTARTSYS)
+ if (ret == -ERESTART)
has_erestartsys = true;
}
-
- if (!type_found) {
- printk(KERN_ERR TTM_PFX "No compatible memory type found.\n");
- return -EINVAL;
- }
-
- return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
+ ret = (has_erestartsys) ? -ERESTART: -ENOMEM;
+ return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
-static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
bool interruptible,
bool no_wait_gpu)
{
int ret = 0;
struct ttm_mem_reg mem;
+ struct ttm_bo_device *bdev = bo->bdev;
-#ifdef notyet
- lockdep_assert_held(&bo->resv->lock.base);
-#endif
+ BUG_ON(!ttm_bo_is_reserved(bo));
/*
* FIXME: It's possible to pipeline buffer moves.
* Have the driver move function wait for idle when necessary,
* instead of doing it here.
*/
+ spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
+ spin_unlock(&bdev->fence_lock);
if (ret)
return ret;
mem.num_pages = bo->num_pages;
@@ -1018,33 +1100,26 @@ out_unlock:
return ret;
}
-bool ttm_bo_mem_compat(struct ttm_placement *placement,
- struct ttm_mem_reg *mem,
- uint32_t *new_flags)
+static bool ttm_bo_mem_compat(struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ uint32_t *new_flags)
{
int i;
- for (i = 0; i < placement->num_placement; i++) {
- const struct ttm_place *heap = &placement->placement[i];
- if (mem->mm_node &&
- (mem->start < heap->fpfn ||
- (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
- continue;
+ if (mem->mm_node && placement->lpfn != 0 &&
+ (mem->start < placement->fpfn ||
+ mem->start + mem->num_pages > placement->lpfn))
+ return false;
- *new_flags = heap->flags;
+ for (i = 0; i < placement->num_placement; i++) {
+ *new_flags = placement->placement[i];
if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
(*new_flags & mem->placement & TTM_PL_MASK_MEM))
return true;
}
for (i = 0; i < placement->num_busy_placement; i++) {
- const struct ttm_place *heap = &placement->busy_placement[i];
- if (mem->mm_node &&
- (mem->start < heap->fpfn ||
- (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
- continue;
-
- *new_flags = heap->flags;
+ *new_flags = placement->busy_placement[i];
if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
(*new_flags & mem->placement & TTM_PL_MASK_MEM))
return true;
@@ -1052,7 +1127,6 @@ bool ttm_bo_mem_compat(struct ttm_placement *placement,
return false;
}
-EXPORT_SYMBOL(ttm_bo_mem_compat);
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
@@ -1062,9 +1136,12 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
int ret;
uint32_t new_flags;
-#ifdef notyet
- lockdep_assert_held(&bo->resv->lock.base);
-#endif
+ BUG_ON(!ttm_bo_is_reserved(bo));
+ /* Check that range is valid */
+ if (placement->lpfn || placement->fpfn)
+ if (placement->fpfn > placement->lpfn ||
+ (placement->lpfn - placement->fpfn) < bo->num_pages)
+ return -EINVAL;
/*
* Check whether we need to move buffer.
*/
@@ -1093,6 +1170,15 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_validate);
+int ttm_bo_check_placement(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement)
+{
+ BUG_ON((placement->fpfn || placement->lpfn) &&
+ (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
+
+ return 0;
+}
+
int ttm_bo_init(struct ttm_bo_device *bdev,
struct ttm_buffer_object *bo,
unsigned long size,
@@ -1103,13 +1189,11 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
struct uvm_object *persistent_swap_storage,
size_t acc_size,
struct sg_table *sg,
- struct reservation_object *resv,
void (*destroy) (struct ttm_buffer_object *))
{
int ret = 0;
unsigned long num_pages;
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
- bool locked;
ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
if (ret) {
@@ -1137,11 +1221,12 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
kref_init(&bo->kref);
kref_init(&bo->list_kref);
atomic_set(&bo->cpu_writers, 0);
+ atomic_set(&bo->reserved, 1);
+ init_waitqueue_head(&bo->event_queue);
INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
INIT_LIST_HEAD(&bo->io_reserve_lru);
- rw_init(&bo->wu_mutex, "ttmwu");
bo->bdev = bdev;
bo->glob = bdev->glob;
bo->type = type;
@@ -1155,46 +1240,38 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->mem.bus.io_reserved_count = 0;
bo->priv_flags = 0;
bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
+ bo->seq_valid = false;
bo->persistent_swap_storage = persistent_swap_storage;
bo->acc_size = acc_size;
bo->sg = sg;
- if (resv) {
- bo->resv = resv;
-#ifdef notyet
- lockdep_assert_held(&bo->resv->lock.base);
-#endif
- } else {
- bo->resv = &bo->ttm_resv;
- reservation_object_init(&bo->ttm_resv);
- }
atomic_inc(&bo->glob->bo_count);
drm_vma_node_reset(&bo->vma_node);
+ ret = ttm_bo_check_placement(bo, placement);
+ if (unlikely(ret != 0))
+ goto out_err;
+
/*
* For ttm_bo_type_device buffers, allocate
* address space from the device.
*/
if (bo->type == ttm_bo_type_device ||
- bo->type == ttm_bo_type_sg)
- ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
- bo->mem.num_pages);
-
- /* passed reservation objects should already be locked,
- * since otherwise lockdep will be angered in radeon.
- */
- if (!resv) {
- locked = ww_mutex_trylock(&bo->resv->lock);
- WARN_ON(!locked);
+ bo->type == ttm_bo_type_sg) {
+ ret = ttm_bo_setup_vm(bo);
+ if (ret)
+ goto out_err;
}
- if (likely(!ret))
- ret = ttm_bo_validate(bo, placement, interruptible, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
+ if (ret)
+ goto out_err;
- if (!resv)
- ttm_bo_unreserve(bo);
+ ttm_bo_unreserve(bo);
+ return 0;
- if (unlikely(ret))
- ttm_bo_unref(&bo);
+out_err:
+ ttm_bo_unreserve(bo);
+ ttm_bo_unref(&bo);
return ret;
}
@@ -1223,7 +1300,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
size += ttm_round_pot(struct_size);
size += PAGE_ALIGN(npages * sizeof(void *));
- size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
+ size += PAGE_ALIGN(npages * sizeof(bus_addr_t));
size += ttm_round_pot(sizeof(struct ttm_dma_tt));
return size;
}
@@ -1249,7 +1326,7 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
interruptible, persistent_swap_storage, acc_size,
- NULL, NULL, NULL);
+ NULL, NULL);
if (likely(ret == 0))
*p_bo = bo;
@@ -1271,7 +1348,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
while (!list_empty(&man->lru)) {
spin_unlock(&glob->lru_lock);
- ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
+ ret = ttm_mem_evict_first(bdev, mem_type, false, false);
if (ret) {
if (allow_errors) {
return ret;
@@ -1345,7 +1422,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
BUG_ON(man->has_type);
man->io_reserve_fastpath = true;
man->use_io_reserve_lru = false;
- rw_init(&man->io_reserve_mutex, "ior");
+ rw_init(&man->io_reserve_mutex, "ttm_iores");
INIT_LIST_HEAD(&man->io_reserve_lru);
ret = bdev->driver->init_mem_type(bdev, type, man);
@@ -1375,7 +1452,7 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj)
container_of(kobj, struct ttm_bo_global, kobj);
ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
- __free_page(glob->dummy_read_page);
+ km_free(glob->dummy_read_page, PAGE_SIZE, &kv_any, &kp_dma_zero);
kfree(glob);
}
@@ -1395,10 +1472,11 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
struct ttm_bo_global *glob = ref->object;
int ret;
- rw_init(&glob->device_list_mutex, "gdl");
+ rw_init(&glob->device_list_mutex, "ttm_devlist");
mtx_init(&glob->lru_lock, IPL_NONE);
glob->mem_glob = bo_ref->mem_glob;
- glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
+ glob->dummy_read_page = km_alloc(PAGE_SIZE, &kv_any, &kp_dma_zero,
+ &kd_waitok);
if (unlikely(glob->dummy_read_page == NULL)) {
ret = -ENOMEM;
@@ -1423,7 +1501,7 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
kobject_put(&glob->kobj);
return ret;
out_no_shrink:
- __free_page(glob->dummy_read_page);
+ km_free(glob->dummy_read_page, PAGE_SIZE, &kv_any, &kp_dma_zero);
out_no_drp:
kfree(glob);
return ret;
@@ -1477,7 +1555,6 @@ EXPORT_SYMBOL(ttm_bo_device_release);
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_global *glob,
struct ttm_bo_driver *driver,
- struct address_space *mapping,
uint64_t file_page_offset,
bool need_dma32)
{
@@ -1499,10 +1576,11 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
0x10000000);
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
INIT_LIST_HEAD(&bdev->ddestroy);
- bdev->dev_mapping = mapping;
+ bdev->dev_mapping = NULL;
bdev->glob = glob;
bdev->need_dma32 = need_dma32;
bdev->val_seq = 0;
+ mtx_init(&bdev->fence_lock, IPL_NONE);
mutex_lock(&glob->device_list_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
mutex_unlock(&glob->device_list_mutex);
@@ -1534,15 +1612,6 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
return true;
}
-#ifdef __linux__
-void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_device *bdev = bo->bdev;
-
- drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
- ttm_mem_io_free_vm(bo);
-}
-#else
void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
struct ttm_tt *ttm = bo->ttm;
@@ -1551,29 +1620,26 @@ void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
paddr_t paddr;
int i;
- if (drm_vma_node_has_offset(&bo->vma_node)) {
- if (bo->mem.bus.is_iomem) {
- for (i = 0; i < bo->mem.num_pages; ++i) {
- addr = bo->mem.bus.base + bo->mem.bus.offset;
- paddr = bus_space_mmap(bo->bdev->memt, addr,
- i << PAGE_SHIFT, 0, 0);
- page = PHYS_TO_VM_PAGE(paddr);
- if (unlikely(page == NULL))
- continue;
- pmap_page_protect(page, PROT_NONE);
- }
- } else if (ttm) {
- for (i = 0; i < ttm->num_pages; ++i) {
- page = ttm->pages[i];
- if (unlikely(page == NULL))
- continue;
- pmap_page_protect(page, PROT_NONE);
- }
+ if (bo->mem.bus.is_iomem) {
+ for (i = 0; i < bo->mem.num_pages; ++i) {
+ addr = bo->mem.bus.base + bo->mem.bus.offset;
+ paddr = bus_space_mmap(bo->bdev->memt, addr,
+ i << PAGE_SHIFT, 0, 0);
+ page = PHYS_TO_VM_PAGE(paddr);
+ if (unlikely(page == NULL))
+ continue;
+ pmap_page_protect(page, PROT_NONE);
+ }
+ } else if (ttm) {
+ for (i = 0; i < ttm->num_pages; ++i) {
+ page = ttm->pages[i];
+ if (unlikely(page == NULL))
+ continue;
+ pmap_page_protect(page, PROT_NONE);
}
}
ttm_mem_io_free_vm(bo);
}
-#endif
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
@@ -1588,66 +1654,95 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
+/**
+ * ttm_bo_setup_vm:
+ *
+ * @bo: the buffer to allocate address space for
+ *
+ * Allocate address space in the drm device so that applications
+ * can mmap the buffer and access the contents. This only
+ * applies to ttm_bo_type_device objects as others are not
+ * placed in the drm device address space.
+ */
+
+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ return drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+ bo->mem.num_pages);
+}
+
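A minimal sketch of the consumer side of ttm_bo_setup_vm(), assuming the
drm_vma_manager helpers already included by this header set; the mmap
plumbing itself is omitted:

	/* The fake offset at which userspace mmaps the object, as
	 * published by drm_vma_offset_add() above. */
	uint64_t mmap_offset = drm_vma_node_offset_addr(&bo->vma_node);
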
int ttm_bo_wait(struct ttm_buffer_object *bo,
bool lazy, bool interruptible, bool no_wait)
{
- struct reservation_object_list *fobj;
- struct reservation_object *resv;
- struct fence *excl;
- long timeout = 15 * HZ;
- int i;
+ struct ttm_bo_driver *driver = bo->bdev->driver;
+ struct ttm_bo_device *bdev = bo->bdev;
+ void *sync_obj;
+ int ret = 0;
- resv = bo->resv;
- fobj = reservation_object_get_list(resv);
- excl = reservation_object_get_excl(resv);
- if (excl) {
- if (!fence_is_signaled(excl)) {
- if (no_wait)
- return -EBUSY;
+ if (likely(bo->sync_obj == NULL))
+ return 0;
- timeout = fence_wait_timeout(excl,
- interruptible, timeout);
- }
- }
+ while (bo->sync_obj) {
- for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
- struct fence *fence;
- fence = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(resv));
+ if (driver->sync_obj_signaled(bo->sync_obj)) {
+ void *tmp_obj = bo->sync_obj;
+ bo->sync_obj = NULL;
+ clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+ spin_unlock(&bdev->fence_lock);
+ driver->sync_obj_unref(&tmp_obj);
+ spin_lock(&bdev->fence_lock);
+ continue;
+ }
- if (!fence_is_signaled(fence)) {
- if (no_wait)
- return -EBUSY;
+ if (no_wait)
+ return -EBUSY;
- timeout = fence_wait_timeout(fence,
- interruptible, timeout);
+ sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ spin_unlock(&bdev->fence_lock);
+ ret = driver->sync_obj_wait(sync_obj,
+ lazy, interruptible);
+ if (unlikely(ret != 0)) {
+ driver->sync_obj_unref(&sync_obj);
+ spin_lock(&bdev->fence_lock);
+ return ret;
+ }
+ spin_lock(&bdev->fence_lock);
+ if (likely(bo->sync_obj == sync_obj)) {
+ void *tmp_obj = bo->sync_obj;
+ bo->sync_obj = NULL;
+ clear_bit(TTM_BO_PRIV_FLAG_MOVING,
+ &bo->priv_flags);
+ spin_unlock(&bdev->fence_lock);
+ driver->sync_obj_unref(&sync_obj);
+ driver->sync_obj_unref(&tmp_obj);
+ spin_lock(&bdev->fence_lock);
+ } else {
+ spin_unlock(&bdev->fence_lock);
+ driver->sync_obj_unref(&sync_obj);
+ spin_lock(&bdev->fence_lock);
}
}
-
- if (timeout < 0)
- return timeout;
-
- if (timeout == 0)
- return -EBUSY;
-
- reservation_object_add_excl_fence(resv, NULL);
- clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
+ struct ttm_bo_device *bdev = bo->bdev;
int ret = 0;
/*
* Using ttm_bo_reserve makes sure the lru lists are updated.
*/
- ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
if (unlikely(ret != 0))
return ret;
+ spin_lock(&bdev->fence_lock);
ret = ttm_bo_wait(bo, false, true, no_wait);
+ spin_unlock(&bdev->fence_lock);
if (likely(ret == 0))
atomic_inc(&bo->cpu_writers);
ttm_bo_unreserve(bo);
@@ -1673,10 +1768,11 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
struct ttm_buffer_object *bo;
int ret = -EBUSY;
int put_count;
+ uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &glob->swap_lru, swap) {
- ret = __ttm_bo_reserve(bo, false, true, false, NULL);
+ ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
if (!ret)
break;
}
@@ -1703,13 +1799,14 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
* Wait for GPU, then move to system cached.
*/
+ spin_lock(&bo->bdev->fence_lock);
ret = ttm_bo_wait(bo, false, false, false);
+ spin_unlock(&bo->bdev->fence_lock);
if (unlikely(ret != 0))
goto out;
- if (bo->mem.mem_type != TTM_PL_SYSTEM ||
- bo->ttm->caching_state != tt_cached) {
+ if ((bo->mem.placement & swap_placement) != swap_placement) {
struct ttm_mem_reg evict_mem;
evict_mem = bo->mem;
@@ -1742,7 +1839,8 @@ out:
* already swapped buffer.
*/
- __ttm_bo_unreserve(bo);
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
@@ -1753,35 +1851,3 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);
-
-/**
- * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
- * unreserved
- *
- * @bo: Pointer to buffer
- */
-int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
-{
- int ret;
-
- /*
- * In the absense of a wait_unlocked API,
- * Use the bo::wu_mutex to avoid triggering livelocks due to
- * concurrent use of this function. Note that this use of
- * bo::wu_mutex can go away if we change locking order to
- * mmap_sem -> bo::reserve.
- */
- ret = mutex_lock_interruptible(&bo->wu_mutex);
- if (unlikely(ret != 0))
- return -ERESTARTSYS;
- if (!ww_mutex_is_locked(&bo->resv->lock))
- goto out_unlock;
- ret = __ttm_bo_reserve(bo, true, false, false, NULL);
- if (unlikely(ret != 0))
- goto out_unlock;
- __ttm_bo_unreserve(bo);
-
-out_unlock:
- mutex_unlock(&bo->wu_mutex);
- return ret;
-}
diff --git a/sys/dev/pci/drm/ttm/ttm_bo_api.h b/sys/dev/pci/drm/ttm/ttm_bo_api.h
index e2fc62fc7df..8abe4cc8adf 100644
--- a/sys/dev/pci/drm/ttm/ttm_bo_api.h
+++ b/sys/dev/pci/drm/ttm/ttm_bo_api.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: ttm_bo_api.h,v 1.8 2018/04/20 16:09:37 deraadt Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -31,34 +32,19 @@
#ifndef _TTM_BO_API_H_
#define _TTM_BO_API_H_
-#include <dev/pci/drm/drm_linux.h>
-#include <dev/pci/drm/linux_ww_mutex.h>
-#include <dev/pci/drm/linux_reservation.h>
-#include <dev/pci/drm/drm_hashtab.h>
+#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/drm_vma_manager.h>
struct ttm_bo_device;
struct drm_mm_node;
-/**
- * struct ttm_place
- *
- * @fpfn: first valid page frame number to put the object
- * @lpfn: last valid page frame number to put the object
- * @flags: memory domain and caching flags for the object
- *
- * Structure indicating a possible place to put an object.
- */
-struct ttm_place {
- unsigned fpfn;
- unsigned lpfn;
- uint32_t flags;
-};
/**
* struct ttm_placement
*
+ * @fpfn: first valid page frame number to put the object
+ * @lpfn: last valid page frame number to put the object
* @num_placement: number of preferred placements
* @placement: preferred placements
* @num_busy_placement: number of preferred placements when need to evict buffer
@@ -67,10 +53,12 @@ struct ttm_place {
* Structure indicating the placement you request for an object.
*/
struct ttm_placement {
- unsigned num_placement;
- const struct ttm_place *placement;
- unsigned num_busy_placement;
- const struct ttm_place *busy_placement;
+ unsigned fpfn;
+ unsigned lpfn;
+ unsigned num_placement;
+ const uint32_t *placement;
+ unsigned num_busy_placement;
+ const uint32_t *busy_placement;
};
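A hedged initialization sketch for the flag-array form of this structure;
the VRAM/WC flags and the 256-page window are illustrative values, not
taken from this change:

	static const uint32_t placements[] = {
		TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC
	};
	struct ttm_placement placement = {
		.fpfn = 0,		/* first allowed page frame */
		.lpfn = 256,		/* last allowed page frame */
		.num_placement = 1,
		.placement = placements,
		.num_busy_placement = 1,
		.busy_placement = placements,
	};
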
/**
@@ -160,6 +148,7 @@ struct ttm_tt;
* Lru lists may keep one refcount, the delayed delete list, and kref != 0
* keeps one refcount. When this refcount reaches zero,
* the object is destroyed.
+ * @event_queue: Queue for processes waiting on buffer object status change.
* @mem: structure describing current placement.
* @persistent_swap_storage: Usually the swap storage is deleted for buffers
* pinned in physical memory. If this behaviour is not desired, this member
@@ -170,12 +159,18 @@ struct ttm_tt;
* @lru: List head for the lru list.
* @ddestroy: List head for the delayed destroy list.
* @swap: List head for swap LRU list.
+ * @val_seq: Sequence of the validation holding the @reserved lock.
+ * Used to avoid starvation when many processes compete to validate the
+ * buffer. This member is protected by the bo_device::lru_lock.
+ * @seq_valid: The value of @val_seq is valid. This value is protected by
+ * the bo_device::lru_lock.
+ * @reserved: Deadlock-free lock used for synchronization state transitions.
+ * @sync_obj: Pointer to a synchronization object.
* @priv_flags: Flags describing buffer object internal state.
* @vma_node: Address space manager node.
* @offset: The current GPU offset, which can have different meanings
* depending on the memory type. For SYSTEM type memory, it should be 0.
* @cur_placement: Hint of current placement.
- * @wu_mutex: Wait unreserved mutex.
*
* Base class for TTM buffer object, that deals with data placement and CPU
* mappings. GPU mappings are really up to the driver, but for simpler GPUs
@@ -209,9 +204,10 @@ struct ttm_buffer_object {
struct kref kref;
struct kref list_kref;
+ wait_queue_head_t event_queue;
/**
- * Members protected by the bo::resv::reserved lock.
+ * Members protected by the bo::reserved lock.
*/
struct ttm_mem_reg mem;
@@ -233,11 +229,24 @@ struct ttm_buffer_object {
struct list_head ddestroy;
struct list_head swap;
struct list_head io_reserve_lru;
+ uint32_t val_seq;
+ bool seq_valid;
/**
- * Members protected by a bo reservation.
+ * Members protected by the bdev::lru_lock
+ * only when written to.
*/
+ atomic_t reserved;
+
+ /**
+	 * Members protected by struct ttm_bo_device::fence_lock.
+	 * In addition, setting sync_obj to anything other
+	 * than NULL requires bo::reserved to be held. This allows for
+	 * checking NULL while reserved but not holding the mentioned lock.
+ */
+
+ void *sync_obj;
unsigned long priv_flags;
struct drm_vma_offset_node vma_node;
@@ -248,14 +257,10 @@ struct ttm_buffer_object {
* either of these locks held.
*/
- uint64_t offset; /* GPU address space is independent of CPU word size */
+ unsigned long offset;
uint32_t cur_placement;
struct sg_table *sg;
-
- struct reservation_object *resv;
- struct reservation_object ttm_resv;
- struct rwlock wu_mutex;
};
/**
@@ -315,20 +320,6 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
*/
extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
bool interruptible, bool no_wait);
-
-/**
- * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
- *
- * @placement: Return immediately if buffer is busy.
- * @mem: The struct ttm_mem_reg indicating the region where the bo resides
- * @new_flags: Describes compatible placement found
- *
- * Returns true if the placement is compatible
- */
-extern bool ttm_bo_mem_compat(struct ttm_placement *placement,
- struct ttm_mem_reg *mem,
- uint32_t *new_flags);
-
/**
* ttm_bo_validate
*
@@ -473,7 +464,6 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
* point to the shmem object backing a GEM object if TTM is used to back a
* GEM user interface.
* @acc_size: Accounted size for this object.
- * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
* @destroy: Destroy function. Use NULL for kfree().
*
* This function initializes a pre-allocated struct ttm_buffer_object.
@@ -501,16 +491,16 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
struct uvm_object *persistent_swap_storage,
size_t acc_size,
struct sg_table *sg,
- struct reservation_object *resv,
void (*destroy) (struct ttm_buffer_object *));
/**
- * ttm_bo_create
+ * ttm_bo_synccpu_object_init
*
* @bdev: Pointer to a ttm_bo_device struct.
+ * @bo: Pointer to a ttm_buffer_object to be initialized.
* @size: Requested size of buffer object.
* @type: Requested type of buffer object.
- * @placement: Initial placement.
+ * @flags: Initial placement flags.
* @page_alignment: Data alignment in pages.
* @interruptible: If needing to sleep while waiting for GPU resources,
* sleep interruptible.
@@ -539,6 +529,20 @@ extern int ttm_bo_create(struct ttm_bo_device *bdev,
struct ttm_buffer_object **p_bo);
/**
+ * ttm_bo_check_placement
+ *
+ * @bo: the buffer object.
+ * @placement: placements
+ *
+ * Performs minimal validity checking on an intended change of
+ * placement flags.
+ * Returns
+ * -EINVAL: Intended change is invalid or not allowed.
+ */
+extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement);
+
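Hedged usage sketch, reusing a placement like the one sketched under
struct ttm_placement above. The -EINVAL contract below is the documented
interface; the implementation restored in this diff asserts via BUG_ON
and returns 0:

	ret = ttm_bo_check_placement(bo, &placement);
	if (ret)	/* -EINVAL: pfn window cannot hold the object */
		return ret;
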
+/**
* ttm_bo_init_mm
*
* @bdev: Pointer to a ttm_bo_device struct.
@@ -709,9 +713,23 @@ extern struct uvm_object *ttm_bo_mmap(voff_t, vsize_t,
*/
extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
- const char __user *wbuf, char __user *rbuf,
- size_t count, loff_t *f_pos, bool write);
+ const char *wbuf, char *rbuf,
+ size_t count, off_t *f_pos, bool write);
extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
-extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_is_reserved - return an indication if a ttm buffer object is reserved
+ *
+ * @bo: The buffer object to check.
+ *
+ * This function returns an indication of whether a bo is reserved or not, and
+ * should only be used to print an error when it unexpectedly is not (e.g. on
+ * incorrect api usage), since there is no guarantee that the caller is the
+ * one holding the reservation.
+ */
+static inline bool ttm_bo_is_reserved(struct ttm_buffer_object *bo)
+{
+ return atomic_read(&bo->reserved);
+}
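In this revision the helper backs assertions only, as in ttm_bo_evict()
and ttm_bo_validate() earlier in this diff:

	BUG_ON(!ttm_bo_is_reserved(bo));
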
+
#endif
diff --git a/sys/dev/pci/drm/ttm/ttm_bo_driver.h b/sys/dev/pci/drm/ttm/ttm_bo_driver.h
index 94d080dd65f..4e6ff00f784 100644
--- a/sys/dev/pci/drm/ttm/ttm_bo_driver.h
+++ b/sys/dev/pci/drm/ttm/ttm_bo_driver.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: ttm_bo_driver.h,v 1.10 2018/04/20 16:09:37 deraadt Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 Vmware, Inc., Palo Alto, CA., USA
@@ -34,10 +35,8 @@
#include <dev/pci/drm/ttm/ttm_bo_api.h>
#include <dev/pci/drm/ttm/ttm_memory.h>
#include <dev/pci/drm/ttm/ttm_module.h>
-#include <dev/pci/drm/ttm/ttm_placement.h>
#include <dev/pci/drm/drm_mm.h>
#include <dev/pci/drm/drm_global.h>
-#include <dev/pci/drm/drm_vma_manager.h>
struct ttm_backend_func {
/**
@@ -130,7 +129,6 @@ struct ttm_tt {
* struct ttm_dma_tt
*
* @ttm: Base ttm_tt struct.
- * @cpu_address: The CPU address of the pages
* @dma_address: The DMA (bus) addresses of the pages
* @pages_list: used by some page allocation backend
*
@@ -140,8 +138,7 @@ struct ttm_tt {
*/
struct ttm_dma_tt {
struct ttm_tt ttm;
- void **cpu_address;
- dma_addr_t *dma_address;
+ bus_addr_t *dma_address;
struct list_head pages_list;
};
@@ -181,7 +178,6 @@ struct ttm_mem_type_manager_func {
* @man: Pointer to a memory type manager.
* @bo: Pointer to the buffer object we're allocating space for.
* @placement: Placement details.
- * @flags: Additional placement flags.
* @mem: Pointer to a struct ttm_mem_reg to be filled in.
*
* This function should allocate space in the memory type managed
@@ -205,7 +201,7 @@ struct ttm_mem_type_manager_func {
*/
int (*get_node)(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
- const struct ttm_place *place,
+ struct ttm_placement *placement,
struct ttm_mem_reg *mem);
/**
@@ -274,7 +270,7 @@ struct ttm_mem_type_manager {
bool has_type;
bool use_type;
uint32_t flags;
- uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
+ unsigned long gpu_offset;
uint64_t size;
uint32_t available_caching;
uint32_t default_caching;
@@ -309,6 +305,11 @@ struct ttm_mem_type_manager {
* @move: Callback for a driver to hook in accelerated functions to
* move a buffer.
* If set to NULL, a potentially slow memcpy() move is used.
+ * @sync_obj_signaled: See ttm_fence_api.h
+ * @sync_obj_wait: See ttm_fence_api.h
+ * @sync_obj_flush: See ttm_fence_api.h
+ * @sync_obj_unref: See ttm_fence_api.h
+ * @sync_obj_ref: See ttm_fence_api.h
*/
struct ttm_bo_driver {
@@ -410,6 +411,23 @@ struct ttm_bo_driver {
int (*verify_access) (struct ttm_buffer_object *bo,
struct file *filp);
+ /**
+ * In case a driver writer dislikes the TTM fence objects,
+ * the driver writer can replace those with sync objects of
+ * his / her own. If it turns out that no driver writer is
+	 * using these, I suggest we remove these hooks and plug in
+ * fences directly. The bo driver needs the following functionality:
+ * See the corresponding functions in the fence object API
+ * documentation.
+ */
+
+ bool (*sync_obj_signaled) (void *sync_obj);
+ int (*sync_obj_wait) (void *sync_obj,
+ bool lazy, bool interruptible);
+ int (*sync_obj_flush) (void *sync_obj);
+ void (*sync_obj_unref) (void **sync_obj);
+ void *(*sync_obj_ref) (void *sync_obj);
+
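A hedged sketch of a driver filling in these hooks; every mydrv_* name is
hypothetical (radeon's fence callbacks elsewhere in this tree are the real
reference):

	static bool mydrv_sync_obj_signaled(void *sync_obj)
	{
		return mydrv_fence_signaled((struct mydrv_fence *)sync_obj);
	}

	static struct ttm_bo_driver mydrv_bo_driver = {
		/* ... other hooks ... */
		.sync_obj_signaled = mydrv_sync_obj_signaled,
		.sync_obj_wait = mydrv_sync_obj_wait,
		.sync_obj_flush = mydrv_sync_obj_flush,
		.sync_obj_unref = mydrv_sync_obj_unref,
		.sync_obj_ref = mydrv_sync_obj_ref,
	};
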
/* hook to notify driver about a driver move so it
* can do tiling things */
void (*move_notify)(struct ttm_buffer_object *bo,
@@ -496,6 +514,8 @@ struct ttm_bo_global {
*
* @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
* @man: An array of mem_type_managers.
+ * @fence_lock: Protects the synchronizing members on *all* bos belonging
+ * to this device.
* @vma_manager: Address space manager
* lru_lock: Spinlock that protects the buffer+device lru lists and
* ddestroy lists.
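The calling convention this diff establishes around fence_lock, as seen at
the ttm_bo_wait() call sites in ttm_bo.c above: enter with the lock held;
the function may drop and retake it while sleeping on a sync object.

	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);
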
@@ -515,6 +535,7 @@ struct ttm_bo_device {
struct ttm_bo_global *glob;
struct ttm_bo_driver *driver;
struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
+ spinlock_t fence_lock;
bus_space_tag_t iot;
bus_space_tag_t memt;
@@ -632,6 +653,18 @@ extern void ttm_tt_unbind(struct ttm_tt *ttm);
extern int ttm_tt_swapin(struct ttm_tt *ttm);
/**
+ * ttm_tt_cache_flush:
+ *
+ * @pages: An array of pointers to struct vm_page to flush.
+ * @num_pages: Number of pages to flush.
+ *
+ * Flush the data of the indicated pages from the cpu caches.
+ * This is used when changing caching attributes of the pages from
+ * cache-coherent.
+ */
+extern void ttm_tt_cache_flush(struct vm_page *pages[], unsigned long num_pages);
+
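Hedged call sketch, using the ttm_tt fields this header already exposes:

	/* Flush backing pages before switching them to a
	 * non-cache-coherent (e.g. write-combining) mapping. */
	ttm_tt_cache_flush(ttm->pages, ttm->num_pages);
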
+/**
* ttm_tt_set_placement_caching:
*
* @ttm A struct ttm_tt the backing pages of which will change caching policy.
@@ -648,15 +681,6 @@ extern int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
extern int ttm_tt_swapout(struct ttm_tt *ttm,
struct uvm_object *persistent_swap_storage);
-/**
- * ttm_tt_unpopulate - free pages from a ttm
- *
- * @ttm: Pointer to the ttm_tt structure
- *
- * Calls the driver method to free all pages from a ttm
- */
-extern void ttm_tt_unpopulate(struct ttm_tt *ttm);
-
/*
* ttm_bo.c
*/
@@ -714,7 +738,6 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
* @bdev: A pointer to a struct ttm_bo_device to initialize.
* @glob: A pointer to an initialized struct ttm_bo_global.
* @driver: A pointer to a struct ttm_bo_driver set up by the caller.
- * @mapping: The address space to use for this bo.
* @file_page_offset: Offset into the device address space that is available
* for buffer data. This ensures compatibility with other users of the
* address space.
@@ -726,7 +749,6 @@ extern int ttm_bo_device_release(struct ttm_bo_device *bdev);
extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_global *glob,
struct ttm_bo_driver *driver,
- struct address_space *mapping,
uint64_t file_page_offset, bool need_dma32);
/**
@@ -751,55 +773,6 @@ extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
bool interruptible);
extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
-extern void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
-extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
-
-/**
- * __ttm_bo_reserve:
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
- * @use_ticket: If @bo is already reserved, Only sleep waiting for
- * it to become unreserved if @ticket->stamp is older.
- *
- * Will not remove reserved buffers from the lru lists.
- * Otherwise identical to ttm_bo_reserve.
- *
- * Returns:
- * -EDEADLK: The reservation may cause a deadlock.
- * Release all buffer reservations, wait for @bo to become unreserved and
- * try again. (only if use_sequence == 1).
- * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
- * a signal. Release all buffer reservations and return to user-space.
- * -EBUSY: The function needed to sleep, but @no_wait was true
- * -EALREADY: Bo already reserved using @ticket. This error code will only
- * be returned if @use_ticket is set to true.
- */
-static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
- bool interruptible,
- bool no_wait, bool use_ticket,
- struct ww_acquire_ctx *ticket)
-{
- int ret = 0;
-
- if (no_wait) {
- bool success;
- if (WARN_ON(ticket))
- return -EBUSY;
-
- success = ww_mutex_trylock(&bo->resv->lock);
- return success ? 0 : -EBUSY;
- }
-
- if (interruptible)
- ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket);
- else
- ret = ww_mutex_lock(&bo->resv->lock, ticket);
- if (ret == -EINTR)
- return -ERESTARTSYS;
- return ret;
-}
/**
* ttm_bo_reserve:
@@ -807,8 +780,8 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
* @bo: A pointer to a struct ttm_buffer_object.
* @interruptible: Sleep interruptible if waiting.
* @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
- * @use_ticket: If @bo is already reserved, Only sleep waiting for
- * it to become unreserved if @ticket->stamp is older.
+ * @use_sequence: If @bo is already reserved, Only sleep waiting for
+ * it to become unreserved if @sequence < (@bo)->sequence.
*
* Locks a buffer object for validation. (Or prevents other processes from
* locking it for validation) and removes it from lru lists, while taking
@@ -819,10 +792,19 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
* to make room for a buffer already reserved. (Buffers are reserved before
* they are evicted). The following algorithm prevents such deadlocks from
* occurring:
- * Processes attempting to reserve multiple buffers other than for eviction,
+ * 1) Buffers are reserved with the lru spinlock held. Upon successful
+ * reservation they are removed from the lru list. This stops a reserved buffer
+ * from being evicted. However the lru spinlock is released between the time
+ * a buffer is selected for eviction and the time it is reserved.
+ * Therefore a check is made when a buffer is reserved for eviction, that it
+ * is still the first buffer in the lru list, before it is removed from the
+ * list. @check_lru == 1 forces this check. If it fails, the function returns
+ * -EINVAL, and the caller should then choose a new buffer to evict and repeat
+ * the procedure.
+ * 2) Processes attempting to reserve multiple buffers other than for eviction,
* (typically execbuf), should first obtain a unique 32-bit
* validation sequence number,
- * and call this function with @use_ticket == 1 and @ticket->stamp == the unique
+ * and call this function with @use_sequence == 1 and @sequence == the unique
* sequence number. If upon call of this function, the buffer object is already
* reserved, the validation sequence is checked against the validation
* sequence of the process currently reserving the buffer,
@@ -837,113 +819,84 @@ static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
* will eventually succeed, preventing both deadlocks and starvation.
*
* Returns:
- * -EDEADLK: The reservation may cause a deadlock.
+ * -EAGAIN: The reservation may cause a deadlock.
* Release all buffer reservations, wait for @bo to become unreserved and
* try again. (only if use_sequence == 1).
* -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
* a signal. Release all buffer reservations and return to user-space.
* -EBUSY: The function needed to sleep, but @no_wait was true
- * -EALREADY: Bo already reserved using @ticket. This error code will only
- * be returned if @use_ticket is set to true.
+ * -EDEADLK: Bo already reserved using @sequence. This error code will only
+ * be returned if @use_sequence is set to true.
*/
-static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
- bool interruptible,
- bool no_wait, bool use_ticket,
- struct ww_acquire_ctx *ticket)
-{
- int ret;
-
- WARN_ON(!atomic_read(&bo->kref.refcount));
+extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait, bool use_sequence, uint32_t sequence);
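A hedged execbuf-style sketch of the sequence mechanism described above;
unwinding of reservations already held is elided, and the val_seq
increment would be serialized (e.g. under the lru lock) in real code:

	uint32_t seq = ++bdev->val_seq;	/* unique validation sequence */

	ret = ttm_bo_reserve(bo, true, false, true, seq);
	if (ret == -EAGAIN) {
		/* Deadlock avoidance: drop every reservation held so
		 * far, wait for @bo to become unreserved, retry. */
	}
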
- ret = __ttm_bo_reserve(bo, interruptible, no_wait, use_ticket, ticket);
- if (likely(ret == 0))
- ttm_bo_del_sub_from_lru(bo);
-
- return ret;
-}
/**
- * ttm_bo_reserve_slowpath:
+ * ttm_bo_reserve_locked:
+ *
* @bo: A pointer to a struct ttm_buffer_object.
* @interruptible: Sleep interruptible if waiting.
- * @sequence: Set (@bo)->sequence to this value after lock
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @use_sequence: If @bo is already reserved, Only sleep waiting for
+ * it to become unreserved if @sequence < (@bo)->sequence.
+ *
+ * Must be called with struct ttm_bo_global::lru_lock held,
+ * and will not remove reserved buffers from the lru lists.
+ * The function may release the LRU spinlock if it needs to sleep.
+ * Otherwise identical to ttm_bo_reserve.
*
- * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
- * from all our other reservations. Because there are no other reservations
- * held by us, this function cannot deadlock any more.
+ * Returns:
+ * -EAGAIN: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again. (only if use_sequence == 1).
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EDEADLK: Bo already reserved using @sequence. This error code will only
+ * be returned if @use_sequence is set to true.
*/
-static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
- bool interruptible,
- struct ww_acquire_ctx *ticket)
-{
- int ret = 0;
-
- WARN_ON(!atomic_read(&bo->kref.refcount));
-
- if (interruptible)
- ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
- ticket);
- else
- ww_mutex_lock_slow(&bo->resv->lock, ticket);
-
- if (likely(ret == 0))
- ttm_bo_del_sub_from_lru(bo);
- else if (ret == -EINTR)
- ret = -ERESTARTSYS;
-
- return ret;
-}
+extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait, bool use_sequence,
+ uint32_t sequence);
/**
- * __ttm_bo_unreserve
+ * ttm_bo_unreserve
+ *
* @bo: A pointer to a struct ttm_buffer_object.
*
- * Unreserve a previous reservation of @bo where the buffer object is
- * already on lru lists.
+ * Unreserve a previous reservation of @bo.
*/
-static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo)
-{
- ww_mutex_unlock(&bo->resv->lock);
-}
+extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
/**
- * ttm_bo_unreserve
+ * ttm_bo_unreserve_locked
*
* @bo: A pointer to a struct ttm_buffer_object.
*
* Unreserve a previous reservation of @bo.
+ * Needs to be called with struct ttm_bo_global::lru_lock held.
*/
-static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
-{
- if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
- spin_lock(&bo->glob->lru_lock);
- ttm_bo_add_to_lru(bo);
- spin_unlock(&bo->glob->lru_lock);
- }
- __ttm_bo_unreserve(bo);
-}
+extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
/**
- * ttm_bo_unreserve_ticket
+ * ttm_bo_wait_unreserved
+ *
* @bo: A pointer to a struct ttm_buffer_object.
- * @ticket: ww_acquire_ctx used for reserving
*
- * Unreserve a previous reservation of @bo made with @ticket.
+ * Wait for a struct ttm_buffer_object to become unreserved.
+ * This is typically used in the execbuf code to relax cpu usage when
+ * backing off from a potential deadlock condition.
*/
-static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo,
- struct ww_acquire_ctx *t)
-{
- ttm_bo_unreserve(bo);
-}
+extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
+ bool interruptible);
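The typical backoff pattern, mirroring the Linux fault handler removed
later in this diff: a no-wait reserve, then a blocking wait before the
retry.

	ret = ttm_bo_reserve(bo, true, true, false, 0);	/* no_wait */
	if (ret == -EBUSY) {
		ttm_bo_wait_unreserved(bo, true);
		/* retry the fault / validation */
	}
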
/*
* ttm_bo_util.c
*/
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem);
-void ttm_mem_io_free(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem);
/**
* ttm_bo_move_ttm
*
@@ -1001,7 +954,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
* ttm_bo_move_accel_cleanup.
*
* @bo: A pointer to a struct ttm_buffer_object.
- * @fence: A fence object that signals when moving is complete.
+ * @sync_obj: A sync object that signals when moving is complete.
* @evict: This is an evict move. Don't return until the buffer is idle.
* @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
@@ -1015,7 +968,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
*/
extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- struct fence *fence,
+ void *sync_obj,
bool evict, bool no_wait_gpu,
struct ttm_mem_reg *new_mem);
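Hedged driver-side sketch: after emitting an accelerated copy, the fence
for that copy is handed to TTM as the sync object ('fence' here is
illustrative; radeon passes its own fence type this way):

	ret = ttm_bo_move_accel_cleanup(bo, (void *)fence,
					evict, no_wait_gpu, new_mem);
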
/**
diff --git a/sys/dev/pci/drm/ttm/ttm_bo_manager.c b/sys/dev/pci/drm/ttm/ttm_bo_manager.c
index 7485030e7c9..d229d130373 100644
--- a/sys/dev/pci/drm/ttm/ttm_bo_manager.c
+++ b/sys/dev/pci/drm/ttm/ttm_bo_manager.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: ttm_bo_manager.c,v 1.9 2018/04/20 16:09:37 deraadt Exp $ */
/**************************************************************************
*
* Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
@@ -34,7 +35,7 @@
#include <dev/pci/drm/drm_mm.h>
/**
- * Currently we use a spinlock for the lock, but a mutex *may* be
+ * Currently we use a mutex for the lock, but a rwlock *may* be
* more appropriate to reduce scheduling latency if the range manager
* ends up with very fragmented allocation patterns.
*/
@@ -46,7 +47,7 @@ struct ttm_range_manager {
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
- const struct ttm_place *place,
+ struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
@@ -57,7 +58,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
unsigned long lpfn;
int ret;
- lpfn = place->lpfn;
+ lpfn = placement->lpfn;
if (!lpfn)
lpfn = man->size;
@@ -65,15 +66,10 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
if (!node)
return -ENOMEM;
- if (place->flags & TTM_PL_FLAG_TOPDOWN) {
- sflags = DRM_MM_SEARCH_BELOW;
- aflags = DRM_MM_CREATE_TOP;
- }
-
spin_lock(&rman->lock);
ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
mem->page_alignment, 0,
- place->fpfn, lpfn,
+ placement->fpfn, lpfn,
sflags, aflags);
spin_unlock(&rman->lock);
@@ -137,11 +133,14 @@ static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
const char *prefix)
{
+ printf("%s stub\n", __func__);
+#ifdef notyet
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
spin_lock(&rman->lock);
drm_mm_debug_table(&rman->mm, prefix);
spin_unlock(&rman->lock);
+#endif
}
const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
diff --git a/sys/dev/pci/drm/ttm/ttm_bo_util.c b/sys/dev/pci/drm/ttm/ttm_bo_util.c
index 3b26d865be2..4f268efc7b1 100644
--- a/sys/dev/pci/drm/ttm/ttm_bo_util.c
+++ b/sys/dev/pci/drm/ttm/ttm_bo_util.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: ttm_bo_util.c,v 1.18 2018/04/20 16:09:37 deraadt Exp $ */
/**************************************************************************
*
* Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
@@ -31,8 +32,11 @@
#include <dev/pci/drm/ttm/ttm_bo_driver.h>
#include <dev/pci/drm/ttm/ttm_placement.h>
#include <dev/pci/drm/drm_vma_manager.h>
-#include <dev/pci/drm/drmP.h>
-#include <dev/pci/drm/linux_ww_mutex.h>
+
+int ttm_mem_reg_ioremap(struct ttm_bo_device *, struct ttm_mem_reg *,
+ void **);
+void ttm_mem_reg_iounmap(struct ttm_bo_device *, struct ttm_mem_reg *,
+ void *);
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
@@ -83,7 +87,6 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
mutex_lock(&man->io_reserve_mutex);
return 0;
}
-EXPORT_SYMBOL(ttm_mem_io_lock);
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
@@ -92,7 +95,6 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
mutex_unlock(&man->io_reserve_mutex);
}
-EXPORT_SYMBOL(ttm_mem_io_unlock);
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
@@ -110,9 +112,8 @@ static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
return 0;
}
-
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
+static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
int ret = 0;
@@ -134,10 +135,9 @@ retry:
}
return ret;
}
-EXPORT_SYMBOL(ttm_mem_io_reserve);
-void ttm_mem_io_free(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem)
+static void ttm_mem_io_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@@ -150,7 +150,6 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev,
bdev->driver->io_mem_free(bdev, mem);
}
-EXPORT_SYMBOL(ttm_mem_io_free);
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
@@ -183,7 +182,7 @@ void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
}
}
-static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void **virtual)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@@ -207,8 +206,7 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *m
flags = 0;
if (bus_space_map(bdev->memt, mem->bus.base + mem->bus.offset,
- mem->bus.size, BUS_SPACE_MAP_LINEAR | flags,
- &mem->bus.bsh)) {
+ mem->bus.size, BUS_SPACE_MAP_LINEAR | flags, &mem->bus.bsh)) {
printf("%s bus_space_map failed\n", __func__);
return -ENOMEM;
}
@@ -226,7 +224,7 @@ static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *m
return 0;
}
-static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void *virtual)
{
struct ttm_mem_type_manager *man;
@@ -265,34 +263,26 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
-#ifdef CONFIG_X86
- dst = kmap_atomic_prot(d, prot);
-#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
dst = vmap(&d, 1, 0, prot);
else
dst = kmap(d);
-#endif
if (!dst)
return -ENOMEM;
memcpy_fromio(dst, src, PAGE_SIZE);
-#ifdef CONFIG_X86
- kunmap_atomic(dst);
-#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
vunmap(dst, PAGE_SIZE);
else
kunmap(d);
-#endif
return 0;
}
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
unsigned long page,
- pgprot_t prot)
+ vm_prot_t prot)
{
struct vm_page *s = ttm->pages[page];
void *src;
@@ -301,27 +291,19 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
return -ENOMEM;
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
-#ifdef CONFIG_X86
- src = kmap_atomic_prot(s, prot);
-#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
src = vmap(&s, 1, 0, prot);
else
src = kmap(s);
-#endif
if (!src)
return -ENOMEM;
memcpy_toio(dst, src, PAGE_SIZE);
-#ifdef CONFIG_X86
- kunmap_atomic(src);
-#else
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
vunmap(src, PAGE_SIZE);
else
kunmap(s);
-#endif
return 0;
}
@@ -357,14 +339,10 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
goto out2;
/*
- * Don't move nonexistent data. Clear destination instead.
+ * Move nonexistent data. NOP.
*/
- if (old_iomap == NULL &&
- (ttm == NULL || (ttm->state == tt_unpopulated &&
- !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
- memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
+ if (old_iomap == NULL && ttm == NULL)
goto out2;
- }
/*
* TTM might be null for moves within the same region.
@@ -451,7 +429,8 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
struct ttm_buffer_object **new_obj)
{
struct ttm_buffer_object *fbo;
- int ret;
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_driver *driver = bdev->driver;
fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
if (!fbo)
@@ -464,6 +443,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
* TODO: Explicit member copy would probably be better here.
*/
+ init_waitqueue_head(&fbo->event_queue);
INIT_LIST_HEAD(&fbo->ddestroy);
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
@@ -471,47 +451,21 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
drm_vma_node_reset(&fbo->vma_node);
atomic_set(&fbo->cpu_writers, 0);
+ spin_lock(&bdev->fence_lock);
+ if (bo->sync_obj)
+ fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+ else
+ fbo->sync_obj = NULL;
+ spin_unlock(&bdev->fence_lock);
kref_init(&fbo->list_kref);
kref_init(&fbo->kref);
fbo->destroy = &ttm_transfered_destroy;
fbo->acc_size = 0;
- fbo->resv = &fbo->ttm_resv;
- reservation_object_init(fbo->resv);
- ret = ww_mutex_trylock(&fbo->resv->lock);
- WARN_ON(!ret);
*new_obj = fbo;
return 0;
}
-#ifdef __linux__
-pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
-{
- /* Cached mappings need no adjustment */
- if (caching_flags & TTM_PL_FLAG_CACHED)
- return tmp;
-
-#if defined(__i386__) || defined(__x86_64__)
- if (caching_flags & TTM_PL_FLAG_WC)
- tmp = pgprot_writecombine(tmp);
- else if (boot_cpu_data.x86 > 3)
- tmp = pgprot_noncached(tmp);
-#endif
-#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
- defined(__powerpc__)
- if (caching_flags & TTM_PL_FLAG_WC)
- tmp = pgprot_writecombine(tmp);
- else
- tmp = pgprot_noncached(tmp);
-#endif
-#if defined(__sparc__) || defined(__mips__)
- tmp = pgprot_noncached(tmp);
-#endif
- return tmp;
-}
-EXPORT_SYMBOL(ttm_io_prot);
-#endif
-
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#ifdef PMAP_WC
@@ -521,14 +475,15 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
#endif
return PMAP_NOCACHE;
}
+EXPORT_SYMBOL(ttm_io_prot);
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
unsigned long offset,
unsigned long size,
struct ttm_bo_kmap_obj *map)
{
- int flags;
struct ttm_mem_reg *mem = &bo->mem;
+ int flags;
if (bo->mem.bus.addr) {
map->bo_kmap_type = ttm_bo_map_premapped;
@@ -584,7 +539,9 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
* We need to use vmap to get the desired page protection
* or to make the buffer object look contiguous.
*/
- prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
+ prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
+ PAGE_KERNEL :
+ ttm_io_prot(mem->placement, PAGE_KERNEL);
map->bo_kmap_type = ttm_bo_map_vmap;
map->virtual = vmap(ttm->pages + start_page, num_pages,
0, prot);
@@ -609,7 +566,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
if (start_page > bo->num_pages)
return -EINVAL;
#if 0
- if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
+ if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
return -EPERM;
#endif
(void) ttm_mem_io_lock(man, false);
@@ -644,7 +601,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
vunmap(map->virtual, bo->mem.bus.size);
break;
case ttm_bo_map_kmap:
- kunmap(map->page);
+ kunmap(map->virtual);
break;
case ttm_bo_map_premapped:
break;
@@ -660,20 +617,30 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
EXPORT_SYMBOL(ttm_bo_kunmap);
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
- struct fence *fence,
+ void *sync_obj,
bool evict,
bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_driver *driver = bdev->driver;
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
struct ttm_mem_reg *old_mem = &bo->mem;
int ret;
struct ttm_buffer_object *ghost_obj;
+ void *tmp_obj = NULL;
- reservation_object_add_excl_fence(bo->resv, fence);
+ spin_lock(&bdev->fence_lock);
+ if (bo->sync_obj) {
+ tmp_obj = bo->sync_obj;
+ bo->sync_obj = NULL;
+ }
+ bo->sync_obj = driver->sync_obj_ref(sync_obj);
if (evict) {
ret = ttm_bo_wait(bo, false, false, false);
+ spin_unlock(&bdev->fence_lock);
+ if (tmp_obj)
+ driver->sync_obj_unref(&tmp_obj);
if (ret)
return ret;
@@ -694,13 +661,14 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
*/
set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+ spin_unlock(&bdev->fence_lock);
+ if (tmp_obj)
+ driver->sync_obj_unref(&tmp_obj);
ret = ttm_buffer_object_transfer(bo, &ghost_obj);
if (ret)
return ret;
- reservation_object_add_excl_fence(ghost_obj->resv, fence);
-
/**
* If we're not moving to fixed memory, the TTM object
 * needs to stay alive. Otherwise hang it on the ghost
diff --git a/sys/dev/pci/drm/ttm/ttm_bo_vm.c b/sys/dev/pci/drm/ttm/ttm_bo_vm.c
index de7cee96a22..c987aa03b22 100644
--- a/sys/dev/pci/drm/ttm/ttm_bo_vm.c
+++ b/sys/dev/pci/drm/ttm/ttm_bo_vm.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: ttm_bo_vm.c,v 1.13 2018/04/20 16:09:37 deraadt Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -37,235 +38,14 @@
#define TTM_BO_VM_NUM_PREFAULT 16
-static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo)
-{
- int ret = 0;
-
- if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
- goto out_unlock;
-
- /*
- * Quick non-stalling check for idle.
- */
- ret = ttm_bo_wait(bo, false, false, true);
- if (likely(ret == 0))
- goto out_unlock;
-
- /*
- * If possible, avoid waiting for GPU with mmap_sem
- * held.
- */
-#ifdef notyet
- if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
- ret = VM_FAULT_RETRY;
- if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
- goto out_unlock;
-
- ttm_bo_reference(bo);
- up_read(&vma->vm_mm->mmap_sem);
- (void) ttm_bo_wait(bo, false, true, false);
- ttm_bo_unreserve(bo);
- ttm_bo_unref(&bo);
- goto out_unlock;
- }
-#endif
-
- /*
- * Ordinary wait.
- */
- ret = ttm_bo_wait(bo, false, true, false);
- if (unlikely(ret != 0))
- ret = (ret != -ERESTARTSYS) ? VM_PAGER_ERROR :
- VM_PAGER_REFAULT;
-
-out_unlock:
- return ret;
-}
-
-#ifdef __linux__
-static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
- vma->vm_private_data;
- struct ttm_bo_device *bdev = bo->bdev;
- unsigned long page_offset;
- unsigned long page_last;
- unsigned long pfn;
- struct ttm_tt *ttm = NULL;
- struct vm_page *page;
- int ret;
- int i;
- unsigned long address = (unsigned long)vmf->virtual_address;
- int retval = VM_FAULT_NOPAGE;
- struct ttm_mem_type_manager *man =
- &bdev->man[bo->mem.mem_type];
- struct vm_area_struct cvma;
-
- /*
- * Work around locking order reversal in fault / nopfn
- * between mmap_sem and bo_reserve: Perform a trylock operation
- * for reserve, and if it fails, retry the fault after waiting
- * for the buffer to become unreserved.
- */
- ret = ttm_bo_reserve(bo, true, true, false, NULL);
- if (unlikely(ret != 0)) {
- if (ret != -EBUSY)
- return VM_FAULT_NOPAGE;
-
- if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
- if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
- ttm_bo_reference(bo);
- up_read(&vma->vm_mm->mmap_sem);
- (void) ttm_bo_wait_unreserved(bo);
- ttm_bo_unref(&bo);
- }
-
- return VM_FAULT_RETRY;
- }
-
- /*
- * If we'd want to change locking order to
- * mmap_sem -> bo::reserve, we'd use a blocking reserve here
- * instead of retrying the fault...
- */
- return VM_FAULT_NOPAGE;
- }
-
- /*
- * Refuse to fault imported pages. This should be handled
- * (if at all) by redirecting mmap to the exporter.
- */
- if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
- retval = VM_FAULT_SIGBUS;
- goto out_unlock;
- }
+ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *, const char __user *,
+ char __user *, size_t, off_t *, bool);
- if (bdev->driver->fault_reserve_notify) {
- ret = bdev->driver->fault_reserve_notify(bo);
- switch (ret) {
- case 0:
- break;
- case -EBUSY:
- case -ERESTARTSYS:
- retval = VM_FAULT_NOPAGE;
- goto out_unlock;
- default:
- retval = VM_FAULT_SIGBUS;
- goto out_unlock;
- }
- }
+int ttm_bo_vm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *,
+ int, int, vm_fault_t, vm_prot_t, int);
+void ttm_bo_vm_reference(struct uvm_object *);
+void ttm_bo_vm_detach(struct uvm_object *);
- /*
- * Wait for buffer data in transit, due to a pipelined
- * move.
- */
- ret = ttm_bo_vm_fault_idle(bo);
- if (unlikely(ret != 0)) {
- retval = ret;
-
- if (retval == VM_FAULT_RETRY &&
- !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
- /* The BO has already been unreserved. */
- return retval;
- }
-
- goto out_unlock;
- }
-
- ret = ttm_mem_io_lock(man, true);
- if (unlikely(ret != 0)) {
- retval = VM_FAULT_NOPAGE;
- goto out_unlock;
- }
- ret = ttm_mem_io_reserve_vm(bo);
- if (unlikely(ret != 0)) {
- retval = VM_FAULT_SIGBUS;
- goto out_io_unlock;
- }
-
- page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
- vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
- page_last = vma_pages(vma) + vma->vm_pgoff -
- drm_vma_node_start(&bo->vma_node);
-
- if (unlikely(page_offset >= bo->num_pages)) {
- retval = VM_FAULT_SIGBUS;
- goto out_io_unlock;
- }
-
- /*
- * Make a local vma copy to modify the page_prot member
- * and vm_flags if necessary. The vma parameter is protected
- * by mmap_sem in write mode.
- */
- cvma = *vma;
- cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
-
- if (bo->mem.bus.is_iomem) {
- cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
- cvma.vm_page_prot);
- } else {
- ttm = bo->ttm;
- cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
- cvma.vm_page_prot);
-
- /* Allocate all page at once, most common usage */
- if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
- retval = VM_FAULT_OOM;
- goto out_io_unlock;
- }
- }
-
- /*
- * Speculatively prefault a number of pages. Only error on
- * first page.
- */
- for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
- if (bo->mem.bus.is_iomem)
- pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
- else {
- page = ttm->pages[page_offset];
- if (unlikely(!page && i == 0)) {
- retval = VM_FAULT_OOM;
- goto out_io_unlock;
- } else if (unlikely(!page)) {
- break;
- }
- page->mapping = vma->vm_file->f_mapping;
- page->index = drm_vma_node_start(&bo->vma_node) +
- page_offset;
- pfn = page_to_pfn(page);
- }
-
- if (vma->vm_flags & VM_MIXEDMAP)
- ret = vm_insert_mixed(&cvma, address, pfn);
- else
- ret = vm_insert_pfn(&cvma, address, pfn);
-
- /*
- * Somebody beat us to this PTE or prefaulting to
- * an already populated PTE, or prefaulting error.
- */
-
- if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
- break;
- else if (unlikely(ret != 0)) {
- retval =
- (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
- goto out_io_unlock;
- }
-
- address += PAGE_SIZE;
- if (unlikely(++page_offset >= page_last))
- break;
- }
-out_io_unlock:
- ttm_mem_io_unlock(man);
-out_unlock:
- ttm_bo_unreserve(bo);
- return retval;
-}
-#else
int
ttm_bo_vm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
int npages, int centeridx, vm_fault_t fault_type,
@@ -293,10 +73,10 @@ ttm_bo_vm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
/*
* Work around locking order reversal in fault / nopfn
* between mmap_sem and bo_reserve: Perform a trylock operation
- * for reserve, and if it fails, retry the fault after waiting
- * for the buffer to become unreserved.
+ * for reserve, and if it fails, retry the fault after scheduling.
*/
- ret = ttm_bo_reserve(bo, true, true, false, NULL);
+
+ ret = ttm_bo_reserve(bo, true, true, false, 0);
if (unlikely(ret != 0)) {
uvmfault_unlockall(ufi, NULL, uobj, NULL);
ret = ttm_bo_reserve(bo, true, false, false, 0);
@@ -305,21 +85,17 @@ ttm_bo_vm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
return VM_PAGER_REFAULT;
}
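The UVM fault handler keeps the Linux workaround for the mmap_sem/bo-reserve lock-order reversal: try a non-blocking reserve first, and if the buffer is busy, drop all UVM locks, take the blocking reserve, and tell UVM to refault. In outline (the lines between the blocking reserve and the refault are elided by the hunk):

/* Reserve dance at the top of ttm_bo_vm_fault(), in outline. */
ret = ttm_bo_reserve(bo, true, true, false, 0);		/* trylock */
if (unlikely(ret != 0)) {
	uvmfault_unlockall(ufi, NULL, uobj, NULL);	/* avoid inversion */
	ret = ttm_bo_reserve(bo, true, false, false, 0);	/* block */
	/* ... elided in the diff: back off before retrying ... */
	return VM_PAGER_REFAULT;			/* restart fault */
}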
- /*
- * Refuse to fault imported pages. This should be handled
- * (if at all) by redirecting mmap to the exporter.
- */
- if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
- retval = VM_PAGER_ERROR;
- goto out_unlock;
- }
-
if (bdev->driver->fault_reserve_notify) {
ret = bdev->driver->fault_reserve_notify(bo);
switch (ret) {
case 0:
break;
case -EBUSY:
+#if 0
+ set_need_resched();
+#else
+ printf("resched?\n");
+#endif
case -ERESTARTSYS:
retval = VM_PAGER_REFAULT;
goto out_unlock;
@@ -333,13 +109,18 @@ ttm_bo_vm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
* Wait for buffer data in transit, due to a pipelined
* move.
*/
- ret = ttm_bo_vm_fault_idle(bo);
- if (unlikely(ret != 0)) {
- retval = ret;
- retval = (ret != -ERESTARTSYS) ?
- VM_PAGER_ERROR : VM_PAGER_REFAULT;
- goto out_unlock;
- }
+
+ spin_lock(&bdev->fence_lock);
+ if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
+ ret = ttm_bo_wait(bo, false, true, false);
+ spin_unlock(&bdev->fence_lock);
+ if (unlikely(ret != 0)) {
+ retval = (ret != -ERESTARTSYS) ?
+ VM_PAGER_ERROR : VM_PAGER_REFAULT;
+ goto out_unlock;
+ }
+ } else
+ spin_unlock(&bdev->fence_lock);
ret = ttm_mem_io_lock(man, true);
if (unlikely(ret != 0)) {
@@ -363,9 +144,17 @@ ttm_bo_vm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
}
/*
- * Make a local vma copy to modify the page_prot member
- * and vm_flags if necessary. The vma parameter is protected
- * by mmap_sem in write mode.
+ * Strictly, we're not allowed to modify vma->vm_page_prot here,
+ * since the mmap_sem is only held in read mode. However, we
+ * modify only the caching bits of vma->vm_page_prot and
+ * consider those bits protected by
+ * the bo->rwlock, as we should be the only writers.
+ * There shouldn't really be any readers of these bits except
+ * within vm_insert_mixed()? fork?
+ *
+ * TODO: Add a list of vmas to the bo, and change the
+ * vma->vm_page_prot when the object changes caching policy, with
+ * the correct locks held.
*/
mapprot = ufi->entry->protection;
if (bo->mem.bus.is_iomem) {
@@ -411,7 +200,7 @@ ttm_bo_vm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
* an already populated PTE, or prefaulting error.
*/
- if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+ if (ret != 0 && i > 0)
break;
else if (unlikely(ret != 0)) {
uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
@@ -433,33 +222,6 @@ out_unlock:
ttm_bo_unreserve(bo);
return retval;
}
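One behavioural note on the hunk just above: with the -EBUSY special case gone, a failure in the speculative prefault loop is fatal only on the first page; on any later page it merely stops prefaulting. A hypothetical helper spelling out that policy (enter_pte stands in for the real PTE insertion):

/* Prefault error policy after the change above; sketch only. */
static int
prefault_policy_sketch(int (*enter_pte)(int), int npages)
{
	int i, ret;

	for (i = 0; i < npages; i++) {
		ret = enter_pte(i);
		if (ret != 0 && i > 0)
			break;		/* later page: stop, no error */
		else if (ret != 0)
			return ret;	/* first page must succeed */
	}
	return 0;
}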
-#endif
-
-#ifdef notyet
-static void ttm_bo_vm_open(struct vm_area_struct *vma)
-{
- struct ttm_buffer_object *bo =
- (struct ttm_buffer_object *)vma->vm_private_data;
-
- WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);
-
- (void)ttm_bo_reference(bo);
-}
-
-static void ttm_bo_vm_close(struct vm_area_struct *vma)
-{
- struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
-
- ttm_bo_unref(&bo);
- vma->vm_private_data = NULL;
-}
-
-static const struct vm_operations_struct ttm_bo_vm_ops = {
- .fault = ttm_bo_vm_fault,
- .open = ttm_bo_vm_open,
- .close = ttm_bo_vm_close
-};
-#endif
void
ttm_bo_vm_reference(struct uvm_object *uobj)
@@ -510,94 +272,214 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
return bo;
}
-#ifdef __linux__
-int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
- struct ttm_bo_device *bdev)
+struct uvm_object *
+ttm_bo_mmap(voff_t off, vsize_t size, struct ttm_bo_device *bdev)
{
struct ttm_bo_driver *driver;
struct ttm_buffer_object *bo;
int ret;
- bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
- if (unlikely(!bo))
- return -EINVAL;
+ bo = ttm_bo_vm_lookup(bdev, off >> PAGE_SHIFT, size >> PAGE_SHIFT);
+ if (unlikely(!bo)) {
+ ret = -EINVAL;
+ return NULL;
+ }
driver = bo->bdev->driver;
if (unlikely(!driver->verify_access)) {
ret = -EPERM;
goto out_unref;
}
+#ifdef notyet
ret = driver->verify_access(bo, filp);
if (unlikely(ret != 0))
goto out_unref;
+#endif
- vma->vm_ops = &ttm_bo_vm_ops;
+ bo->uobj.pgops = &ttm_bo_vm_ops;
+#if 0
/*
* Note: We're transferring the bo reference to
* vma->vm_private_data here.
*/
vma->vm_private_data = bo;
-
- /*
- * We'd like to use VM_PFNMAP on shared mappings, where
- * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
- * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
- * bad for performance. Until that has been sorted out, use
- * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
- */
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
- return 0;
+ vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+#else
+ bo->uobj.uo_refs++;
+#endif
+ return &bo->uobj;
out_unref:
ttm_bo_unref(&bo);
- return ret;
+ return NULL;
}
-#else
-struct uvm_object *
-ttm_bo_mmap(voff_t off, vsize_t size, struct ttm_bo_device *bdev)
+EXPORT_SYMBOL(ttm_bo_mmap);
+
+#ifdef notyet
+int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
+{
+ if (vma->vm_pgoff != 0)
+ return -EACCES;
+
+ vma->vm_ops = &ttm_bo_vm_ops;
+ vma->vm_private_data = ttm_bo_reference(bo);
+ vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
+ return 0;
+}
+#endif
+EXPORT_SYMBOL(ttm_fbdev_mmap);
+
+
+ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
+ const char __user *wbuf, char __user *rbuf, size_t count,
+ off_t *f_pos, bool write)
{
- struct ttm_bo_driver *driver;
struct ttm_buffer_object *bo;
+ struct ttm_bo_driver *driver;
+ struct ttm_bo_kmap_obj map;
+ unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
+ unsigned long kmap_offset;
+ unsigned long kmap_end;
+ unsigned long kmap_num;
+ size_t io_size;
+ unsigned int page_offset;
+ char *virtual;
int ret;
+ bool no_wait = false;
+ bool dummy;
- bo = ttm_bo_vm_lookup(bdev, off >> PAGE_SHIFT, size >> PAGE_SHIFT);
- if (unlikely(!bo))
- return NULL;
+ bo = ttm_bo_vm_lookup(bdev, dev_offset, 1);
+ if (unlikely(bo == NULL))
+ return -EFAULT;
driver = bo->bdev->driver;
if (unlikely(!driver->verify_access)) {
ret = -EPERM;
goto out_unref;
}
-#ifdef notyet
+
ret = driver->verify_access(bo, filp);
if (unlikely(ret != 0))
goto out_unref;
-#endif
- bo->uobj.pgops = &ttm_bo_vm_ops;
- bo->uobj.uo_refs++;
- return &bo->uobj;
+ kmap_offset = dev_offset - drm_vma_node_start(&bo->vma_node);
+ if (unlikely(kmap_offset >= bo->num_pages)) {
+ ret = -EFBIG;
+ goto out_unref;
+ }
+
+ page_offset = *f_pos & PAGE_MASK;
+ io_size = bo->num_pages - kmap_offset;
+ io_size = (io_size << PAGE_SHIFT) - page_offset;
+ if (count < io_size)
+ io_size = count;
+
+ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
+ kmap_num = kmap_end - kmap_offset + 1;
+
+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EBUSY:
+ ret = -EAGAIN;
+ goto out_unref;
+ default:
+ goto out_unref;
+ }
+
+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0)) {
+ ttm_bo_unreserve(bo);
+ goto out_unref;
+ }
+
+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
+ virtual += page_offset;
+
+ if (write)
+ ret = copy_from_user(virtual, wbuf, io_size);
+ else
+ ret = copy_to_user(rbuf, virtual, io_size);
+
+ ttm_bo_kunmap(&map);
+ ttm_bo_unreserve(bo);
+ ttm_bo_unref(&bo);
+
+ if (unlikely(ret != 0))
+ return -EFBIG;
+
+ *f_pos += io_size;
+
+ return io_size;
out_unref:
ttm_bo_unref(&bo);
- return NULL;
+ return ret;
}
-#endif
-EXPORT_SYMBOL(ttm_bo_mmap);
-#ifdef notyet
-int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
+ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
+ char __user *rbuf, size_t count, off_t *f_pos,
+ bool write)
{
- if (vma->vm_pgoff != 0)
- return -EACCES;
+ struct ttm_bo_kmap_obj map;
+ unsigned long kmap_offset;
+ unsigned long kmap_end;
+ unsigned long kmap_num;
+ size_t io_size;
+ unsigned int page_offset;
+ char *virtual;
+ int ret;
+ bool no_wait = false;
+ bool dummy;
+
+ kmap_offset = (*f_pos >> PAGE_SHIFT);
+ if (unlikely(kmap_offset >= bo->num_pages))
+ return -EFBIG;
+
+ page_offset = *f_pos & PAGE_MASK;
+ io_size = bo->num_pages - kmap_offset;
+ io_size = (io_size << PAGE_SHIFT) - page_offset;
+ if (count < io_size)
+ io_size = count;
+
+ kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
+ kmap_num = kmap_end - kmap_offset + 1;
+
+ ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+
+ switch (ret) {
+ case 0:
+ break;
+ case -EBUSY:
+ return -EAGAIN;
+ default:
+ return ret;
+ }
- vma->vm_ops = &ttm_bo_vm_ops;
- vma->vm_private_data = ttm_bo_reference(bo);
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND;
- return 0;
+ ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
+ if (unlikely(ret != 0)) {
+ ttm_bo_unreserve(bo);
+ return ret;
+ }
+
+ virtual = ttm_kmap_obj_virtual(&map, &dummy);
+ virtual += page_offset;
+
+ if (write)
+ ret = copy_from_user(virtual, wbuf, io_size);
+ else
+ ret = copy_to_user(rbuf, virtual, io_size);
+
+ ttm_bo_kunmap(&map);
+ ttm_bo_unreserve(bo);
+ ttm_bo_unref(&bo);
+
+ if (unlikely(ret != 0))
+ return ret;
+
+ *f_pos += io_size;
+
+ return io_size;
}
-EXPORT_SYMBOL(ttm_fbdev_mmap);
-#endif
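ttm_bo_io() and ttm_bo_fbdev_io() above share the same kmap-window arithmetic. Note that on OpenBSD PAGE_MASK is the low-bits mask (PAGE_SIZE - 1), so *f_pos & PAGE_MASK really is the byte offset within the first page. A worked example for ttm_bo_fbdev_io() with 4 KiB pages, assuming bo->num_pages = 16, *f_pos = 0x1804 and count = 0x1000:

	kmap_offset = 0x1804 >> 12                = 1      (first BO page touched)
	page_offset = 0x1804 & 0xfff              = 0x804  (offset in that page)
	io_size     = ((16 - 1) << 12) - 0x804    = 0xe7fc, clipped to count = 0x1000
	kmap_end    = (0x1804 + 0x1000 - 1) >> 12 = 2
	kmap_num    = 2 - 1 + 1                   = 2      (pages to kmap)

So the request maps two pages and copies 0x1000 bytes starting 0x804 into the window.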
diff --git a/sys/dev/pci/drm/ttm/ttm_execbuf_util.c b/sys/dev/pci/drm/ttm/ttm_execbuf_util.c
index 7d139a9d277..f773eacee4b 100644
--- a/sys/dev/pci/drm/ttm/ttm_execbuf_util.c
+++ b/sys/dev/pci/drm/ttm/ttm_execbuf_util.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: ttm_execbuf_util.c,v 1.5 2018/04/20 16:09:37 deraadt Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -29,13 +30,23 @@
#include <dev/pci/drm/ttm/ttm_bo_driver.h>
#include <dev/pci/drm/ttm/ttm_placement.h>
-static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
- struct ttm_validate_buffer *entry)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
- list_for_each_entry_continue_reverse(entry, list, head) {
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
+ if (!entry->reserved)
+ continue;
- __ttm_bo_unreserve(bo);
+ if (entry->removed) {
+ ttm_bo_add_to_lru(bo);
+			entry->removed = false;
+		}
+ entry->reserved = false;
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
}
}
@@ -45,14 +56,47 @@ static void ttm_eu_del_from_lru_locked(struct list_head *list)
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- unsigned put_count = ttm_bo_del_from_lru(bo);
+ if (!entry->reserved)
+ continue;
- ttm_bo_list_ref_sub(bo, put_count, true);
+ if (!entry->removed) {
+ entry->put_count = ttm_bo_del_from_lru(bo);
+ entry->removed = true;
+ }
}
}
-void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
- struct list_head *list)
+static void ttm_eu_list_ref_sub(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+
+ if (entry->put_count) {
+ ttm_bo_list_ref_sub(bo, entry->put_count, true);
+ entry->put_count = 0;
+ }
+ }
+}
+
+static int ttm_eu_wait_unreserved_locked(struct list_head *list,
+ struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_global *glob = bo->glob;
+ int ret;
+
+ ttm_eu_del_from_lru_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ret = ttm_bo_wait_unreserved(bo, true);
+ spin_lock(&glob->lru_lock);
+ if (unlikely(ret != 0))
+ ttm_eu_backoff_reservation_locked(list);
+ return ret;
+}
+
+
+void ttm_eu_backoff_reservation(struct list_head *list)
{
struct ttm_validate_buffer *entry;
struct ttm_bo_global *glob;
@@ -62,18 +106,9 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
-
spin_lock(&glob->lru_lock);
- list_for_each_entry(entry, list, head) {
- struct ttm_buffer_object *bo = entry->bo;
-
- ttm_bo_add_to_lru(bo);
- __ttm_bo_unreserve(bo);
- }
+ ttm_eu_backoff_reservation_locked(list);
spin_unlock(&glob->lru_lock);
-
- if (ticket)
- ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
@@ -89,95 +124,78 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
* buffers in different orders.
*/
-int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
- struct list_head *list, bool intr,
- struct list_head *dups)
+int ttm_eu_reserve_buffers(struct list_head *list)
{
struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
int ret;
+ uint32_t val_seq;
if (list_empty(list))
return 0;
+ list_for_each_entry(entry, list, head) {
+ entry->reserved = false;
+ entry->put_count = 0;
+ entry->removed = false;
+ }
+
entry = list_first_entry(list, struct ttm_validate_buffer, head);
glob = entry->bo->glob;
- if (ticket)
- ww_acquire_init(ticket, &reservation_ww_class);
+retry:
+ spin_lock(&glob->lru_lock);
+ val_seq = entry->bo->bdev->val_seq++;
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true,
- ticket);
- if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
- __ttm_bo_unreserve(bo);
-
- ret = -EBUSY;
-
- } else if (ret == -EALREADY && dups) {
- struct ttm_validate_buffer *safe = entry;
- entry = list_prev_entry(entry, head);
- list_del(&safe->head);
- list_add(&safe->head, dups);
- continue;
- }
-
- if (!ret) {
- if (!entry->shared)
- continue;
-
- ret = reservation_object_reserve_shared(bo->resv);
- if (!ret)
- continue;
- }
-
- /* uh oh, we lost out, drop every reservation and try
- * to only reserve this buffer, then start over if
- * this succeeds.
- */
- ttm_eu_backoff_reservation_reverse(list, entry);
-
- if (ret == -EDEADLK && intr) {
- ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
- ticket);
- } else if (ret == -EDEADLK) {
- ww_mutex_lock_slow(&bo->resv->lock, ticket);
- ret = 0;
- }
-
- if (!ret && entry->shared)
- ret = reservation_object_reserve_shared(bo->resv);
-
- if (unlikely(ret != 0)) {
- if (ret == -EINTR)
- ret = -ERESTARTSYS;
- if (ticket) {
- ww_acquire_done(ticket);
- ww_acquire_fini(ticket);
+retry_this_bo:
+ ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+ switch (ret) {
+ case 0:
+ break;
+ case -EBUSY:
+ ret = ttm_eu_wait_unreserved_locked(list, bo);
+ if (unlikely(ret != 0)) {
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+ return ret;
}
+ goto retry_this_bo;
+ case -EAGAIN:
+ ttm_eu_backoff_reservation_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+ ret = ttm_bo_wait_unreserved(bo, true);
+ if (unlikely(ret != 0))
+ return ret;
+ goto retry;
+ default:
+ ttm_eu_backoff_reservation_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
return ret;
}
- /* move this item to the front of the list,
- * forces correct iteration of the loop without keeping track
- */
- list_del(&entry->head);
- list_add(&entry->head, list);
+ entry->reserved = true;
+ if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+ ttm_eu_backoff_reservation_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+ return -EBUSY;
+ }
}
- if (ticket)
- ww_acquire_done(ticket);
- spin_lock(&glob->lru_lock);
ttm_eu_del_from_lru_locked(list);
spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+
return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
-void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
- struct list_head *list, struct fence *fence)
+void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
struct ttm_validate_buffer *entry;
struct ttm_buffer_object *bo;
@@ -194,18 +212,21 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
glob = bo->glob;
spin_lock(&glob->lru_lock);
+ spin_lock(&bdev->fence_lock);
list_for_each_entry(entry, list, head) {
bo = entry->bo;
- if (entry->shared)
- reservation_object_add_shared_fence(bo->resv, fence);
- else
- reservation_object_add_excl_fence(bo->resv, fence);
- ttm_bo_add_to_lru(bo);
- __ttm_bo_unreserve(bo);
+ entry->old_sync_obj = bo->sync_obj;
+ bo->sync_obj = driver->sync_obj_ref(sync_obj);
+ ttm_bo_unreserve_locked(bo);
+ entry->reserved = false;
}
+ spin_unlock(&bdev->fence_lock);
spin_unlock(&glob->lru_lock);
- if (ticket)
- ww_acquire_fini(ticket);
+
+ list_for_each_entry(entry, list, head) {
+ if (entry->old_sync_obj)
+ driver->sync_obj_unref(&entry->old_sync_obj);
+ }
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
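Together with the header changes below, the ticket-less API reads like this at a call site. This is a sketch, not code from the tree; submit_commands() and sync_obj are stand-ins for the driver-specific parts:

/* Hypothetical submission flow with the list-based execbuf API. */
struct list_head list;	/* populated with ttm_validate_buffer entries */
int ret;

ret = ttm_eu_reserve_buffers(&list);
if (ret)
	return ret;			/* nothing left reserved */

ret = submit_commands(&list);		/* driver-specific, assumed */
if (ret) {
	ttm_eu_backoff_reservation(&list);	/* undo reservations */
	return ret;
}
ttm_eu_fence_buffer_objects(&list, sync_obj);	/* fences and unreserves */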
diff --git a/sys/dev/pci/drm/ttm/ttm_execbuf_util.h b/sys/dev/pci/drm/ttm/ttm_execbuf_util.h
index ca884e611dd..5c8fa203ff8 100644
--- a/sys/dev/pci/drm/ttm/ttm_execbuf_util.h
+++ b/sys/dev/pci/drm/ttm/ttm_execbuf_util.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: ttm_execbuf_util.h,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -38,36 +39,36 @@
*
* @head: list head for thread-private list.
* @bo: refcounted buffer object pointer.
- * @shared: should the fence be added shared?
+ * @reserved: Indicates whether @bo has been reserved for validation.
+ * @removed: Indicates whether @bo has been removed from lru lists.
+ * @put_count: Number of outstanding references on bo::list_kref.
+ * @old_sync_obj: Pointer to a sync object about to be unreferenced
*/
struct ttm_validate_buffer {
struct list_head head;
struct ttm_buffer_object *bo;
- bool shared;
+ bool reserved;
+ bool removed;
+ int put_count;
+ void *old_sync_obj;
};
/**
* function ttm_eu_backoff_reservation
*
- * @ticket: ww_acquire_ctx from reserve call
* @list: thread private list of ttm_validate_buffer structs.
*
* Undoes all buffer validation reservations for bos pointed to by
* the list entries.
*/
-extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
- struct list_head *list);
+extern void ttm_eu_backoff_reservation(struct list_head *list);
/**
* function ttm_eu_reserve_buffers
*
- * @ticket: [out] ww_acquire_ctx filled in by call, or NULL if only
- * non-blocking reserves should be tried.
* @list: thread private list of ttm_validate_buffer structs.
- * @intr: should the wait be interruptible
- * @dups: [out] optional list of duplicates.
*
* Tries to reserve bos pointed to by the list entries for validation.
* If the function returns 0, all buffers are marked as "unfenced",
@@ -79,14 +80,9 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
* CPU write reservations to be cleared, and for other threads to
* unreserve their buffers.
*
- * If intr is set to true, this function may return -ERESTARTSYS if the
- * calling process receives a signal while waiting. In that case, no
- * buffers on the list will be reserved upon return.
- *
- * If dups is non NULL all buffers already reserved by the current thread
- * (e.g. duplicates) are added to this list, otherwise -EALREADY is returned
- * on the first already reserved buffer and all buffers from the list are
- * unreserved again.
+ * This function may return -ERESTART or -EAGAIN if the calling process
+ * receives a signal while waiting. In that case, no buffers on the list
+ * will be reserved upon return.
*
* Buffers reserved by this function should be unreserved by
* a call to either ttm_eu_backoff_reservation() or
@@ -94,16 +90,13 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
* has failed.
*/
-extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
- struct list_head *list, bool intr,
- struct list_head *dups);
+extern int ttm_eu_reserve_buffers(struct list_head *list);
/**
* function ttm_eu_fence_buffer_objects.
*
- * @ticket: ww_acquire_ctx from reserve call
* @list: thread private list of ttm_validate_buffer structs.
- * @fence: The new exclusive fence for the buffers.
+ * @sync_obj: The new sync object for the buffers.
*
* This function should be called when command submission is complete, and
* it will add a new sync object to bos pointed to by entries on @list.
@@ -111,8 +104,6 @@ extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
*
*/
-extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
- struct list_head *list,
- struct fence *fence);
+extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
#endif
diff --git a/sys/dev/pci/drm/ttm/ttm_memory.c b/sys/dev/pci/drm/ttm/ttm_memory.c
index 5b4f74791fa..63fe874d192 100644
--- a/sys/dev/pci/drm/ttm/ttm_memory.c
+++ b/sys/dev/pci/drm/ttm/ttm_memory.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: ttm_memory.c,v 1.12 2018/04/20 16:09:37 deraadt Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -27,7 +28,7 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <dev/pci/drm/drm_linux.h>
+#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/ttm/ttm_memory.h>
#include <dev/pci/drm/ttm/ttm_module.h>
#include <dev/pci/drm/ttm/ttm_page_alloc.h>
@@ -143,7 +144,9 @@ static ssize_t ttm_mem_zone_store(struct kobject *kobj,
return size;
}
+#endif
+#ifdef notyet
static struct attribute *ttm_mem_zone_attrs[] = {
&ttm_mem_sys,
&ttm_mem_emer,
@@ -191,7 +194,7 @@ static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
if (from_wq)
target = zone->swap_limit;
- else if (capable(CAP_SYS_ADMIN))
+ else if (DRM_SUSER(curproc))
target = zone->emer_mem;
else
target = zone->max_mem;
@@ -230,15 +233,13 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
goto out;
}
out:
+ glob->task_queued = false;
spin_unlock(&glob->lock);
}
-
-
-static void ttm_shrink_work(struct work_struct *work)
+static void ttm_shrink_work(void *arg1)
{
- struct ttm_mem_global *glob =
- container_of(work, struct ttm_mem_global, work);
+ struct ttm_mem_global *glob = arg1;
ttm_shrink(glob, true, 0ULL);
}
@@ -293,8 +294,7 @@ static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
zone->glob = glob;
glob->zone_highmem = zone;
ret = kobject_init_and_add(
- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
- zone->name);
+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
if (unlikely(ret != 0)) {
kobject_put(&zone->kobj);
return ret;
@@ -351,12 +351,16 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
{
uint64_t mem;
int ret;
+#ifdef DRMDEBUG
int i;
struct ttm_mem_zone *zone;
+#endif
mtx_init(&glob->lock, IPL_TTY);
- glob->swap_queue = create_singlethread_workqueue("ttm_swap");
- INIT_WORK(&glob->work, ttm_shrink_work);
+ glob->swap_queue = taskq_create("ttm_swap", 1, IPL_TTY, 0);
+ glob->task_queued = false;
+ task_set(&glob->task, ttm_shrink_work, glob);
+
ret = kobject_init_and_add(
&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
if (unlikely(ret != 0)) {
@@ -378,11 +382,13 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
if (unlikely(ret != 0))
goto out_no_zone;
#endif
+#ifdef DRMDEBUG
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
zone->name, (unsigned long long)zone->max_mem >> 10);
}
+#endif
ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
return 0;
@@ -401,14 +407,13 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
ttm_page_alloc_fini();
ttm_dma_page_alloc_fini();
- flush_workqueue(glob->swap_queue);
- destroy_workqueue(glob->swap_queue);
+ taskq_destroy(glob->swap_queue);
glob->swap_queue = NULL;
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
kobject_del(&zone->kobj);
kobject_put(&zone->kobj);
- }
+ }
kobject_del(&glob->kobj);
kobject_put(&glob->kobj);
}
@@ -429,11 +434,14 @@ static void ttm_check_swapping(struct ttm_mem_global *glob)
}
}
+ if (glob->task_queued)
+ needs_swapping = false;
+ else
+ glob->task_queued = true;
spin_unlock(&glob->lock);
if (unlikely(needs_swapping))
- (void)queue_work(glob->swap_queue, &glob->work);
-
+ task_add(glob->swap_queue, &glob->task);
}
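The workqueue-to-taskq conversion above pairs taskq_create()/task_set() at init with a task_queued flag: the flag is set under glob->lock when the task is handed to the queue and cleared by ttm_shrink() before it unlocks, so the shrink work is only ever queued once. Reduced to its essentials:

/* Single-shot task pattern used above, in outline. */
glob->swap_queue = taskq_create("ttm_swap", 1, IPL_TTY, 0);
task_set(&glob->task, ttm_shrink_work, glob);

/* enqueue side, decided under glob->lock: */
if (!glob->task_queued) {
	glob->task_queued = true;
	task_add(glob->swap_queue, &glob->task);
}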
static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
@@ -475,7 +483,7 @@ static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
if (single_zone && zone != single_zone)
continue;
- limit = (capable(CAP_SYS_ADMIN)) ?
+ limit = (DRM_SUSER(curproc)) ?
zone->emer_mem : zone->max_mem;
if (zone->used_mem > limit)
diff --git a/sys/dev/pci/drm/ttm/ttm_memory.h b/sys/dev/pci/drm/ttm/ttm_memory.h
index 402a283f032..e686c6338a0 100644
--- a/sys/dev/pci/drm/ttm/ttm_memory.h
+++ b/sys/dev/pci/drm/ttm/ttm_memory.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: ttm_memory.h,v 1.7 2018/04/20 16:09:37 deraadt Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -71,8 +72,9 @@ struct ttm_mem_zone;
struct ttm_mem_global {
struct kobject kobj;
struct ttm_mem_shrink *shrink;
- struct workqueue_struct *swap_queue;
- struct work_struct work;
+ struct taskq *swap_queue;
+ struct task task;
+ bool task_queued;
spinlock_t lock;
struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
unsigned int num_zones;
diff --git a/sys/dev/pci/drm/ttm/ttm_module.h b/sys/dev/pci/drm/ttm/ttm_module.h
index 7281ee748cf..59f8bfb6f99 100644
--- a/sys/dev/pci/drm/ttm/ttm_module.h
+++ b/sys/dev/pci/drm/ttm/ttm_module.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: ttm_module.h,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
/**************************************************************************
*
* Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
diff --git a/sys/dev/pci/drm/ttm/ttm_page_alloc.c b/sys/dev/pci/drm/ttm/ttm_page_alloc.c
index 31d3bdb5747..eb32c3e5873 100644
--- a/sys/dev/pci/drm/ttm/ttm_page_alloc.c
+++ b/sys/dev/pci/drm/ttm/ttm_page_alloc.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: ttm_page_alloc.c,v 1.13 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright (c) Red Hat Inc.
@@ -33,15 +34,16 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <dev/pci/drm/drm_linux.h>
-
+#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/ttm/ttm_bo_driver.h>
#include <dev/pci/drm/ttm/ttm_page_alloc.h>
#ifdef TTM_HAS_AGP
-#include <asm/agp.h>
+#include <dev/pci/agpvar.h>
#endif
+#include <uvm/uvm_extern.h>
+
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct vm_page *))
#define SMALL_ALLOCATION 16
#define FREE_ALL_PAGES (~0U)
@@ -63,7 +65,7 @@ struct ttm_page_pool {
spinlock_t lock;
bool fill_lock;
struct pglist list;
- gfp_t gfp_flags;
+ int ttm_page_alloc_flags;
unsigned npages;
char *name;
unsigned long nfrees;
@@ -137,6 +139,36 @@ static struct attribute *ttm_pool_attrs[] = {
};
#endif
+struct vm_page *ttm_uvm_alloc_page(void);
+void ttm_uvm_free_page(struct vm_page *);
+
+struct vm_page *
+ttm_uvm_alloc_page(void)
+{
+ struct pglist mlist;
+ int error;
+
+ TAILQ_INIT(&mlist);
+ error = uvm_pglistalloc(PAGE_SIZE, dma_constraint.ucr_low,
+ dma_constraint.ucr_high, 0, 0, &mlist,
+ 1, UVM_PLA_WAITOK | UVM_PLA_ZERO);
+ if (error)
+ return NULL;
+
+ return TAILQ_FIRST(&mlist);
+}
+
+void
+ttm_uvm_free_page(struct vm_page *m)
+{
+#ifdef notyet
+ KASSERT(m->uobject == NULL);
+ KASSERT(m->wire_count == 1);
+ KASSERT((m->pg_flags & PG_FAKE) != 0);
+#endif
+ uvm_pagefree(m);
+}
+
static void ttm_pool_kobj_release(struct kobject *kobj)
{
struct ttm_pool_manager *m =
@@ -202,7 +234,7 @@ static const struct sysfs_ops ttm_pool_sysfs_ops = {
.show = &ttm_pool_show,
.store = &ttm_pool_store,
};
-#endif
+#endif // notyet
static struct kobj_type ttm_pool_kobj_type = {
.release = &ttm_pool_kobj_release,
@@ -212,16 +244,23 @@ static struct kobj_type ttm_pool_kobj_type = {
#endif
};
+#ifndef PG_PMAP_WC
+#define PG_PMAP_WC PG_PMAP_UC
+#endif
+
static struct ttm_pool_manager *_manager;
-#ifndef CONFIG_X86
static int set_pages_array_wb(struct vm_page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
+#if defined(__amd64__) || defined(__i386__) || defined(__powerpc__)
int i;
for (i = 0; i < addrinarray; i++)
- unmap_page_from_agp(pages[i]);
+ atomic_clearbits_int(&pages[i]->pg_flags, PG_PMAP_WC);
+#else
+ return -ENOSYS;
+#endif
#endif
return 0;
}
@@ -229,10 +268,14 @@ static int set_pages_array_wb(struct vm_page **pages, int addrinarray)
static int set_pages_array_wc(struct vm_page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
+#if defined(__amd64__) || defined(__i386__) || defined(__powerpc__)
int i;
for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
+ atomic_setbits_int(&pages[i]->pg_flags, PG_PMAP_WC);
+#else
+ return -ENOSYS;
+#endif
#endif
return 0;
}
@@ -240,14 +283,17 @@ static int set_pages_array_wc(struct vm_page **pages, int addrinarray)
static int set_pages_array_uc(struct vm_page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
+ printf("%s stub\n", __func__);
+ return -ENOSYS;
+#ifdef notyet
int i;
for (i = 0; i < addrinarray; i++)
map_page_into_agp(pages[i]);
#endif
+#endif
return 0;
}
-#endif
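The set_pages_array_* conversions above track caching state in the page's pg_flags instead of remapping through the AGP layer: write-combining sets PG_PMAP_WC (aliased to PG_PMAP_UC where the platform lacks a real WC attribute, per the #ifndef earlier in this file) and write-back clears it. In outline:

/* pg_flags-based caching toggles, as used above. */
atomic_setbits_int(&pg->pg_flags, PG_PMAP_WC);		/* -> write-combine */
atomic_clearbits_int(&pg->pg_flags, PG_PMAP_WC);	/* -> write-back */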
/**
* Select the right pool or requested caching state and ttm flags. */
@@ -277,7 +323,7 @@ static void ttm_pages_put(struct vm_page *pages[], unsigned npages)
if (set_pages_array_wb(pages, npages))
pr_err("Failed to set %d pages to wb!\n", npages);
for (i = 0; i < npages; ++i)
- __free_page(pages[i]);
+ ttm_uvm_free_page(pages[i]);
}
static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
@@ -295,12 +341,9 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
*
* @pool: to free the pages from
* @free_all: If set to true will free all pages in pool
- * @use_static: Safe to use static buffer
**/
-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
- bool use_static)
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
- static struct vm_page *static_buf[NUM_PAGES_TO_ALLOC];
unsigned long irq_flags;
struct vm_page *p, *p1;
struct vm_page **pages_to_free;
@@ -311,11 +354,8 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
if (NUM_PAGES_TO_ALLOC < nr_free)
npages_to_free = NUM_PAGES_TO_ALLOC;
- if (use_static)
- pages_to_free = static_buf;
- else
- pages_to_free = kmalloc(npages_to_free * sizeof(struct vm_page *),
- GFP_KERNEL);
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct vm_page *),
+ GFP_KERNEL);
if (!pages_to_free) {
pr_err("Failed to allocate memory for pool free operation\n");
return 0;
@@ -380,66 +420,54 @@ restart:
if (freed_pages)
ttm_pages_put(pages_to_free, freed_pages);
out:
- if (pages_to_free != static_buf)
- kfree(pages_to_free);
+ kfree(pages_to_free);
return nr_free;
}
+#ifdef notyet
+/* Get good estimation how many pages are free in pools */
+static int ttm_pool_get_num_unused_pages(void)
+{
+ unsigned i;
+ int total = 0;
+ for (i = 0; i < NUM_POOLS; ++i)
+ total += _manager->pools[i].npages;
+
+ return total;
+}
+#endif
+
/**
* Callback for mm to request pool to reduce number of page held.
- *
- * XXX: (dchinner) Deadlock warning!
- *
- * This code is crying out for a shrinker per pool....
*/
#ifdef notyet
-static unsigned long
-ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+static int ttm_pool_mm_shrink(struct shrinker *shrink,
+ struct shrink_control *sc)
{
- static DEFINE_MUTEX(lock);
- static unsigned start_pool;
+ static atomic_t start_pool = ATOMIC_INIT(0);
unsigned i;
- unsigned pool_offset;
+ unsigned pool_offset = atomic_add_return(1, &start_pool);
struct ttm_page_pool *pool;
int shrink_pages = sc->nr_to_scan;
- unsigned long freed = 0;
- if (!mutex_trylock(&lock))
- return SHRINK_STOP;
- pool_offset = ++start_pool % NUM_POOLS;
+ pool_offset = pool_offset % NUM_POOLS;
/* select start pool in round robin fashion */
for (i = 0; i < NUM_POOLS; ++i) {
unsigned nr_free = shrink_pages;
if (shrink_pages == 0)
break;
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
- /* OK to use static buffer since global mutex is held. */
- shrink_pages = ttm_page_pool_free(pool, nr_free, true);
- freed += nr_free - shrink_pages;
+ shrink_pages = ttm_page_pool_free(pool, nr_free);
}
- mutex_unlock(&lock);
- return freed;
-}
-
-
-static unsigned long
-ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
-{
- unsigned i;
- unsigned long count = 0;
-
- for (i = 0; i < NUM_POOLS; ++i)
- count += _manager->pools[i].npages;
-
- return count;
+ /* return estimated number of unused pages in pool */
+ return ttm_pool_get_num_unused_pages();
}
#endif
static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
#ifdef notyet
- manager->mm_shrink.count_objects = ttm_pool_shrink_count;
- manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
+ manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
#endif
@@ -447,7 +475,7 @@ static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
- STUB();
+ printf("%s stub\n", __func__);
#ifdef notyet
unregister_shrinker(&manager->mm_shrink);
#endif
@@ -488,7 +516,7 @@ static void ttm_handle_caching_state_failure(struct pglist *pages,
/* Failed pages have to be freed */
for (i = 0; i < cpages; ++i) {
TAILQ_REMOVE(pages, failed_pages[i], pageq);
- __free_page(failed_pages[i]);
+ ttm_uvm_free_page(failed_pages[i]);
}
}
@@ -498,7 +526,7 @@ static void ttm_handle_caching_state_failure(struct pglist *pages,
* This function is reentrant if caller updates count depending on number of
* pages returned in pages array.
*/
-static int ttm_alloc_new_pages(struct pglist *pages, gfp_t gfp_flags,
+static int ttm_alloc_new_pages(struct pglist *pages, int gfp_flags,
int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
struct vm_page **caching_array;
@@ -517,7 +545,7 @@ static int ttm_alloc_new_pages(struct pglist *pages, gfp_t gfp_flags,
}
for (i = 0, cpages = 0; i < count; ++i) {
- p = alloc_page(gfp_flags);
+ p = ttm_uvm_alloc_page();
if (!p) {
pr_err("Unable to get page %u\n", i);
@@ -609,8 +637,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
spin_unlock_irqrestore(&pool->lock, *irq_flags);
TAILQ_INIT(&new_pages);
- r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
- cstate, alloc_size);
+ r = ttm_alloc_new_pages(&new_pages, pool->ttm_page_alloc_flags,
+ ttm_flags, cstate, alloc_size);
spin_lock_irqsave(&pool->lock, *irq_flags);
if (!r) {
@@ -656,31 +684,11 @@ static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
pool->npages = 0;
goto out;
}
-#ifdef __linux__
- /* find the last pages to include for requested number of pages. Split
- * pool to begin and halve it to reduce search space. */
- if (count <= pool->npages/2) {
- i = 0;
- list_for_each(p, &pool->list) {
- if (++i == count)
- break;
- }
- } else {
- i = pool->npages + 1;
- list_for_each_prev(p, &pool->list) {
- if (--i == count)
- break;
- }
- }
- /* Cut 'count' number of pages from the pool */
- list_cut_position(pages, &pool->list, p);
-#else
for (i = 0; i < count; i++) {
p = TAILQ_FIRST(&pool->list);
TAILQ_REMOVE(&pool->list, p, pageq);
TAILQ_INSERT_TAIL(pages, p, pageq);
}
-#endif
pool->npages -= count;
count = 0;
out:
@@ -700,11 +708,7 @@ static void ttm_put_pages(struct vm_page **pages, unsigned npages, int flags,
/* No pool for this memory type so free the pages */
for (i = 0; i < npages; i++) {
if (pages[i]) {
-#ifdef notyet
- if (page_count(pages[i]) != 1)
- pr_err("Erroneous page count. Leaking pages.\n");
-#endif
- __free_page(pages[i]);
+ ttm_uvm_free_page(pages[i]);
pages[i] = NULL;
}
}
@@ -714,10 +718,6 @@ static void ttm_put_pages(struct vm_page **pages, unsigned npages, int flags,
spin_lock_irqsave(&pool->lock, irq_flags);
for (i = 0; i < npages; i++) {
if (pages[i]) {
-#ifdef notyet
- if (page_count(pages[i]) != 1)
- pr_err("Erroneous page count. Leaking pages.\n");
-#endif
TAILQ_INSERT_TAIL(&pool->list, pages[i], pageq);
pages[i] = NULL;
pool->npages++;
@@ -734,7 +734,7 @@ static void ttm_put_pages(struct vm_page **pages, unsigned npages, int flags,
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
if (npages)
- ttm_page_pool_free(pool, npages, false);
+ ttm_page_pool_free(pool, npages);
}
/*
@@ -747,23 +747,28 @@ static int ttm_get_pages(struct vm_page **pages, unsigned npages, int flags,
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
struct pglist plist;
struct vm_page *p = NULL;
- gfp_t gfp_flags = GFP_USER;
+ const struct kmem_pa_mode *kp;
+ int gfp_flags = 0;
unsigned count;
int r;
- /* set zero flag for page allocation if required */
- if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
- gfp_flags |= __GFP_ZERO;
-
/* No pool for cached pages */
if (pool == NULL) {
- if (flags & TTM_PAGE_FLAG_DMA32)
- gfp_flags |= GFP_DMA32;
- else
- gfp_flags |= GFP_HIGHUSER;
+
+ if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+ if (flags & TTM_PAGE_FLAG_DMA32)
+ kp = &kp_dma_zero;
+ else
+ kp = &kp_zero;
+ } else if (flags & TTM_PAGE_FLAG_DMA32) {
+ kp = &kp_dma;
+ } else {
+ kp = &kp_dirty;
+ }
for (r = 0; r < npages; ++r) {
- p = alloc_page(gfp_flags);
+// p = km_alloc(PAGE_SIZE, &kv_any, kp, &kd_waitok);
+ p = ttm_uvm_alloc_page();
if (!p) {
pr_err("Unable to allocate page\n");
@@ -776,7 +781,7 @@ static int ttm_get_pages(struct vm_page **pages, unsigned npages, int flags,
}
/* combine zero flag to pool flags */
- gfp_flags |= pool->gfp_flags;
+ gfp_flags |= pool->ttm_page_alloc_flags;
/* First we take pages from the pool */
TAILQ_INIT(&plist);
@@ -788,18 +793,9 @@ static int ttm_get_pages(struct vm_page **pages, unsigned npages, int flags,
/* clear the pages coming from the pool if requested */
if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
-#ifdef __linux
- list_for_each_entry(p, &plist, lru) {
- if (PageHighMem(p))
- clear_highpage(p);
- else
- clear_page(page_address(p));
- }
-#else
TAILQ_FOREACH(p, &plist, pageq) {
pmap_zero_page(p);
}
-#endif
}
/* If pool didn't have enough pages allocate new one. */
@@ -824,14 +820,14 @@ static int ttm_get_pages(struct vm_page **pages, unsigned npages, int flags,
return 0;
}
-static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
char *name)
{
mtx_init(&pool->lock, IPL_TTY);
pool->fill_lock = false;
TAILQ_INIT(&pool->list);
pool->npages = pool->nfrees = 0;
- pool->gfp_flags = flags;
+ pool->ttm_page_alloc_flags = flags;
pool->name = name;
}
@@ -844,18 +840,16 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
pr_info("Initializing pool allocator\n");
_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
- if (!_manager)
- return -ENOMEM;
- ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
+ ttm_page_pool_init_locked(&_manager->wc_pool, 0, "wc");
- ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
+ ttm_page_pool_init_locked(&_manager->uc_pool, 0, "uc");
ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
- GFP_USER | GFP_DMA32, "wc dma");
+ TTM_PAGE_FLAG_DMA32, "wc dma");
ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
- GFP_USER | GFP_DMA32, "uc dma");
+ TTM_PAGE_FLAG_DMA32, "uc dma");
_manager->options.max_size = max_pages;
_manager->options.small = SMALL_ALLOCATION;
@@ -881,9 +875,8 @@ void ttm_page_alloc_fini(void)
pr_info("Finalizing pool allocator\n");
ttm_pool_mm_shrink_fini(_manager);
- /* OK to use static buffer since global mutex is no longer used. */
for (i = 0; i < NUM_POOLS; ++i)
- ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
+ ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
kobject_put(&_manager->kobj);
_manager = NULL;
@@ -945,6 +938,7 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
+#ifdef notyet
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
struct ttm_page_pool *p;
@@ -965,4 +959,5 @@ int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
}
return 0;
}
+#endif
EXPORT_SYMBOL(ttm_page_alloc_debugfs);
diff --git a/sys/dev/pci/drm/ttm/ttm_page_alloc.h b/sys/dev/pci/drm/ttm/ttm_page_alloc.h
index b28dace1d2f..96103815246 100644
--- a/sys/dev/pci/drm/ttm/ttm_page_alloc.h
+++ b/sys/dev/pci/drm/ttm/ttm_page_alloc.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: ttm_page_alloc.h,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
/*
* Copyright (c) Red Hat Inc.
@@ -29,8 +30,6 @@
#include <dev/pci/drm/ttm/ttm_bo_driver.h>
#include <dev/pci/drm/ttm/ttm_memory.h>
-struct device;
-
/**
* Initialize pool allocator.
*/
@@ -61,10 +60,12 @@ extern void ttm_pool_unpopulate(struct ttm_tt *ttm);
/**
* Output the state of pools to debugfs file
*/
+#ifdef notyet
extern int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
+#endif
-#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
+#ifdef CONFIG_SWIOTLB
/**
* Initialize pool allocator.
*/
@@ -92,19 +93,12 @@ static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
static inline void ttm_dma_page_alloc_fini(void) { return; }
+#ifdef notyet
static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
return 0;
}
-static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
- struct device *dev)
-{
- return -ENOMEM;
-}
-static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
- struct device *dev)
-{
-}
+#endif
#endif
#endif
diff --git a/sys/dev/pci/drm/ttm/ttm_placement.h b/sys/dev/pci/drm/ttm/ttm_placement.h
index 8ed44f9bbdf..2e708956bf7 100644
--- a/sys/dev/pci/drm/ttm/ttm_placement.h
+++ b/sys/dev/pci/drm/ttm/ttm_placement.h
@@ -1,3 +1,4 @@
+/* $OpenBSD: ttm_placement.h,v 1.3 2018/04/20 16:09:37 deraadt Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -65,8 +66,6 @@
* reference the buffer.
* TTM_PL_FLAG_NO_EVICT means that the buffer may never
* be evicted to make room for other buffers.
- * TTM_PL_FLAG_TOPDOWN requests to be placed from the
- * top of the memory area, instead of the bottom.
*/
#define TTM_PL_FLAG_CACHED (1 << 16)
@@ -74,7 +73,6 @@
#define TTM_PL_FLAG_WC (1 << 18)
#define TTM_PL_FLAG_SHARED (1 << 20)
#define TTM_PL_FLAG_NO_EVICT (1 << 21)
-#define TTM_PL_FLAG_TOPDOWN (1 << 22)
#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
TTM_PL_FLAG_UNCACHED | \
diff --git a/sys/dev/pci/drm/ttm/ttm_tt.c b/sys/dev/pci/drm/ttm/ttm_tt.c
index a9b74d8a47a..cf9f5374c66 100644
--- a/sys/dev/pci/drm/ttm/ttm_tt.c
+++ b/sys/dev/pci/drm/ttm/ttm_tt.c
@@ -1,3 +1,4 @@
+/* $OpenBSD: ttm_tt.c,v 1.6 2018/04/20 16:09:37 deraadt Exp $ */
/**************************************************************************
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
@@ -48,12 +49,9 @@ static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
- ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
- sizeof(*ttm->ttm.pages) +
- sizeof(*ttm->dma_address) +
- sizeof(*ttm->cpu_address));
- ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
- ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages);
+ ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
+ ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
+ sizeof(*ttm->dma_address));
}
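With the split above, the pages and dma_address arrays become two independent allocations, which is why ttm_dma_tt_init() now checks both pointers and ttm_dma_tt_fini() (later in this file's hunks) frees both. The pairing, in outline:

/* Alloc/free pairing implied by the hunks in this file. */
ttm->ttm.pages = drm_calloc_large(n, sizeof(void *));
ttm->dma_address = drm_calloc_large(n, sizeof(*ttm->dma_address));
/* ... */
drm_free_large(ttm->pages);
drm_free_large(ttm->dma_address);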
#ifdef CONFIG_X86
@@ -113,7 +111,11 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
}
if (ttm->caching_state == tt_cached)
+#ifdef notyet
drm_clflush_pages(ttm->pages, ttm->num_pages);
+#else
+ printf("%s partial stub\n", __func__);
+#endif
for (i = 0; i < ttm->num_pages; ++i) {
cur_page = ttm->pages[i];
@@ -166,8 +168,9 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
ttm_tt_unbind(ttm);
}
- if (ttm->state == tt_unbound)
- ttm_tt_unpopulate(ttm);
+ if (ttm->state == tt_unbound) {
+ ttm->bdev->driver->ttm_tt_unpopulate(ttm);
+ }
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
ttm->swap_storage)
@@ -224,7 +227,7 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
INIT_LIST_HEAD(&ttm_dma->pages_list);
ttm_dma_tt_alloc_page_directory(ttm_dma);
- if (!ttm->pages) {
+ if (!ttm->pages || !ttm_dma->dma_address) {
ttm_tt_destroy(ttm);
pr_err("Failed allocating page table\n");
return -ENOMEM;
@@ -239,7 +242,7 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
drm_free_large(ttm->pages);
ttm->pages = NULL;
- ttm_dma->cpu_address = NULL;
+ drm_free_large(ttm_dma->dma_address);
ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
@@ -312,7 +315,6 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
ttm->swap_storage = NULL;
ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
-
return 0;
out_err:
return ret;
@@ -373,28 +375,3 @@ out_err:
return ret;
}
-
-static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
-{
- int i;
- struct vm_page *page;
-
- if (ttm->page_flags & TTM_PAGE_FLAG_SG)
- return;
-
- for (i = 0; i < ttm->num_pages; ++i) {
- page = ttm->pages[i];
- if (unlikely(page == NULL))
- continue;
- pmap_page_protect(page, PROT_NONE);
- }
-}
-
-void ttm_tt_unpopulate(struct ttm_tt *ttm)
-{
- if (ttm->state == tt_unpopulated)
- return;
-
- ttm_tt_clear_mapping(ttm);
- ttm->bdev->driver->ttm_tt_unpopulate(ttm);
-}
diff --git a/sys/dev/rasops/rasops.c b/sys/dev/rasops/rasops.c
index 1b601908a84..138db2ebc82 100644
--- a/sys/dev/rasops/rasops.c
+++ b/sys/dev/rasops/rasops.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rasops.c,v 1.51 2018/04/20 16:06:07 deraadt Exp $ */
+/* $OpenBSD: rasops.c,v 1.52 2018/04/20 16:09:37 deraadt Exp $ */
/* $NetBSD: rasops.c,v 1.35 2001/02/02 06:01:01 marcus Exp $ */
/*-
@@ -1373,13 +1373,8 @@ struct rasops_screen {
int rs_visible;
int rs_crow;
int rs_ccol;
-
- int rs_dispoffset; /* rs_bs index, start of our actual screen */
- int rs_visibleoffset; /* rs_bs index, current scrollback screen */
};
-#define RS_SCROLLBACK_SCREENS 5
-
int
rasops_alloc_screen(void *v, void **cookiep,
int *curxp, int *curyp, long *attrp)
@@ -1392,15 +1387,13 @@ rasops_alloc_screen(void *v, void **cookiep,
if (scr == NULL)
return (ENOMEM);
- scr->rs_bs = mallocarray(ri->ri_rows * RS_SCROLLBACK_SCREENS,
+ scr->rs_bs = mallocarray(ri->ri_rows,
ri->ri_cols * sizeof(struct wsdisplay_charcell), M_DEVBUF,
M_NOWAIT);
if (scr->rs_bs == NULL) {
free(scr, M_DEVBUF, sizeof(*scr));
return (ENOMEM);
}
- scr->rs_visibleoffset = scr->rs_dispoffset = ri->ri_rows *
- (RS_SCROLLBACK_SCREENS - 1) * ri->ri_cols;
*cookiep = scr;
*curxp = 0;
@@ -1412,19 +1405,13 @@ rasops_alloc_screen(void *v, void **cookiep,
scr->rs_crow = -1;
scr->rs_ccol = -1;
- for (i = 0; i < scr->rs_dispoffset; i++) {
- scr->rs_bs[i].uc = ' ';
- scr->rs_bs[i].attr = *attrp;
- }
-
if (ri->ri_bs && scr->rs_visible) {
- memcpy(scr->rs_bs + scr->rs_dispoffset, ri->ri_bs,
- ri->ri_rows * ri->ri_cols *
+ memcpy(scr->rs_bs, ri->ri_bs, ri->ri_rows * ri->ri_cols *
sizeof(struct wsdisplay_charcell));
} else {
for (i = 0; i < ri->ri_rows * ri->ri_cols; i++) {
- scr->rs_bs[scr->rs_dispoffset + i].uc = ' ';
- scr->rs_bs[scr->rs_dispoffset + i].attr = *attrp;
+ scr->rs_bs[i].uc = ' ';
+ scr->rs_bs[i].attr = *attrp;
}
}
@@ -1444,8 +1431,7 @@ rasops_free_screen(void *v, void *cookie)
ri->ri_nscreens--;
free(scr->rs_bs, M_DEVBUF,
- ri->ri_rows * RS_SCROLLBACK_SCREENS * ri->ri_cols *
- sizeof(struct wsdisplay_charcell));
+ ri->ri_rows * ri->ri_cols * sizeof(struct wsdisplay_charcell));
free(scr, M_DEVBUF, sizeof(*scr));
}
@@ -1481,11 +1467,9 @@ rasops_doswitch(void *v)
ri->ri_eraserows(ri, 0, ri->ri_rows, attr);
ri->ri_active = scr;
ri->ri_active->rs_visible = 1;
- ri->ri_active->rs_visibleoffset = ri->ri_active->rs_dispoffset;
for (row = 0; row < ri->ri_rows; row++) {
for (col = 0; col < ri->ri_cols; col++) {
- int off = row * scr->rs_ri->ri_cols + col +
- scr->rs_visibleoffset;
+ int off = row * scr->rs_ri->ri_cols + col;
ri->ri_putchar(ri, row, col, scr->rs_bs[off].uc,
scr->rs_bs[off].attr);
@@ -1507,7 +1491,7 @@ rasops_getchar(void *v, int row, int col, struct wsdisplay_charcell *cell)
if (scr == NULL || scr->rs_bs == NULL)
return (1);
- *cell = scr->rs_bs[row * ri->ri_cols + col + scr->rs_dispoffset];
+ *cell = scr->rs_bs[row * ri->ri_cols + col];
return (0);
}
@@ -1537,10 +1521,7 @@ int
rasops_vcons_putchar(void *cookie, int row, int col, u_int uc, long attr)
{
struct rasops_screen *scr = cookie;
- int off = row * scr->rs_ri->ri_cols + col + scr->rs_dispoffset;
-
- if (scr->rs_visible && scr->rs_visibleoffset != scr->rs_dispoffset)
- rasops_scrollback(scr->rs_ri, scr, 0);
+ int off = row * scr->rs_ri->ri_cols + col;
scr->rs_bs[off].uc = uc;
scr->rs_bs[off].attr = attr;
@@ -1559,8 +1540,7 @@ rasops_vcons_copycols(void *cookie, int row, int src, int dst, int num)
int cols = scr->rs_ri->ri_cols;
int col, rc;
- memmove(&scr->rs_bs[row * cols + dst + scr->rs_dispoffset],
- &scr->rs_bs[row * cols + src + scr->rs_dispoffset],
+ memmove(&scr->rs_bs[row * cols + dst], &scr->rs_bs[row * cols + src],
num * sizeof(struct wsdisplay_charcell));
if (!scr->rs_visible)
@@ -1570,7 +1550,7 @@ rasops_vcons_copycols(void *cookie, int row, int src, int dst, int num)
return ri->ri_copycols(ri, row, src, dst, num);
for (col = dst; col < dst + num; col++) {
- int off = row * cols + col + scr->rs_dispoffset;
+ int off = row * cols + col;
rc = ri->ri_putchar(ri, row, col,
scr->rs_bs[off].uc, scr->rs_bs[off].attr);
@@ -1589,7 +1569,7 @@ rasops_vcons_erasecols(void *cookie, int row, int col, int num, long attr)
int i;
for (i = 0; i < num; i++) {
- int off = row * cols + col + i + scr->rs_dispoffset;
+ int off = row * cols + col + i;
scr->rs_bs[off].uc = ' ';
scr->rs_bs[off].attr = attr;
@@ -1609,15 +1589,8 @@ rasops_vcons_copyrows(void *cookie, int src, int dst, int num)
int cols = ri->ri_cols;
int row, col, rc;
- if (dst == 0 && (src + num == ri->ri_rows))
- memmove(&scr->rs_bs[dst],
- &scr->rs_bs[src * cols],
- ((ri->ri_rows * RS_SCROLLBACK_SCREENS * cols) -
- (src * cols)) * sizeof(struct wsdisplay_charcell));
- else
- memmove(&scr->rs_bs[dst * cols + scr->rs_dispoffset],
- &scr->rs_bs[src * cols + scr->rs_dispoffset],
- num * cols * sizeof(struct wsdisplay_charcell));
+ memmove(&scr->rs_bs[dst * cols], &scr->rs_bs[src * cols],
+ num * cols * sizeof(struct wsdisplay_charcell));
if (!scr->rs_visible)
return 0;
@@ -1627,7 +1600,7 @@ rasops_vcons_copyrows(void *cookie, int src, int dst, int num)
for (row = dst; row < dst + num; row++) {
for (col = 0; col < cols; col++) {
- int off = row * cols + col + scr->rs_dispoffset;
+ int off = row * cols + col;
rc = ri->ri_putchar(ri, row, col,
scr->rs_bs[off].uc, scr->rs_bs[off].attr);
@@ -1647,7 +1620,7 @@ rasops_vcons_eraserows(void *cookie, int row, int num, long attr)
int i;
for (i = 0; i < num * cols; i++) {
- int off = row * cols + i + scr->rs_dispoffset;
+ int off = row * cols + i;
scr->rs_bs[off].uc = ' ';
scr->rs_bs[off].attr = attr;
@@ -1899,45 +1872,3 @@ rasops_list_font(void *v, struct wsdisplay_font *font)
font->cookie = font->data = NULL; /* don't leak kernel pointers */
return 0;
}
-
-void
-rasops_scrollback(void *v, void *cookie, int lines)
-{
- struct rasops_info *ri = v;
- struct rasops_screen *scr = cookie;
- int row, col, oldvoff;
- long attr;
-
- oldvoff = scr->rs_visibleoffset;
-
- if (lines == 0)
- scr->rs_visibleoffset = scr->rs_dispoffset;
- else {
- int off = scr->rs_visibleoffset + (lines * ri->ri_cols);
-
- if (off < 0)
- off = 0;
- else if (off > scr->rs_dispoffset)
- off = scr->rs_dispoffset;
-
- scr->rs_visibleoffset = off;
- }
-
- if (scr->rs_visibleoffset == oldvoff)
- return;
-
- rasops_cursor(ri, 0, 0, 0);
- ri->ri_eraserows(ri, 0, ri->ri_rows, attr);
- for (row = 0; row < ri->ri_rows; row++) {
- for (col = 0; col < ri->ri_cols; col++) {
- int off = row * scr->rs_ri->ri_cols + col +
- scr->rs_visibleoffset;
-
- ri->ri_putchar(ri, row, col, scr->rs_bs[off].uc,
- scr->rs_bs[off].attr);
- }
- }
-
- if (scr->rs_crow != -1 && scr->rs_visibleoffset == scr->rs_dispoffset)
- rasops_cursor(ri, 1, scr->rs_crow, scr->rs_ccol);
-}
diff --git a/sys/dev/rasops/rasops.h b/sys/dev/rasops/rasops.h
index eeb8b0ba7f4..015e46bfcd7 100644
--- a/sys/dev/rasops/rasops.h
+++ b/sys/dev/rasops/rasops.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: rasops.h,v 1.20 2018/04/20 16:06:07 deraadt Exp $ */
+/* $OpenBSD: rasops.h,v 1.21 2018/04/20 16:09:37 deraadt Exp $ */
/* $NetBSD: rasops.h,v 1.13 2000/06/13 13:36:54 ad Exp $ */
/*-
@@ -178,7 +178,6 @@ int rasops_show_screen(void *, void *, int,
int rasops_load_font(void *, void *, struct wsdisplay_font *);
int rasops_list_font(void *, struct wsdisplay_font *);
int rasops_getchar(void *, int, int, struct wsdisplay_charcell *);
-void rasops_scrollback(void *, void *, int);
extern const u_char rasops_isgray[16];
extern const u_char rasops_cmap[256*3];