// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h processor hardware monitoring
 *
 * Copyright (c) 2009 Clemens Ladisch
 */

#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <asm/amd_nb.h>
#include <asm/processor.h>

MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor");
MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
MODULE_LICENSE("GPL");

static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "force loading on processors with erratum 319");

/* Provide lock for writing to NB_SMU_IND_ADDR */
static DEFINE_MUTEX(nb_smu_ind_mutex);

#ifndef PCI_DEVICE_ID_AMD_15H_M70H_NB_F3
#define PCI_DEVICE_ID_AMD_15H_M70H_NB_F3        0x15b3
#endif

/* CPUID function 0x80000001, ebx */
#define CPUID_PKGTYPE_MASK      0xf0000000
#define CPUID_PKGTYPE_F         0x00000000
#define CPUID_PKGTYPE_AM2R2_AM3 0x10000000

/* DRAM controller (PCI function 2) */
#define REG_DCT0_CONFIG_HIGH    0x094
#define  DDR3_MODE              0x00000100

/* miscellaneous (PCI function 3) */
#define REG_HARDWARE_THERMAL_CONTROL    0x64
#define  HTC_ENABLE                     0x00000001

#define REG_REPORTED_TEMPERATURE        0xa4

#define REG_NORTHBRIDGE_CAPABILITIES    0xe8
#define  NB_CAP_HTC                     0x00000400

/*
 * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL
 * and REG_REPORTED_TEMPERATURE have been moved to
 * D0F0xBC_xD820_0C64 [Hardware Temperature Control]
 * D0F0xBC_xD820_0CA4 [Reported Temperature Control]
 */
#define F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET     0xd8200c64
#define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET     0xd8200ca4

/* F17h M01h Access through SMN */
#define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET     0x00059800

struct k10temp_data {
        struct pci_dev *pdev;
        void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
        void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
        int temp_offset;
        u32 temp_adjust_mask;
        bool show_tdie;
};

struct tctl_offset {
        u8 model;
        char const *id;
        int offset;
};

static const struct tctl_offset tctl_offset_table[] = {
        { 0x17, "AMD Ryzen 5 1600X", 20000 },
        { 0x17, "AMD Ryzen 7 1700X", 20000 },
        { 0x17, "AMD Ryzen 7 1800X", 20000 },
        { 0x17, "AMD Ryzen 7 2700X", 10000 },
        { 0x17, "AMD Ryzen Threadripper 19", 27000 }, /* 19{00,20,50}X */
        { 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */
};

static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
{
        pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval);
}

static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval)
{
        pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval);
}

static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn,
                              unsigned int base, int offset, u32 *val)
{
        mutex_lock(&nb_smu_ind_mutex);
        pci_bus_write_config_dword(pdev->bus, devfn, base, offset);
        pci_bus_read_config_dword(pdev->bus, devfn, base + 4, val);
        mutex_unlock(&nb_smu_ind_mutex);
}

static void read_htcreg_nb_f15(struct pci_dev *pdev, u32 *regval)
{
        amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
                          F15H_M60H_HARDWARE_TEMP_CTRL_OFFSET, regval);
}

static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval)
{
        amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8,
                          F15H_M60H_REPORTED_TEMP_CTRL_OFFSET, regval);
}

static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval)
{
        amd_smn_read(amd_pci_dev_to_node_id(pdev),
                     F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval);
}

static unsigned int get_raw_temp(struct k10temp_data *data)
{
        unsigned int temp;
        u32 regval;

        data->read_tempreg(data->pdev, &regval);
        temp = (regval >> 21) * 125;
        if (regval & data->temp_adjust_mask)
                temp -= 49000;
        return temp;
}
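/*
 * Illustrative decoding example (the register value is hypothetical): with
 * regval = 0x30a00000, the temperature field in bits 31:21 is 0x185 = 389,
 * so get_raw_temp() above returns 389 * 125 = 48625 millidegrees Celsius
 * (~48.6 degrees C), the unit expected by hwmon. On family 17h,
 * temp_adjust_mask is 0x80000 (bit 19); when that bit is set in the reported
 * value the reading is range-shifted and 49000 is subtracted. The sysfs show
 * functions below additionally subtract the optional Tctl offset for temp1
 * and report the unmodified raw value as temp2.
 */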
static ssize_t temp1_input_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct k10temp_data *data = dev_get_drvdata(dev);
        unsigned int temp = get_raw_temp(data);

        if (temp > data->temp_offset)
                temp -= data->temp_offset;
        else
                temp = 0;

        return sprintf(buf, "%u\n", temp);
}

static ssize_t temp2_input_show(struct device *dev,
                                struct device_attribute *devattr, char *buf)
{
        struct k10temp_data *data = dev_get_drvdata(dev);
        unsigned int temp = get_raw_temp(data);

        return sprintf(buf, "%u\n", temp);
}

static ssize_t temp_label_show(struct device *dev,
                               struct device_attribute *devattr, char *buf)
{
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);

        return sprintf(buf, "%s\n", attr->index ? "Tctl" : "Tdie");
}

static ssize_t temp1_max_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", 70 * 1000);
}

static ssize_t temp_crit_show(struct device *dev,
                              struct device_attribute *devattr, char *buf)
{
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct k10temp_data *data = dev_get_drvdata(dev);
        int show_hyst = attr->index;
        u32 regval;
        int value;

        data->read_htcreg(data->pdev, &regval);
        value = ((regval >> 16) & 0x7f) * 500 + 52000;
        if (show_hyst)
                value -= ((regval >> 24) & 0xf) * 500;
        return sprintf(buf, "%d\n", value);
}

static DEVICE_ATTR_RO(temp1_input);
static DEVICE_ATTR_RO(temp1_max);
static SENSOR_DEVICE_ATTR_RO(temp1_crit, temp_crit, 0);
static SENSOR_DEVICE_ATTR_RO(temp1_crit_hyst, temp_crit, 1);
static SENSOR_DEVICE_ATTR_RO(temp1_label, temp_label, 0);
static DEVICE_ATTR_RO(temp2_input);
static SENSOR_DEVICE_ATTR_RO(temp2_label, temp_label, 1);

static umode_t k10temp_is_visible(struct kobject *kobj,
                                  struct attribute *attr, int index)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct k10temp_data *data = dev_get_drvdata(dev);
        struct pci_dev *pdev = data->pdev;
        u32 reg;

        switch (index) {
        case 0 ... 1:   /* temp1_input, temp1_max */
        default:
                break;
        case 2 ... 3:   /* temp1_crit, temp1_crit_hyst */
                if (!data->read_htcreg)
                        return 0;

                pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES,
                                      &reg);
                if (!(reg & NB_CAP_HTC))
                        return 0;

                data->read_htcreg(data->pdev, &reg);
                if (!(reg & HTC_ENABLE))
                        return 0;
                break;
        case 4 ... 6:   /* temp1_label, temp2_input, temp2_label */
                if (!data->show_tdie)
                        return 0;
                break;
        }

        return attr->mode;
}

static struct attribute *k10temp_attrs[] = {
        &dev_attr_temp1_input.attr,
        &dev_attr_temp1_max.attr,
        &sensor_dev_attr_temp1_crit.dev_attr.attr,
        &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
        &sensor_dev_attr_temp1_label.dev_attr.attr,
        &dev_attr_temp2_input.attr,
        &sensor_dev_attr_temp2_label.dev_attr.attr,
        NULL
};

static const struct attribute_group k10temp_group = {
        .attrs = k10temp_attrs,
        .is_visible = k10temp_is_visible,
};
__ATTRIBUTE_GROUPS(k10temp);

static bool has_erratum_319(struct pci_dev *pdev)
{
        u32 pkg_type, reg_dram_cfg;

        if (boot_cpu_data.x86 != 0x10)
                return false;

        /*
         * Erratum 319: The thermal sensor of Socket F/AM2+ processors
         * may be unreliable.
         */
        pkg_type = cpuid_ebx(0x80000001) & CPUID_PKGTYPE_MASK;
        if (pkg_type == CPUID_PKGTYPE_F)
                return true;
        if (pkg_type != CPUID_PKGTYPE_AM2R2_AM3)
                return false;

        /* DDR3 memory implies socket AM3, which is good */
        pci_bus_read_config_dword(pdev->bus,
                                  PCI_DEVFN(PCI_SLOT(pdev->devfn), 2),
                                  REG_DCT0_CONFIG_HIGH, &reg_dram_cfg);
        if (reg_dram_cfg & DDR3_MODE)
                return false;

        /*
         * Unfortunately it is possible to run a socket AM3 CPU with DDR2
         * memory. We blacklist all the cores which do exist in socket AM2+
         * format. It still isn't perfect, as RB-C2 cores exist in both AM2+
         * and AM3 formats, but that's the best we can do.
         */
        return boot_cpu_data.x86_model < 4 ||
               (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
}
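/*
 * Overview of the access methods chosen in k10temp_probe() below:
 *  - family 15h, models 60h-7fh: indirect northbridge reads through the
 *    index/data pair at D0F0xB8/BC (amd_nb_index_read());
 *  - family 17h and 18h (Hygon): SMN reads through amd_smn_read(); Tdie is
 *    exposed as temp1 and Tctl as temp2, with an optional Tctl offset taken
 *    from tctl_offset_table[];
 *  - all other supported families: direct PCI config reads of function 3.
 */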
static int k10temp_probe(struct pci_dev *pdev,
                         const struct pci_device_id *id)
{
        int unreliable = has_erratum_319(pdev);
        struct device *dev = &pdev->dev;
        struct k10temp_data *data;
        struct device *hwmon_dev;
        int i;

        if (unreliable) {
                if (!force) {
                        dev_err(dev,
                                "unreliable CPU thermal sensor; monitoring disabled\n");
                        return -ENODEV;
                }
                dev_warn(dev,
                         "unreliable CPU thermal sensor; check erratum 319\n");
        }

        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        data->pdev = pdev;

        if (boot_cpu_data.x86 == 0x15 &&
            ((boot_cpu_data.x86_model & 0xf0) == 0x60 ||
             (boot_cpu_data.x86_model & 0xf0) == 0x70)) {
                data->read_htcreg = read_htcreg_nb_f15;
                data->read_tempreg = read_tempreg_nb_f15;
        } else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
                data->temp_adjust_mask = 0x80000;
                data->read_tempreg = read_tempreg_nb_f17;
                data->show_tdie = true;
        } else {
                data->read_htcreg = read_htcreg_pci;
                data->read_tempreg = read_tempreg_pci;
        }

        for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
                const struct tctl_offset *entry = &tctl_offset_table[i];

                if (boot_cpu_data.x86 == entry->model &&
                    strstr(boot_cpu_data.x86_model_id, entry->id)) {
                        data->temp_offset = entry->offset;
                        break;
                }
        }

        hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp",
                                                           data,
                                                           k10temp_groups);
        return PTR_ERR_OR_ZERO(hwmon_dev);
}

static const struct pci_device_id k10temp_id_table[] = {
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M70H_NB_F3) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
        { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
        {}
};
MODULE_DEVICE_TABLE(pci, k10temp_id_table);

static struct pci_driver k10temp_driver = {
        .name = "k10temp",
        .id_table = k10temp_id_table,
        .probe = k10temp_probe,
};

module_pci_driver(k10temp_driver);
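/*
 * Usage note (the hwmon index is assigned dynamically at registration time):
 * once the driver is bound, the attributes registered above appear under
 * /sys/class/hwmon/hwmon<N>/ in millidegrees Celsius. temp1_input and
 * temp1_max are always present; temp1_crit and temp1_crit_hyst only when
 * HTC is reported and enabled; temp1_label ("Tdie"), temp2_input and
 * temp2_label ("Tctl") only on family 17h/18h CPUs.
 */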