Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/Kconfig | 56
-rw-r--r--  drivers/char/Makefile | 1
-rw-r--r--  drivers/char/agp/amd64-agp.c | 26
-rw-r--r--  drivers/char/agp/ati-agp.c | 8
-rw-r--r--  drivers/char/agp/backend.c | 2
-rw-r--r--  drivers/char/agp/frontend.c | 4
-rw-r--r--  drivers/char/agp/intel-gtt.c | 100
-rw-r--r--  drivers/char/agp/nvidia-agp.c | 3
-rw-r--r--  drivers/char/agp/sis-agp.c | 25
-rw-r--r--  drivers/char/agp/sworks-agp.c | 5
-rw-r--r--  drivers/char/agp/via-agp.c | 28
-rw-r--r--  drivers/char/apm-emulation.c | 2
-rw-r--r--  drivers/char/applicom.c | 4
-rw-r--r--  drivers/char/bsr.c | 2
-rw-r--r--  drivers/char/hpet.c | 50
-rw-r--r--  drivers/char/hw_random/Kconfig | 41
-rw-r--r--  drivers/char/hw_random/Makefile | 3
-rw-r--r--  drivers/char/hw_random/arm_smccc_trng.c | 4
-rw-r--r--  drivers/char/hw_random/atmel-rng.c | 148
-rw-r--r--  drivers/char/hw_random/bcm2835-rng.c | 2
-rw-r--r--  drivers/char/hw_random/cavium-rng-vf.c | 194
-rw-r--r--  drivers/char/hw_random/cavium-rng.c | 11
-rw-r--r--  drivers/char/hw_random/cn10k-rng.c | 184
-rw-r--r--  drivers/char/hw_random/core.c | 175
-rw-r--r--  drivers/char/hw_random/imx-rngc.c | 51
-rw-r--r--  drivers/char/hw_random/iproc-rng200.c | 9
-rw-r--r--  drivers/char/hw_random/mpfs-rng.c | 104
-rw-r--r--  drivers/char/hw_random/nomadik-rng.c | 4
-rw-r--r--  drivers/char/hw_random/omap3-rom-rng.c | 2
-rw-r--r--  drivers/char/hw_random/optee-rng.c | 8
-rw-r--r--  drivers/char/hw_random/powernv-rng.c | 2
-rw-r--r--  drivers/char/hw_random/s390-trng.c | 11
-rw-r--r--  drivers/char/hw_random/tx4939-rng.c | 157
-rw-r--r--  drivers/char/hw_random/via-rng.c | 2
-rw-r--r--  drivers/char/hw_random/virtio-rng.c | 4
-rw-r--r--  drivers/char/ipmi/Kconfig | 6
-rw-r--r--  drivers/char/ipmi/ipmb_dev_int.c | 9
-rw-r--r--  drivers/char/ipmi/ipmi_ipmb.c | 82
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 147
-rw-r--r--  drivers/char/ipmi/ipmi_poweroff.c | 8
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 22
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 54
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c | 30
-rw-r--r--  drivers/char/ipmi/kcs_bmc_aspeed.c | 96
-rw-r--r--  drivers/char/ipmi/kcs_bmc_cdev_ipmi.c | 4
-rw-r--r--  drivers/char/ipmi/kcs_bmc_serio.c | 4
-rw-r--r--  drivers/char/lp.c | 2
-rw-r--r--  drivers/char/mem.c | 12
-rw-r--r--  drivers/char/misc.c | 24
-rw-r--r--  drivers/char/mwave/3780i.h | 2
-rw-r--r--  drivers/char/pcmcia/synclink_cs.c | 13
-rw-r--r--  drivers/char/random.c | 2978
-rw-r--r--  drivers/char/tb0219.c | 359
-rw-r--r--  drivers/char/tpm/Kconfig | 12
-rw-r--r--  drivers/char/tpm/Makefile | 1
-rw-r--r--  drivers/char/tpm/st33zp24/i2c.c | 9
-rw-r--r--  drivers/char/tpm/st33zp24/spi.c | 9
-rw-r--r--  drivers/char/tpm/st33zp24/st33zp24.c | 125
-rw-r--r--  drivers/char/tpm/st33zp24/st33zp24.h | 2
-rw-r--r--  drivers/char/tpm/tpm-chip.c | 83
-rw-r--r--  drivers/char/tpm/tpm-dev-common.c | 8
-rw-r--r--  drivers/char/tpm/tpm-sysfs.c | 3
-rw-r--r--  drivers/char/tpm/tpm.h | 3
-rw-r--r--  drivers/char/tpm/tpm1-cmd.c | 7
-rw-r--r--  drivers/char/tpm/tpm2-cmd.c | 27
-rw-r--r--  drivers/char/tpm/tpm2-space.c | 73
-rw-r--r--  drivers/char/tpm/tpm_ftpm_tee.c | 2
-rw-r--r--  drivers/char/tpm/tpm_i2c_atmel.c | 3
-rw-r--r--  drivers/char/tpm/tpm_i2c_infineon.c | 4
-rw-r--r--  drivers/char/tpm/tpm_i2c_nuvoton.c | 3
-rw-r--r--  drivers/char/tpm/tpm_ibmvtpm.c | 1
-rw-r--r--  drivers/char/tpm/tpm_ppi.c | 2
-rw-r--r--  drivers/char/tpm/tpm_tis.c | 67
-rw-r--r--  drivers/char/tpm/tpm_tis_core.c | 28
-rw-r--r--  drivers/char/tpm/tpm_tis_core.h | 68
-rw-r--r--  drivers/char/tpm/tpm_tis_i2c.c | 389
-rw-r--r--  drivers/char/tpm/tpm_tis_i2c_cr50.c | 31
-rw-r--r--  drivers/char/tpm/tpm_tis_spi.h | 4
-rw-r--r--  drivers/char/tpm/tpm_tis_spi_cr50.c | 27
-rw-r--r--  drivers/char/tpm/tpm_tis_spi_main.c | 48
-rw-r--r--  drivers/char/tpm/tpm_tis_synquacer.c | 98
-rw-r--r--  drivers/char/tpm/tpm_vtpm_proxy.c | 2
-rw-r--r--  drivers/char/tpm/xen-tpmfront.c | 26
-rw-r--r--  drivers/char/ttyprintk.c | 16
-rw-r--r--  drivers/char/virtio_console.c | 19
-rw-r--r--  drivers/char/xilinx_hwicap/fifo_icap.c | 2
-rw-r--r--  drivers/char/xilinx_hwicap/xilinx_hwicap.c | 6
-rw-r--r--  drivers/char/xillybus/xillybus_class.c | 26
-rw-r--r--  drivers/char/xillybus/xillyusb.c | 1
89 files changed, 3155 insertions, 3364 deletions
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 740811893c57..0f378d29dab0 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -18,7 +18,8 @@ config TTY_PRINTK
The feature is useful to inline user messages with kernel
messages.
In order to use this feature, you should output user messages
- to /dev/ttyprintk or redirect console to this TTY.
+ to /dev/ttyprintk or redirect console to this TTY, or boot
+ the kernel with console=ttyprintk.
If unsure, say N.
@@ -246,11 +247,6 @@ config SONYPI
To compile this driver as a module, choose M here: the
module will be called sonypi.
-config GPIO_TB0219
- tristate "TANBAC TB0219 GPIO support"
- depends on TANBAC_TB022X
- select GPIO_VR41XX
-
source "drivers/char/pcmcia/Kconfig"
config MWAVE
@@ -428,27 +424,39 @@ config ADI
driver include crash and makedumpfile.
config RANDOM_TRUST_CPU
- bool "Trust the CPU manufacturer to initialize Linux's CRNG"
- depends on ARCH_RANDOM
- default n
+ bool "Initialize RNG using CPU RNG instructions"
+ default y
help
- Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or
- RDRAND, IBM for the S390 and Power PC architectures) is trustworthy
- for the purposes of initializing Linux's CRNG. Since this is not
- something that can be independently audited, this amounts to trusting
- that CPU manufacturer (perhaps with the insistence or mandate
- of a Nation State's intelligence or law enforcement agencies)
- has not installed a hidden back door to compromise the CPU's
- random number generation facilities. This can also be configured
- at boot with "random.trust_cpu=on/off".
+ Initialize the RNG using random numbers supplied by the CPU's
+ RNG instructions (e.g. RDRAND), if supported and available. These
+ random numbers are never used directly, but are rather hashed into
+ the main input pool, and this happens regardless of whether or not
+ this option is enabled. Instead, this option controls whether
+ they are credited and hence can initialize the RNG. Additionally,
+ other sources of randomness are always used, regardless of this
+ setting. Enabling this implies trusting that the CPU can supply high
+ quality and non-backdoored random numbers.
+
+ Say Y here unless you have reason to mistrust your CPU or believe
+ its RNG facilities may be faulty. This may also be configured at
+ boot time with "random.trust_cpu=on/off".
config RANDOM_TRUST_BOOTLOADER
- bool "Trust the bootloader to initialize Linux's CRNG"
+ bool "Initialize RNG using bootloader-supplied seed"
+ default y
help
- Some bootloaders can provide entropy to increase the kernel's initial
- device randomness. Say Y here to assume the entropy provided by the
- booloader is trustworthy so it will be added to the kernel's entropy
- pool. Otherwise, say N here so it will be regarded as device input that
- only mixes the entropy pool.
+ Initialize the RNG using a seed supplied by the bootloader or boot
+ environment (e.g. EFI or a bootloader-generated device tree). This
+ seed is not used directly, but is rather hashed into the main input
+ pool, and this happens regardless of whether or not this option is
+ enabled. Instead, this option controls whether the seed is credited
+ and hence can initialize the RNG. Additionally, other sources of
+ randomness are always used, regardless of this setting. Enabling
+ this implies trusting that the bootloader can supply high quality and
+ non-backdoored seeds.
+
+ Say Y here unless you have reason to mistrust your bootloader or
+ believe its RNG facilities may be faulty. This may also be configured
+ at boot time with "random.trust_bootloader=on/off".
endmenu
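
The two help texts above draw the same distinction: seeds are always mixed into the input pool, and the config options only decide whether they are credited toward initializing the RNG. A minimal conceptual sketch of that split, assuming the arch_get_random_seed_long() helper; mix_pool_bytes() and credit_init_bits() are used here purely as stand-ins for the random.c internals, not as the actual kernel code:

	/*
	 * Conceptual sketch only -- not the drivers/char/random.c implementation.
	 * The seed is always mixed in; it only counts toward initializing the
	 * RNG when the relevant RANDOM_TRUST_* option (or boot parameter) allows.
	 */
	static bool trust_cpu = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);

	static void mix_cpu_seed(void)
	{
		unsigned long seed;
		int i, bits = 0;

		for (i = 0; i < 8; i++) {
			if (!arch_get_random_seed_long(&seed))
				break;
			mix_pool_bytes(&seed, sizeof(seed));	/* always mixed */
			bits += BITS_PER_LONG;
		}

		if (trust_cpu && bits)
			credit_init_bits(bits);			/* only if trusted */
	}

The random.trust_cpu= and random.trust_bootloader= parameters mentioned in the help texts flip the same crediting decision at boot time.
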
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 264eb398fdd4..1b35d1724565 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -31,7 +31,6 @@ obj-$(CONFIG_NWFLASH) += nwflash.o
obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o
obj-$(CONFIG_PC8736x_GPIO) += pc8736x_gpio.o
obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o
-obj-$(CONFIG_GPIO_TB0219) += tb0219.o
obj-$(CONFIG_TELCLOCK) += tlclk.o
obj-$(CONFIG_MWAVE) += mwave/
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index b40edae32817..84a4aa9312cf 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -327,7 +327,7 @@ static int cache_nbs(struct pci_dev *pdev, u32 cap_ptr)
{
int i;
- if (amd_cache_northbridges() < 0)
+ if (!amd_nb_num())
return -ENODEV;
if (!amd_nb_has_feature(AMD_NB_GART))
@@ -588,20 +588,11 @@ static void agp_amd64_remove(struct pci_dev *pdev)
agp_bridges_found--;
}
-#ifdef CONFIG_PM
+#define agp_amd64_suspend NULL
-static int agp_amd64_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused agp_amd64_resume(struct device *dev)
{
- pci_save_state(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
- return 0;
-}
-
-static int agp_amd64_resume(struct pci_dev *pdev)
-{
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
+ struct pci_dev *pdev = to_pci_dev(dev);
if (pdev->vendor == PCI_VENDOR_ID_NVIDIA)
nforce3_agp_init(pdev);
@@ -609,8 +600,6 @@ static int agp_amd64_resume(struct pci_dev *pdev)
return amd_8151_configure();
}
-#endif /* CONFIG_PM */
-
static const struct pci_device_id agp_amd64_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
@@ -738,15 +727,14 @@ static const struct pci_device_id agp_amd64_pci_promisc_table[] = {
{ }
};
+static SIMPLE_DEV_PM_OPS(agp_amd64_pm_ops, agp_amd64_suspend, agp_amd64_resume);
+
static struct pci_driver agp_amd64_pci_driver = {
.name = "agpgart-amd64",
.id_table = agp_amd64_pci_table,
.probe = agp_amd64_probe,
.remove = agp_amd64_remove,
-#ifdef CONFIG_PM
- .suspend = agp_amd64_suspend,
- .resume = agp_amd64_resume,
-#endif
+ .driver.pm = &agp_amd64_pm_ops,
};
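
The amd64-agp hunks above are one instance of a pattern repeated for the other AGP bridges in this diff (sis-agp, via-agp): the legacy PCI .suspend/.resume hooks are removed and a struct dev_pm_ops is wired up instead, letting the PCI core handle config-space save/restore and power-state changes. A hedged sketch of the pattern for a hypothetical driver (all foo_* names are placeholders):

	/* Hypothetical driver showing the generic PM conversion used above. */
	static int __maybe_unused foo_resume(struct device *dev)
	{
		struct pci_dev *pdev = to_pci_dev(dev);

		/* Config space is already restored by the PCI core; just
		 * reprogram whatever device-specific state is needed. */
		return foo_hw_reinit(pdev);
	}

	/* No suspend callback: the PCI core saves state and powers the
	 * device down on its own for generic PM drivers. */
	static SIMPLE_DEV_PM_OPS(foo_pm_ops, NULL, foo_resume);

	static struct pci_driver foo_driver = {
		.name      = "foo",
		.id_table  = foo_ids,
		.probe     = foo_probe,
		.remove    = foo_remove,
		.driver.pm = &foo_pm_ops,
	};
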
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 20bf5f78a362..6f5530482d83 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -55,7 +55,7 @@ static struct _ati_generic_private {
static int ati_create_page_map(struct ati_page_map *page_map)
{
- int i, err = 0;
+ int i, err;
page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
if (page_map->real == NULL)
@@ -63,6 +63,10 @@ static int ati_create_page_map(struct ati_page_map *page_map)
set_memory_uc((unsigned long)page_map->real, 1);
err = map_page_into_agp(virt_to_page(page_map->real));
+ if (err) {
+ free_page((unsigned long)page_map->real);
+ return err;
+ }
page_map->remapped = page_map->real;
for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
@@ -303,7 +307,7 @@ static int ati_insert_memory(struct agp_memory * mem,
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
- writel(agp_bridge->driver->mask_memory(agp_bridge,
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
page_to_phys(mem->pages[i]),
mem->type),
cur_gatt+GET_GATT_OFF(addr));
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 004a3ce8ba72..0e19c600db53 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -62,6 +62,7 @@ EXPORT_SYMBOL(agp_find_bridge);
/**
* agp_backend_acquire - attempt to acquire an agp backend.
+ * @pdev: the PCI device
*
*/
struct agp_bridge_data *agp_backend_acquire(struct pci_dev *pdev)
@@ -83,6 +84,7 @@ EXPORT_SYMBOL(agp_backend_acquire);
/**
* agp_backend_release - release the lock on the agp backend.
+ * @bridge: the AGP backend to release
*
* The caller must insure that the graphics aperture translation table
* is read for use by another entity.
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 00ff5fcb808a..321118a9cfa5 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -39,7 +39,9 @@
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
+
#include "agp.h"
+#include "compat_ioctl.h"
struct agp_front_data agp_fe;
@@ -1017,7 +1019,7 @@ static long agp_ioctl(struct file *file,
case AGPIOC_UNBIND:
ret_val = agpioc_unbind_wrap(curr_priv, (void __user *) arg);
break;
-
+
case AGPIOC_CHIPSET_FLUSH:
break;
}
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 5bfdf222d5f9..bf6716ff863b 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
+#include <linux/iommu.h>
#include <linux/delay.h>
#include <asm/smp.h>
#include "agp.h"
@@ -110,8 +111,8 @@ static int intel_gtt_map_memory(struct page **pages,
for_each_sg(st->sgl, sg, num_entries, i)
sg_set_page(sg, pages[i], PAGE_SIZE, 0);
- if (!pci_map_sg(intel_private.pcidev,
- st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL))
+ if (!dma_map_sg(&intel_private.pcidev->dev, st->sgl, st->nents,
+ DMA_BIDIRECTIONAL))
goto err;
return 0;
@@ -126,8 +127,8 @@ static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
struct sg_table st;
DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
- pci_unmap_sg(intel_private.pcidev, sg_list,
- num_sg, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_sg(&intel_private.pcidev->dev, sg_list, num_sg,
+ DMA_BIDIRECTIONAL);
st.sgl = sg_list;
st.orig_nents = st.nents = num_sg;
@@ -302,9 +303,9 @@ static int intel_gtt_setup_scratch_page(void)
set_pages_uc(page, 1);
if (intel_private.needs_dmar) {
- dma_addr = pci_map_page(intel_private.pcidev, page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) {
+ dma_addr = dma_map_page(&intel_private.pcidev->dev, page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&intel_private.pcidev->dev, dma_addr)) {
__free_page(page);
return -EINVAL;
}
@@ -551,9 +552,9 @@ static void intel_gtt_teardown_scratch_page(void)
{
set_pages_wb(intel_private.scratch_page, 1);
if (intel_private.needs_dmar)
- pci_unmap_page(intel_private.pcidev,
- intel_private.scratch_page_dma,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(&intel_private.pcidev->dev,
+ intel_private.scratch_page_dma, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
__free_page(intel_private.scratch_page);
}
@@ -572,18 +573,15 @@ static void intel_gtt_cleanup(void)
*/
static inline int needs_ilk_vtd_wa(void)
{
-#ifdef CONFIG_INTEL_IOMMU
const unsigned short gpu_devid = intel_private.pcidev->device;
- /* Query intel_iommu to see if we need the workaround. Presumably that
- * was loaded first.
+ /*
+ * Query iommu subsystem to see if we need the workaround. Presumably
+ * that was loaded first.
*/
- if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
- gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
- intel_iommu_gfx_mapped)
- return 1;
-#endif
- return 0;
+ return ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
+ gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
+ device_iommu_mapped(&intel_private.pcidev->dev));
}
static bool intel_gtt_can_wc(void)
@@ -743,7 +741,7 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry,
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
-bool intel_enable_gtt(void)
+bool intel_gmch_enable_gtt(void)
{
u8 __iomem *reg;
@@ -786,7 +784,7 @@ bool intel_enable_gtt(void)
return true;
}
-EXPORT_SYMBOL(intel_enable_gtt);
+EXPORT_SYMBOL(intel_gmch_enable_gtt);
static int i830_setup(void)
{
@@ -820,8 +818,8 @@ static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
static int intel_fake_agp_configure(void)
{
- if (!intel_enable_gtt())
- return -EIO;
+ if (!intel_gmch_enable_gtt())
+ return -EIO;
intel_private.clear_fake_agp = true;
agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
@@ -843,20 +841,20 @@ static bool i830_check_flags(unsigned int flags)
return false;
}
-void intel_gtt_insert_page(dma_addr_t addr,
- unsigned int pg,
- unsigned int flags)
+void intel_gmch_gtt_insert_page(dma_addr_t addr,
+ unsigned int pg,
+ unsigned int flags)
{
intel_private.driver->write_entry(addr, pg, flags);
readl(intel_private.gtt + pg);
if (intel_private.driver->chipset_flush)
intel_private.driver->chipset_flush();
}
-EXPORT_SYMBOL(intel_gtt_insert_page);
+EXPORT_SYMBOL(intel_gmch_gtt_insert_page);
-void intel_gtt_insert_sg_entries(struct sg_table *st,
- unsigned int pg_start,
- unsigned int flags)
+void intel_gmch_gtt_insert_sg_entries(struct sg_table *st,
+ unsigned int pg_start,
+ unsigned int flags)
{
struct scatterlist *sg;
unsigned int len, m;
@@ -878,13 +876,13 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
if (intel_private.driver->chipset_flush)
intel_private.driver->chipset_flush();
}
-EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
+EXPORT_SYMBOL(intel_gmch_gtt_insert_sg_entries);
#if IS_ENABLED(CONFIG_AGP_INTEL)
-static void intel_gtt_insert_pages(unsigned int first_entry,
- unsigned int num_entries,
- struct page **pages,
- unsigned int flags)
+static void intel_gmch_gtt_insert_pages(unsigned int first_entry,
+ unsigned int num_entries,
+ struct page **pages,
+ unsigned int flags)
{
int i, j;
@@ -904,7 +902,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
if (intel_private.clear_fake_agp) {
int start = intel_private.stolen_size / PAGE_SIZE;
int end = intel_private.gtt_mappable_entries;
- intel_gtt_clear_range(start, end - start);
+ intel_gmch_gtt_clear_range(start, end - start);
intel_private.clear_fake_agp = false;
}
@@ -933,12 +931,12 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
if (ret != 0)
return ret;
- intel_gtt_insert_sg_entries(&st, pg_start, type);
+ intel_gmch_gtt_insert_sg_entries(&st, pg_start, type);
mem->sg_list = st.sgl;
mem->num_sg = st.nents;
} else
- intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
- type);
+ intel_gmch_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
+ type);
out:
ret = 0;
@@ -948,7 +946,7 @@ out_err:
}
#endif
-void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
+void intel_gmch_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
{
unsigned int i;
@@ -958,7 +956,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
}
wmb();
}
-EXPORT_SYMBOL(intel_gtt_clear_range);
+EXPORT_SYMBOL(intel_gmch_gtt_clear_range);
#if IS_ENABLED(CONFIG_AGP_INTEL)
static int intel_fake_agp_remove_entries(struct agp_memory *mem,
@@ -967,7 +965,7 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem,
if (mem->page_count == 0)
return 0;
- intel_gtt_clear_range(pg_start, mem->page_count);
+ intel_gmch_gtt_clear_range(pg_start, mem->page_count);
if (intel_private.needs_dmar) {
intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
@@ -1411,13 +1409,13 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
if (bridge) {
mask = intel_private.driver->dma_mask_size;
- if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
+ if (dma_set_mask(&intel_private.pcidev->dev, DMA_BIT_MASK(mask)))
dev_err(&intel_private.pcidev->dev,
"set gfx device dma mask %d-bit failed!\n",
mask);
else
- pci_set_consistent_dma_mask(intel_private.pcidev,
- DMA_BIT_MASK(mask));
+ dma_set_coherent_mask(&intel_private.pcidev->dev,
+ DMA_BIT_MASK(mask));
}
if (intel_gtt_init() != 0) {
@@ -1430,22 +1428,22 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
}
EXPORT_SYMBOL(intel_gmch_probe);
-void intel_gtt_get(u64 *gtt_total,
- phys_addr_t *mappable_base,
- resource_size_t *mappable_end)
+void intel_gmch_gtt_get(u64 *gtt_total,
+ phys_addr_t *mappable_base,
+ resource_size_t *mappable_end)
{
*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
*mappable_base = intel_private.gma_bus_addr;
*mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
}
-EXPORT_SYMBOL(intel_gtt_get);
+EXPORT_SYMBOL(intel_gmch_gtt_get);
-void intel_gtt_chipset_flush(void)
+void intel_gmch_gtt_flush(void)
{
if (intel_private.driver->chipset_flush)
intel_private.driver->chipset_flush();
}
-EXPORT_SYMBOL(intel_gtt_chipset_flush);
+EXPORT_SYMBOL(intel_gmch_gtt_flush);
void intel_gmch_remove(void)
{
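
The intel-gtt.c hunks above drop the deprecated pci_dma_* wrappers in favour of the generic DMA API. A small hedged sketch of the one-to-one replacements, assuming <linux/dma-mapping.h> and a hypothetical foo_map_buffers() helper:

	/* Hypothetical helper showing the generic DMA API calls that replace
	 * the legacy pci_* wrappers in the hunks above. */
	static int foo_map_buffers(struct pci_dev *pdev, struct sg_table *st,
				   struct page *page, dma_addr_t *out)
	{
		struct device *dev = &pdev->dev;
		dma_addr_t dma_addr;

		/* was: pci_set_dma_mask() / pci_set_consistent_dma_mask() */
		if (dma_set_mask(dev, DMA_BIT_MASK(36)))
			return -EIO;
		dma_set_coherent_mask(dev, DMA_BIT_MASK(36));

		/* was: pci_map_sg(..., PCI_DMA_BIDIRECTIONAL) */
		if (!dma_map_sg(dev, st->sgl, st->nents, DMA_BIDIRECTIONAL))
			return -ENOMEM;

		/* was: pci_map_page() + pci_dma_mapping_error() */
		dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_addr)) {
			dma_unmap_sg(dev, st->sgl, st->nents, DMA_BIDIRECTIONAL);
			return -EINVAL;
		}

		*out = dma_addr;
		return 0;
	}
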
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index f78e756157db..826dbd06f6bb 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -261,7 +261,8 @@ static int nvidia_remove_memory(struct agp_memory *mem, off_t pg_start, int type
static void nvidia_tlbflush(struct agp_memory *mem)
{
unsigned long end;
- u32 wbc_reg, temp;
+ u32 wbc_reg;
+ u32 __maybe_unused temp;
int i;
/* flush chipset */
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index 14909fc5d767..f8a02f4bef1b 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -217,26 +217,14 @@ static void agp_sis_remove(struct pci_dev *pdev)
agp_put_bridge(bridge);
}
-#ifdef CONFIG_PM
+#define agp_sis_suspend NULL
-static int agp_sis_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused agp_sis_resume(
+ __attribute__((unused)) struct device *dev)
{
- pci_save_state(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
- return 0;
-}
-
-static int agp_sis_resume(struct pci_dev *pdev)
-{
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
return sis_driver.configure();
}
-#endif /* CONFIG_PM */
-
static const struct pci_device_id agp_sis_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
@@ -419,15 +407,14 @@ static const struct pci_device_id agp_sis_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_sis_pci_table);
+static SIMPLE_DEV_PM_OPS(agp_sis_pm_ops, agp_sis_suspend, agp_sis_resume);
+
static struct pci_driver agp_sis_pci_driver = {
.name = "agpgart-sis",
.id_table = agp_sis_pci_table,
.probe = agp_sis_probe,
.remove = agp_sis_remove,
-#ifdef CONFIG_PM
- .suspend = agp_sis_suspend,
- .resume = agp_sis_resume,
-#endif
+ .driver.pm = &agp_sis_pm_ops,
};
static int __init agp_sis_init(void)
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
index f875970bda65..b91da5998dd7 100644
--- a/drivers/char/agp/sworks-agp.c
+++ b/drivers/char/agp/sworks-agp.c
@@ -262,13 +262,10 @@ static void serverworks_tlbflush(struct agp_memory *temp)
static int serverworks_configure(void)
{
- struct aper_size_info_lvl2 *current_size;
u32 temp;
u8 enable_reg;
u16 cap_reg;
- current_size = A_SIZE_LVL2(agp_bridge->current_size);
-
/* Get the memory mapped registers */
pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs, &temp);
temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
@@ -350,7 +347,7 @@ static int serverworks_insert_memory(struct agp_memory *mem,
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = SVRWRKS_GET_GATT(addr);
- writel(agp_bridge->driver->mask_memory(agp_bridge,
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
page_to_phys(mem->pages[i]), mem->type),
cur_gatt+GET_GATT_OFF(addr));
}
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index 87a92a044570..b2f484f527fb 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -128,9 +128,6 @@ static int via_fetch_size_agp3(void)
static int via_configure_agp3(void)
{
u32 temp;
- struct aper_size_info_16 *current_size;
-
- current_size = A_SIZE_16(agp_bridge->current_size);
/* address to map to */
agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
@@ -492,22 +489,11 @@ static void agp_via_remove(struct pci_dev *pdev)
agp_put_bridge(bridge);
}
-#ifdef CONFIG_PM
-
-static int agp_via_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- pci_save_state (pdev);
- pci_set_power_state (pdev, PCI_D3hot);
-
- return 0;
-}
+#define agp_via_suspend NULL
-static int agp_via_resume(struct pci_dev *pdev)
+static int __maybe_unused agp_via_resume(struct device *dev)
{
- struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
-
- pci_set_power_state (pdev, PCI_D0);
- pci_restore_state(pdev);
+ struct agp_bridge_data *bridge = dev_get_drvdata(dev);
if (bridge->driver == &via_agp3_driver)
return via_configure_agp3();
@@ -517,8 +503,6 @@ static int agp_via_resume(struct pci_dev *pdev)
return 0;
}
-#endif /* CONFIG_PM */
-
/* must be the same order as name table above */
static const struct pci_device_id agp_via_pci_table[] = {
#define ID(x) \
@@ -567,16 +551,14 @@ static const struct pci_device_id agp_via_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_via_pci_table);
+static SIMPLE_DEV_PM_OPS(agp_via_pm_ops, agp_via_suspend, agp_via_resume);
static struct pci_driver agp_via_pci_driver = {
.name = "agpgart-via",
.id_table = agp_via_pci_table,
.probe = agp_via_probe,
.remove = agp_via_remove,
-#ifdef CONFIG_PM
- .suspend = agp_via_suspend,
- .resume = agp_via_resume,
-#endif
+ .driver.pm = &agp_via_pm_ops,
};
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 230cf852fa9c..e795390b070f 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -544,7 +544,7 @@ static int apm_suspend_notifier(struct notifier_block *nb,
wake_up_interruptible(&apm_waitqueue);
/*
- * Wait for the the suspend_acks_pending variable to drop to
+ * Wait for the suspend_acks_pending variable to drop to
* zero, meaning everybody acked the suspend event (or the
* process was killed.)
*
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index deb85a334c93..36203d3fa6ea 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -89,8 +89,8 @@ static struct applicom_board {
spinlock_t mutex;
} apbs[MAX_BOARD];
-static unsigned int irq = 0; /* interrupt number IRQ */
-static unsigned long mem = 0; /* physical segment of board */
+static unsigned int irq; /* interrupt number IRQ */
+static unsigned long mem; /* physical segment of board */
module_param_hw(irq, uint, irq, 0);
MODULE_PARM_DESC(irq, "IRQ of the Applicom board");
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index cce2af5df7b4..d5f943938427 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -60,7 +60,7 @@ struct bsr_dev {
};
static unsigned total_bsr_devs;
-static struct list_head bsr_devs = LIST_HEAD_INIT(bsr_devs);
+static LIST_HEAD(bsr_devs);
static struct class *bsr_class;
static int bsr_major;
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 4e5431f01450..ee71376f174b 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -16,6 +16,7 @@
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/poll.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
@@ -120,22 +121,6 @@ static struct hpets *hpets;
#define HPET_PERIODIC 0x0004
#define HPET_SHARED_IRQ 0x0008
-
-#ifndef readq
-static inline unsigned long long readq(void __iomem *addr)
-{
- return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
-}
-#endif
-
-#ifndef writeq
-static inline void writeq(unsigned long long v, void __iomem *addr)
-{
- writel(v & 0xffffffff, addr);
- writel(v >> 32, addr + 4);
-}
-#endif
-
static irqreturn_t hpet_interrupt(int irq, void *data)
{
struct hpet_dev *devp;
@@ -268,9 +253,9 @@ static int hpet_open(struct inode *inode, struct file *file)
for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
for (i = 0; i < hpetp->hp_ntimer; i++)
- if (hpetp->hp_dev[i].hd_flags & HPET_OPEN)
+ if (hpetp->hp_dev[i].hd_flags & HPET_OPEN) {
continue;
- else {
+ } else {
devp = &hpetp->hp_dev[i];
break;
}
@@ -317,9 +302,9 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
devp->hd_irqdata = 0;
spin_unlock_irq(&hpet_lock);
- if (data)
+ if (data) {
break;
- else if (file->f_flags & O_NONBLOCK) {
+ } else if (file->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
goto out;
} else if (signal_pending(current)) {
@@ -746,26 +731,6 @@ static struct ctl_table hpet_table[] = {
{}
};
-static struct ctl_table hpet_root[] = {
- {
- .procname = "hpet",
- .maxlen = 0,
- .mode = 0555,
- .child = hpet_table,
- },
- {}
-};
-
-static struct ctl_table dev_root[] = {
- {
- .procname = "dev",
- .maxlen = 0,
- .mode = 0555,
- .child = hpet_root,
- },
- {}
-};
-
static struct ctl_table_header *sysctl_header;
/*
@@ -1002,7 +967,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
break;
irq = acpi_register_gsi(NULL, irqp->interrupts[i],
- irqp->triggering, irqp->polarity);
+ irqp->triggering,
+ irqp->polarity);
if (irq < 0)
return AE_ERROR;
@@ -1061,7 +1027,7 @@ static int __init hpet_init(void)
if (result < 0)
return -ENODEV;
- sysctl_header = register_sysctl_table(dev_root);
+ sysctl_header = register_sysctl("dev/hpet", hpet_table);
result = acpi_bus_register_driver(&hpet_acpi_driver);
if (result < 0) {
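
An earlier hpet.c hunk removes the driver's private readq()/writeq() fallbacks and includes <linux/io-64-nonatomic-lo-hi.h> instead. Roughly, that header provides the same low-word-then-high-word split for architectures without native 64-bit MMIO; an approximate sketch (the real helpers live in include/linux/io-64-nonatomic-lo-hi.h):

	static inline u64 lo_hi_readq(const volatile void __iomem *addr)
	{
		u32 low = readl(addr);
		u32 high = readl(addr + 4);

		return low + ((u64)high << 32);
	}

	static inline void lo_hi_writeq(u64 val, volatile void __iomem *addr)
	{
		writel(val, addr);		/* low 32 bits first */
		writel(val >> 32, addr + 4);
	}
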
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 814b3d0ca7b7..3da8e85f8aae 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -87,7 +87,7 @@ config HW_RANDOM_BA431
config HW_RANDOM_BCM2835
tristate "Broadcom BCM2835/BCM63xx Random Number Generator support"
depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \
- ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC || COMPILE_TEST
+ ARCH_BCMBCA || BCM63XX || BMIPS_GENERIC || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -226,19 +226,6 @@ config HW_RANDOM_VIRTIO
To compile this driver as a module, choose M here: the
module will be called virtio-rng. If unsure, say N.
-config HW_RANDOM_TX4939
- tristate "TX4939 Random Number Generator support"
- depends on SOC_TX4939
- default HW_RANDOM
- help
- This driver provides kernel-side support for the Random Number
- Generator hardware found on TX4939 SoC.
-
- To compile this driver as a module, choose M here: the
- module will be called tx4939-rng.
-
- If unsure, say Y.
-
config HW_RANDOM_MXC_RNGA
tristate "Freescale i.MX RNGA Random Number Generator"
depends on SOC_IMX31
@@ -398,6 +385,19 @@ config HW_RANDOM_PIC32
If unsure, say Y.
+config HW_RANDOM_POLARFIRE_SOC
+ tristate "Microchip PolarFire SoC Random Number Generator support"
+ depends on HW_RANDOM && POLARFIRE_SOC_SYS_CTRL
+ help
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on PolarFire SoC (MPFS).
+
+ To compile this driver as a module, choose M here. The
+ module will be called mfps_rng.
+
+ If unsure, say N.
+
+
config HW_RANDOM_MESON
tristate "Amlogic Meson Random Number Generator support"
depends on HW_RANDOM
@@ -414,7 +414,7 @@ config HW_RANDOM_MESON
config HW_RANDOM_CAVIUM
tristate "Cavium ThunderX Random Number Generator support"
- depends on HW_RANDOM && PCI && (ARM64 || (COMPILE_TEST && 64BIT))
+ depends on HW_RANDOM && PCI && ARCH_THUNDER
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -538,6 +538,17 @@ config HW_RANDOM_ARM_SMCCC_TRNG
To compile this driver as a module, choose M here: the
module will be called arm_smccc_trng.
+config HW_RANDOM_CN10K
+ tristate "Marvell CN10K Random Number Generator support"
+ depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST))
+ default HW_RANDOM
+ help
+ This driver provides support for the True Random Number
+ generator available in Marvell CN10K SoCs.
+
+ To compile this driver as a module, choose M here.
+ The module will be called cn10k_rng. If unsure, say Y.
+
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index a5a1c765a394..3e948cf04476 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -20,7 +20,6 @@ obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o
obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
-obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
obj-$(CONFIG_HW_RANDOM_IMX_RNGC) += imx-rngc.o
obj-$(CONFIG_HW_RANDOM_INGENIC_RNG) += ingenic-rng.o
@@ -46,3 +45,5 @@ obj-$(CONFIG_HW_RANDOM_NPCM) += npcm-rng.o
obj-$(CONFIG_HW_RANDOM_CCTRNG) += cctrng.o
obj-$(CONFIG_HW_RANDOM_XIPHERA) += xiphera-trng.o
obj-$(CONFIG_HW_RANDOM_ARM_SMCCC_TRNG) += arm_smccc_trng.o
+obj-$(CONFIG_HW_RANDOM_CN10K) += cn10k-rng.o
+obj-$(CONFIG_HW_RANDOM_POLARFIRE_SOC) += mpfs-rng.o
diff --git a/drivers/char/hw_random/arm_smccc_trng.c b/drivers/char/hw_random/arm_smccc_trng.c
index b24ac39a903b..e34c3ea692b6 100644
--- a/drivers/char/hw_random/arm_smccc_trng.c
+++ b/drivers/char/hw_random/arm_smccc_trng.c
@@ -71,8 +71,6 @@ static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
MAX_BITS_PER_CALL);
arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND, bits, &res);
- if ((int)res.a0 < 0)
- return (int)res.a0;
switch ((int)res.a0) {
case SMCCC_RET_SUCCESS:
@@ -88,6 +86,8 @@ static int smccc_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
return copied;
cond_resched();
break;
+ default:
+ return -EIO;
}
}
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index ecb71c4317a5..b8effe77d80f 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -13,13 +13,16 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/hw_random.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#define TRNG_CR 0x00
#define TRNG_MR 0x04
#define TRNG_ISR 0x1c
+#define TRNG_ISR_DATRDY BIT(0)
#define TRNG_ODATA 0x50
#define TRNG_KEY 0x524e4700 /* RNG */
@@ -34,37 +37,79 @@ struct atmel_trng {
struct clk *clk;
void __iomem *base;
struct hwrng rng;
+ bool has_half_rate;
};
+static bool atmel_trng_wait_ready(struct atmel_trng *trng, bool wait)
+{
+ int ready;
+
+ ready = readl(trng->base + TRNG_ISR) & TRNG_ISR_DATRDY;
+ if (!ready && wait)
+ readl_poll_timeout(trng->base + TRNG_ISR, ready,
+ ready & TRNG_ISR_DATRDY, 1000, 20000);
+
+ return !!ready;
+}
+
static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
bool wait)
{
struct atmel_trng *trng = container_of(rng, struct atmel_trng, rng);
u32 *data = buf;
+ int ret;
- /* data ready? */
- if (readl(trng->base + TRNG_ISR) & 1) {
- *data = readl(trng->base + TRNG_ODATA);
- /*
- ensure data ready is only set again AFTER the next data
- word is ready in case it got set between checking ISR
- and reading ODATA, so we don't risk re-reading the
- same word
- */
- readl(trng->base + TRNG_ISR);
- return 4;
- } else
- return 0;
+ ret = pm_runtime_get_sync((struct device *)trng->rng.priv);
+ if (ret < 0) {
+ pm_runtime_put_sync((struct device *)trng->rng.priv);
+ return ret;
+ }
+
+ ret = atmel_trng_wait_ready(trng, wait);
+ if (!ret)
+ goto out;
+
+ *data = readl(trng->base + TRNG_ODATA);
+ /*
+ * ensure data ready is only set again AFTER the next data word is ready
+ * in case it got set between checking ISR and reading ODATA, so we
+ * don't risk re-reading the same word
+ */
+ readl(trng->base + TRNG_ISR);
+ ret = 4;
+
+out:
+ pm_runtime_mark_last_busy((struct device *)trng->rng.priv);
+ pm_runtime_put_sync_autosuspend((struct device *)trng->rng.priv);
+ return ret;
}
-static void atmel_trng_enable(struct atmel_trng *trng)
+static int atmel_trng_init(struct atmel_trng *trng)
{
+ unsigned long rate;
+ int ret;
+
+ ret = clk_prepare_enable(trng->clk);
+ if (ret)
+ return ret;
+
+ if (trng->has_half_rate) {
+ rate = clk_get_rate(trng->clk);
+
+ /* if peripheral clk is above 100MHz, set HALFR */
+ if (rate > 100000000)
+ writel(TRNG_HALFR, trng->base + TRNG_MR);
+ }
+
writel(TRNG_KEY | 1, trng->base + TRNG_CR);
+
+ return 0;
}
-static void atmel_trng_disable(struct atmel_trng *trng)
+static void atmel_trng_cleanup(struct atmel_trng *trng)
{
writel(TRNG_KEY, trng->base + TRNG_CR);
+ clk_disable_unprepare(trng->clk);
}
static int atmel_trng_probe(struct platform_device *pdev)
@@ -88,32 +133,31 @@ static int atmel_trng_probe(struct platform_device *pdev)
if (!data)
return -ENODEV;
- if (data->has_half_rate) {
- unsigned long rate = clk_get_rate(trng->clk);
-
- /* if peripheral clk is above 100MHz, set HALFR */
- if (rate > 100000000)
- writel(TRNG_HALFR, trng->base + TRNG_MR);
- }
-
- ret = clk_prepare_enable(trng->clk);
- if (ret)
- return ret;
-
- atmel_trng_enable(trng);
+ trng->has_half_rate = data->has_half_rate;
trng->rng.name = pdev->name;
trng->rng.read = atmel_trng_read;
+ trng->rng.priv = (unsigned long)&pdev->dev;
+ platform_set_drvdata(pdev, trng);
- ret = devm_hwrng_register(&pdev->dev, &trng->rng);
+#ifndef CONFIG_PM
+ ret = atmel_trng_init(trng);
if (ret)
- goto err_register;
+ return ret;
+#endif
- platform_set_drvdata(pdev, trng);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
- return 0;
+ ret = devm_hwrng_register(&pdev->dev, &trng->rng);
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+#ifndef CONFIG_PM
+ atmel_trng_cleanup(trng);
+#endif
+ }
-err_register:
- clk_disable_unprepare(trng->clk);
return ret;
}
@@ -121,43 +165,35 @@ static int atmel_trng_remove(struct platform_device *pdev)
{
struct atmel_trng *trng = platform_get_drvdata(pdev);
-
- atmel_trng_disable(trng);
- clk_disable_unprepare(trng->clk);
+ atmel_trng_cleanup(trng);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
return 0;
}
-#ifdef CONFIG_PM
-static int atmel_trng_suspend(struct device *dev)
+static int __maybe_unused atmel_trng_runtime_suspend(struct device *dev)
{
struct atmel_trng *trng = dev_get_drvdata(dev);
- atmel_trng_disable(trng);
- clk_disable_unprepare(trng->clk);
+ atmel_trng_cleanup(trng);
return 0;
}
-static int atmel_trng_resume(struct device *dev)
+static int __maybe_unused atmel_trng_runtime_resume(struct device *dev)
{
struct atmel_trng *trng = dev_get_drvdata(dev);
- int ret;
- ret = clk_prepare_enable(trng->clk);
- if (ret)
- return ret;
-
- atmel_trng_enable(trng);
-
- return 0;
+ return atmel_trng_init(trng);
}
-static const struct dev_pm_ops atmel_trng_pm_ops = {
- .suspend = atmel_trng_suspend,
- .resume = atmel_trng_resume,
+static const struct dev_pm_ops __maybe_unused atmel_trng_pm_ops = {
+ SET_RUNTIME_PM_OPS(atmel_trng_runtime_suspend,
+ atmel_trng_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
-#endif /* CONFIG_PM */
static const struct atmel_trng_data at91sam9g45_config = {
.has_half_rate = false,
@@ -185,9 +221,7 @@ static struct platform_driver atmel_trng_driver = {
.remove = atmel_trng_remove,
.driver = {
.name = "atmel-trng",
-#ifdef CONFIG_PM
- .pm = &atmel_trng_pm_ops,
-#endif /* CONFIG_PM */
+ .pm = pm_ptr(&atmel_trng_pm_ops),
.of_match_table = atmel_trng_dt_ids,
},
};
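
The atmel-rng conversion above replaces always-on clocking with runtime PM plus autosuspend, so the TRNG clock is only enabled around reads. A condensed, hedged sketch of that recipe for a hypothetical foo_rng driver (the foo_* names and foo_hw_read() are placeholders):

	static int foo_rng_runtime_suspend(struct device *dev)
	{
		/* gate clocks, quiesce the block */
		return 0;
	}

	static int foo_rng_runtime_resume(struct device *dev)
	{
		/* ungate clocks, re-initialize the block */
		return 0;
	}

	static const struct dev_pm_ops foo_rng_pm_ops = {
		SET_RUNTIME_PM_OPS(foo_rng_runtime_suspend,
				   foo_rng_runtime_resume, NULL)
		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
					pm_runtime_force_resume)
	};

	/* Probe: arm autosuspend so the device idles shortly after each read. */
	static void foo_rng_setup_pm(struct device *dev)
	{
		pm_runtime_set_autosuspend_delay(dev, 100);	/* ms */
		pm_runtime_use_autosuspend(dev);
		pm_runtime_enable(dev);
	}

	/* Read path: resume, do the I/O, then let autosuspend gate it again. */
	static int foo_rng_read_one(struct device *dev, u32 *out)
	{
		int ret = pm_runtime_get_sync(dev);

		if (ret < 0) {
			pm_runtime_put_sync(dev);	/* balance the usage count */
			return ret;
		}
		*out = foo_hw_read(dev);
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_sync_autosuspend(dev);
		return 0;
	}
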
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index e7dd457e9b22..e98fcac578d6 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -71,7 +71,7 @@ static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max,
while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) {
if (!wait)
return 0;
- cpu_relax();
+ hwrng_msleep(rng, 1000);
}
num_words = rng_readl(priv, RNG_STATUS) >> 24;
diff --git a/drivers/char/hw_random/cavium-rng-vf.c b/drivers/char/hw_random/cavium-rng-vf.c
index 3de4a6a443ef..7c55f4cf4a8b 100644
--- a/drivers/char/hw_random/cavium-rng-vf.c
+++ b/drivers/char/hw_random/cavium-rng-vf.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Hardware Random Number Generator support for Cavium, Inc.
- * Thunder processor family.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
+ * Hardware Random Number Generator support.
+ * Cavium Thunder, Marvell OcteonTx/Tx2 processor families.
*
* Copyright (C) 2016 Cavium, Inc.
*/
@@ -15,16 +12,146 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
+#include <asm/arch_timer.h>
+
+/* PCI device IDs */
+#define PCI_DEVID_CAVIUM_RNG_PF 0xA018
+#define PCI_DEVID_CAVIUM_RNG_VF 0xA033
+
+#define HEALTH_STATUS_REG 0x38
+
+/* RST device info */
+#define PCI_DEVICE_ID_RST_OTX2 0xA085
+#define RST_BOOT_REG 0x1600ULL
+#define CLOCK_BASE_RATE 50000000ULL
+#define MSEC_TO_NSEC(x) (x * 1000000)
+
struct cavium_rng {
struct hwrng ops;
void __iomem *result;
+ void __iomem *pf_regbase;
+ struct pci_dev *pdev;
+ u64 clock_rate;
+ u64 prev_error;
+ u64 prev_time;
};
+static inline bool is_octeontx(struct pci_dev *pdev)
+{
+ if (midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX_83XX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(3, 0)) ||
+ midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX_81XX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(3, 0)) ||
+ midr_is_cpu_model_range(read_cpuid_id(), MIDR_THUNDERX,
+ MIDR_CPU_VAR_REV(0, 0),
+ MIDR_CPU_VAR_REV(3, 0)))
+ return true;
+
+ return false;
+}
+
+static u64 rng_get_coprocessor_clkrate(void)
+{
+ u64 ret = CLOCK_BASE_RATE * 16; /* Assume 800Mhz as default */
+ struct pci_dev *pdev;
+ void __iomem *base;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_RST_OTX2, NULL);
+ if (!pdev)
+ goto error;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto error_put_pdev;
+
+ /* RST: PNR_MUL * 50Mhz gives clockrate */
+ ret = CLOCK_BASE_RATE * ((readq(base + RST_BOOT_REG) >> 33) & 0x3F);
+
+ iounmap(base);
+
+error_put_pdev:
+ pci_dev_put(pdev);
+
+error:
+ return ret;
+}
+
+static int check_rng_health(struct cavium_rng *rng)
+{
+ u64 cur_err, cur_time;
+ u64 status, cycles;
+ u64 time_elapsed;
+
+
+ /* Skip checking health for OcteonTx */
+ if (!rng->pf_regbase)
+ return 0;
+
+ status = readq(rng->pf_regbase + HEALTH_STATUS_REG);
+ if (status & BIT_ULL(0)) {
+ dev_err(&rng->pdev->dev, "HWRNG: Startup health test failed\n");
+ return -EIO;
+ }
+
+ cycles = status >> 1;
+ if (!cycles)
+ return 0;
+
+ cur_time = arch_timer_read_counter();
+
+ /* RNM_HEALTH_STATUS[CYCLES_SINCE_HEALTH_FAILURE]
+ * Number of coprocessor cycles times 2 since the last failure.
+ * This field doesn't get cleared/updated until another failure.
+ */
+ cycles = cycles / 2;
+ cur_err = (cycles * 1000000000) / rng->clock_rate; /* In nanosec */
+
+ /* Ignore errors that happened a long time ago, these
+ * are most likely false positive errors.
+ */
+ if (cur_err > MSEC_TO_NSEC(10)) {
+ rng->prev_error = 0;
+ rng->prev_time = 0;
+ return 0;
+ }
+
+ if (rng->prev_error) {
+ /* Calculate time elapsed since last error
+ * '1' tick of CNTVCT is 10ns, since it runs at 100Mhz.
+ */
+ time_elapsed = (cur_time - rng->prev_time) * 10;
+ time_elapsed += rng->prev_error;
+
+ /* Check if current error is a new one or the old one itself.
+ * If error is a new one then consider there is a persistent
+ * issue with entropy, declare hardware failure.
+ */
+ if (cur_err < time_elapsed) {
+ dev_err(&rng->pdev->dev, "HWRNG failure detected\n");
+ rng->prev_error = cur_err;
+ rng->prev_time = cur_time;
+ return -EIO;
+ }
+ }
+
+ rng->prev_error = cur_err;
+ rng->prev_time = cur_time;
+ return 0;
+}
+
/* Read data from the RNG unit */
static int cavium_rng_read(struct hwrng *rng, void *dat, size_t max, bool wait)
{
struct cavium_rng *p = container_of(rng, struct cavium_rng, ops);
unsigned int size = max;
+ int err = 0;
+
+ err = check_rng_health(p);
+ if (err)
+ return err;
while (size >= 8) {
*((u64 *)dat) = readq(p->result);
@@ -39,6 +166,39 @@ static int cavium_rng_read(struct hwrng *rng, void *dat, size_t max, bool wait)
return max;
}
+static int cavium_map_pf_regs(struct cavium_rng *rng)
+{
+ struct pci_dev *pdev;
+
+ /* Health status is not supported on 83xx, skip mapping PF CSRs */
+ if (is_octeontx(rng->pdev)) {
+ rng->pf_regbase = NULL;
+ return 0;
+ }
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_CAVIUM_RNG_PF, NULL);
+ if (!pdev) {
+ pr_err("Cannot find RNG PF device\n");
+ return -EIO;
+ }
+
+ rng->pf_regbase = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!rng->pf_regbase) {
+ dev_err(&pdev->dev, "Failed to map PF CSR region\n");
+ pci_dev_put(pdev);
+ return -ENOMEM;
+ }
+
+ pci_dev_put(pdev);
+
+ /* Get co-processor clock rate */
+ rng->clock_rate = rng_get_coprocessor_clkrate();
+
+ return 0;
+}
+
/* Map Cavium RNG to an HWRNG object */
static int cavium_rng_probe_vf(struct pci_dev *pdev,
const struct pci_device_id *id)
@@ -50,6 +210,8 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev,
if (!rng)
return -ENOMEM;
+ rng->pdev = pdev;
+
/* Map the RNG result */
rng->result = pcim_iomap(pdev, 0, 0);
if (!rng->result) {
@@ -67,6 +229,11 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev,
pci_set_drvdata(pdev, rng);
+ /* Health status is available only at PF, hence map PF registers. */
+ ret = cavium_map_pf_regs(rng);
+ if (ret)
+ return ret;
+
ret = devm_hwrng_register(&pdev->dev, &rng->ops);
if (ret) {
dev_err(&pdev->dev, "Error registering device as HWRNG.\n");
@@ -76,10 +243,18 @@ static int cavium_rng_probe_vf(struct pci_dev *pdev,
return 0;
}
+/* Remove the VF */
+static void cavium_rng_remove_vf(struct pci_dev *pdev)
+{
+ struct cavium_rng *rng;
+
+ rng = pci_get_drvdata(pdev);
+ iounmap(rng->pf_regbase);
+}
static const struct pci_device_id cavium_rng_vf_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa033), 0, 0, 0},
- {0,},
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CAVIUM_RNG_VF) },
+ { 0, }
};
MODULE_DEVICE_TABLE(pci, cavium_rng_vf_id_table);
@@ -87,8 +262,9 @@ static struct pci_driver cavium_rng_vf_driver = {
.name = "cavium_rng_vf",
.id_table = cavium_rng_vf_id_table,
.probe = cavium_rng_probe_vf,
+ .remove = cavium_rng_remove_vf,
};
module_pci_driver(cavium_rng_vf_driver);
MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/cavium-rng.c b/drivers/char/hw_random/cavium-rng.c
index 63d6e68c24d2..b96579222408 100644
--- a/drivers/char/hw_random/cavium-rng.c
+++ b/drivers/char/hw_random/cavium-rng.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Hardware Random Number Generator support for Cavium Inc.
- * Thunder processor family.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
+ * Hardware Random Number Generator support.
+ * Cavium Thunder, Marvell OcteonTx/Tx2 processor families.
*
* Copyright (C) 2016 Cavium, Inc.
*/
@@ -91,4 +88,4 @@ static struct pci_driver cavium_rng_pf_driver = {
module_pci_driver(cavium_rng_pf_driver);
MODULE_AUTHOR("Omer Khaliq <okhaliq@caviumnetworks.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/cn10k-rng.c b/drivers/char/hw_random/cn10k-rng.c
new file mode 100644
index 000000000000..a01e9307737c
--- /dev/null
+++ b/drivers/char/hw_random/cn10k-rng.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CN10K RVU Hardware Random Number Generator.
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/delay.h>
+
+#include <linux/arm-smccc.h>
+
+/* CSRs */
+#define RNM_CTL_STATUS 0x000
+#define RNM_ENTROPY_STATUS 0x008
+#define RNM_CONST 0x030
+#define RNM_EBG_ENT 0x048
+#define RNM_PF_EBG_HEALTH 0x050
+#define RNM_PF_RANDOM 0x400
+#define RNM_TRNG_RESULT 0x408
+
+struct cn10k_rng {
+ void __iomem *reg_base;
+ struct hwrng ops;
+ struct pci_dev *pdev;
+};
+
+#define PLAT_OCTEONTX_RESET_RNG_EBG_HEALTH_STATE 0xc2000b0f
+
+static unsigned long reset_rng_health_state(struct cn10k_rng *rng)
+{
+ struct arm_smccc_res res;
+
+ /* Send SMC service call to reset EBG health state */
+ arm_smccc_smc(PLAT_OCTEONTX_RESET_RNG_EBG_HEALTH_STATE, 0, 0, 0, 0, 0, 0, 0, &res);
+ return res.a0;
+}
+
+static int check_rng_health(struct cn10k_rng *rng)
+{
+ u64 status;
+ unsigned long err;
+
+ /* Skip checking health */
+ if (!rng->reg_base)
+ return -ENODEV;
+
+ status = readq(rng->reg_base + RNM_PF_EBG_HEALTH);
+ if (status & BIT_ULL(20)) {
+ err = reset_rng_health_state(rng);
+ if (err) {
+ dev_err(&rng->pdev->dev, "HWRNG: Health test failed (status=%llx)\n",
+ status);
+ dev_err(&rng->pdev->dev, "HWRNG: error during reset (error=%lx)\n",
+ err);
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+static void cn10k_read_trng(struct cn10k_rng *rng, u64 *value)
+{
+ u64 upper, lower;
+
+ *value = readq(rng->reg_base + RNM_PF_RANDOM);
+
+ /* HW can run out of entropy if a large amount of random data is read in
+ * quick succession. Zeros may not be real random data from HW.
+ */
+ if (!*value) {
+ upper = readq(rng->reg_base + RNM_PF_RANDOM);
+ lower = readq(rng->reg_base + RNM_PF_RANDOM);
+ while (!(upper & 0x00000000FFFFFFFFULL))
+ upper = readq(rng->reg_base + RNM_PF_RANDOM);
+ while (!(lower & 0xFFFFFFFF00000000ULL))
+ lower = readq(rng->reg_base + RNM_PF_RANDOM);
+
+ *value = (upper & 0xFFFFFFFF00000000) | (lower & 0xFFFFFFFF);
+ }
+}
+
+static int cn10k_rng_read(struct hwrng *hwrng, void *data,
+ size_t max, bool wait)
+{
+ struct cn10k_rng *rng = (struct cn10k_rng *)hwrng->priv;
+ unsigned int size;
+ u8 *pos = data;
+ int err = 0;
+ u64 value;
+
+ err = check_rng_health(rng);
+ if (err)
+ return err;
+
+ size = max;
+
+ while (size >= 8) {
+ cn10k_read_trng(rng, &value);
+
+ *((u64 *)pos) = value;
+ size -= 8;
+ pos += 8;
+ }
+
+ if (size > 0) {
+ cn10k_read_trng(rng, &value);
+
+ while (size > 0) {
+ *pos = (u8)value;
+ value >>= 8;
+ size--;
+ pos++;
+ }
+ }
+
+ return max - size;
+}
+
+static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct cn10k_rng *rng;
+ int err;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ rng->pdev = pdev;
+ pci_set_drvdata(pdev, rng);
+
+ rng->reg_base = pcim_iomap(pdev, 0, 0);
+ if (!rng->reg_base) {
+ dev_err(&pdev->dev, "Error while mapping CSRs, exiting\n");
+ return -ENOMEM;
+ }
+
+ rng->ops.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "cn10k-rng-%s", dev_name(&pdev->dev));
+ if (!rng->ops.name)
+ return -ENOMEM;
+
+ rng->ops.read = cn10k_rng_read;
+ rng->ops.quality = 1000;
+ rng->ops.priv = (unsigned long)rng;
+
+ reset_rng_health_state(rng);
+
+ err = devm_hwrng_register(&pdev->dev, &rng->ops);
+ if (err) {
+ dev_err(&pdev->dev, "Could not register hwrng device.\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static void cn10k_rng_remove(struct pci_dev *pdev)
+{
+ /* Nothing to do */
+}
+
+static const struct pci_device_id cn10k_rng_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xA098) }, /* RNG PF */
+ {0,},
+};
+
+MODULE_DEVICE_TABLE(pci, cn10k_rng_id_table);
+
+static struct pci_driver cn10k_rng_driver = {
+ .name = "cn10k_rng",
+ .id_table = cn10k_rng_id_table,
+ .probe = cn10k_rng_probe,
+ .remove = cn10k_rng_remove,
+};
+
+module_pci_driver(cn10k_rng_driver);
+MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
+MODULE_DESCRIPTION("Marvell CN10K HW RNG Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index a3db27916256..cc002b0c2f0c 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -15,6 +15,7 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
+#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
@@ -31,7 +32,7 @@ static struct hwrng *current_rng;
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
static struct task_struct *hwrng_fill;
-/* list of registered rngs, sorted decending by quality */
+/* list of registered rngs */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
@@ -44,14 +45,14 @@ static unsigned short default_quality; /* = 0; default to "off" */
module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
- "current hwrng entropy estimation per 1024 bits of input");
+ "current hwrng entropy estimation per 1024 bits of input -- obsolete, use rng_quality instead");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
"default entropy content of hwrng per 1024 bits of input");
static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
-static void start_khwrngd(void);
+static int hwrng_fillfn(void *unused);
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
int wait);
@@ -64,13 +65,12 @@ static size_t rng_buffer_size(void)
static void add_early_randomness(struct hwrng *rng)
{
int bytes_read;
- size_t size = min_t(size_t, 16, rng_buffer_size());
mutex_lock(&reading_mutex);
- bytes_read = rng_get_data(rng, rng_buffer, size, 0);
+ bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0);
mutex_unlock(&reading_mutex);
if (bytes_read > 0)
- add_device_randomness(rng_buffer, bytes_read);
+ add_device_randomness(rng_fillbuf, bytes_read);
}
static inline void cleanup_rng(struct kref *kref)
@@ -96,6 +96,15 @@ static int set_current_rng(struct hwrng *rng)
drop_current_rng();
current_rng = rng;
+ /* if necessary, start hwrng thread */
+ if (!hwrng_fill) {
+ hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
+ if (IS_ERR(hwrng_fill)) {
+ pr_err("hwrng_fill thread creation failed\n");
+ hwrng_fill = NULL;
+ }
+ }
+
return 0;
}
@@ -161,14 +170,11 @@ static int hwrng_init(struct hwrng *rng)
reinit_completion(&rng->cleanup_done);
skip_init:
- current_quality = rng->quality ? : default_quality;
- if (current_quality > 1024)
- current_quality = 1024;
-
- if (current_quality == 0 && hwrng_fill)
- kthread_stop(hwrng_fill);
- if (current_quality > 0 && !hwrng_fill)
- start_khwrngd();
+ if (!rng->quality)
+ rng->quality = default_quality;
+ if (rng->quality > 1024)
+ rng->quality = 1024;
+ current_quality = rng->quality; /* obsolete */
return 0;
}
@@ -298,24 +304,28 @@ static struct miscdevice rng_miscdev = {
static int enable_best_rng(void)
{
+ struct hwrng *rng, *new_rng = NULL;
int ret = -ENODEV;
BUG_ON(!mutex_is_locked(&rng_mutex));
- /* rng_list is sorted by quality, use the best (=first) one */
- if (!list_empty(&rng_list)) {
- struct hwrng *new_rng;
-
- new_rng = list_entry(rng_list.next, struct hwrng, list);
- ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
- if (!ret)
- cur_rng_set_by_user = 0;
- } else {
+ /* no rng to use? */
+ if (list_empty(&rng_list)) {
drop_current_rng();
cur_rng_set_by_user = 0;
- ret = 0;
+ return 0;
}
+ /* use the rng which offers the best quality */
+ list_for_each_entry(rng, &rng_list, list) {
+ if (!new_rng || rng->quality > new_rng->quality)
+ new_rng = rng;
+ }
+
+ ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
+ if (!ret)
+ cur_rng_set_by_user = 0;
+
return ret;
}
@@ -336,8 +346,9 @@ static ssize_t rng_current_store(struct device *dev,
} else {
list_for_each_entry(rng, &rng_list, list) {
if (sysfs_streq(rng->name, buf)) {
- cur_rng_set_by_user = 1;
err = set_current_rng(rng);
+ if (!err)
+ cur_rng_set_by_user = 1;
break;
}
}
@@ -399,14 +410,72 @@ static ssize_t rng_selected_show(struct device *dev,
return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
}
+static ssize_t rng_quality_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t ret;
+ struct hwrng *rng;
+
+ rng = get_current_rng();
+ if (IS_ERR(rng))
+ return PTR_ERR(rng);
+
+ if (!rng) /* no need to put_rng */
+ return -ENODEV;
+
+ ret = sysfs_emit(buf, "%hu\n", rng->quality);
+ put_rng(rng);
+
+ return ret;
+}
+
+static ssize_t rng_quality_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ u16 quality;
+ int ret = -EINVAL;
+
+ if (len < 2)
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&rng_mutex);
+ if (ret)
+ return -ERESTARTSYS;
+
+ ret = kstrtou16(buf, 0, &quality);
+ if (ret || quality > 1024) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!current_rng) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ current_rng->quality = quality;
+ current_quality = quality; /* obsolete */
+
+ /* the best available RNG may have changed */
+ ret = enable_best_rng();
+
+out:
+ mutex_unlock(&rng_mutex);
+ return ret ? ret : len;
+}
+
static DEVICE_ATTR_RW(rng_current);
static DEVICE_ATTR_RO(rng_available);
static DEVICE_ATTR_RO(rng_selected);
+static DEVICE_ATTR_RW(rng_quality);
static struct attribute *rng_dev_attrs[] = {
&dev_attr_rng_current.attr,
&dev_attr_rng_available.attr,
&dev_attr_rng_selected.attr,
+ &dev_attr_rng_quality.attr,
NULL
};
@@ -424,9 +493,11 @@ static int __init register_miscdev(void)
static int hwrng_fillfn(void *unused)
{
+ size_t entropy, entropy_credit = 0; /* in 1/1024 of a bit */
long rc;
while (!kthread_should_stop()) {
+ unsigned short quality;
struct hwrng *rng;
rng = get_current_rng();
@@ -435,35 +506,38 @@ static int hwrng_fillfn(void *unused)
mutex_lock(&reading_mutex);
rc = rng_get_data(rng, rng_fillbuf,
rng_buffer_size(), 1);
+ if (current_quality != rng->quality)
+ rng->quality = current_quality; /* obsolete */
+ quality = rng->quality;
mutex_unlock(&reading_mutex);
+
+ if (rc <= 0)
+ hwrng_msleep(rng, 10000);
+
put_rng(rng);
- if (rc <= 0) {
- pr_warn("hwrng: no data available\n");
- msleep_interruptible(10000);
+
+ if (rc <= 0)
continue;
- }
+
+ /* If we cannot credit at least one bit of entropy,
+ * keep track of the remainder for the next iteration
+ */
+ entropy = rc * quality * 8 + entropy_credit;
+ if ((entropy >> 10) == 0)
+ entropy_credit = entropy;
+
/* Outside lock, sure, but y'know: randomness. */
add_hwgenerator_randomness((void *)rng_fillbuf, rc,
- rc * current_quality * 8 >> 10);
+ entropy >> 10);
}
hwrng_fill = NULL;
return 0;
}
-static void start_khwrngd(void)
-{
- hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
- if (IS_ERR(hwrng_fill)) {
- pr_err("hwrng_fill thread creation failed\n");
- hwrng_fill = NULL;
- }
-}
-
int hwrng_register(struct hwrng *rng)
{
int err = -EINVAL;
struct hwrng *tmp;
- struct list_head *rng_list_ptr;
bool is_new_current = false;
if (!rng->name || (!rng->data_read && !rng->read))
@@ -477,17 +551,11 @@ int hwrng_register(struct hwrng *rng)
if (strcmp(tmp->name, rng->name) == 0)
goto out_unlock;
}
+ list_add_tail(&rng->list, &rng_list);
init_completion(&rng->cleanup_done);
complete(&rng->cleanup_done);
-
- /* rng_list is sorted by decreasing quality */
- list_for_each(rng_list_ptr, &rng_list) {
- tmp = list_entry(rng_list_ptr, struct hwrng, list);
- if (tmp->quality < rng->quality)
- break;
- }
- list_add_tail(&rng->list, rng_list_ptr);
+ init_completion(&rng->dying);
if (!current_rng ||
(!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
@@ -535,6 +603,7 @@ void hwrng_unregister(struct hwrng *rng)
old_rng = current_rng;
list_del(&rng->list);
+ complete_all(&rng->dying);
if (current_rng == rng) {
err = enable_best_rng();
if (err) {
@@ -603,6 +672,14 @@ void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
+long hwrng_msleep(struct hwrng *rng, unsigned int msecs)
+{
+ unsigned long timeout = msecs_to_jiffies(msecs) + 1;
+
+ return wait_for_completion_interruptible_timeout(&rng->dying, timeout);
+}
+EXPORT_SYMBOL_GPL(hwrng_msleep);
+
static int __init hwrng_modinit(void)
{
int ret;
@@ -638,7 +715,7 @@ static void __exit hwrng_modexit(void)
unregister_miscdev();
}
-module_init(hwrng_modinit);
+fs_initcall(hwrng_modinit); /* depends on misc_register() */
module_exit(hwrng_modexit);
MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index b05d676ca814..a1c24148ed31 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -245,7 +245,7 @@ static int imx_rngc_probe(struct platform_device *pdev)
if (IS_ERR(rngc->base))
return PTR_ERR(rngc->base);
- rngc->clk = devm_clk_get(&pdev->dev, NULL);
+ rngc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(rngc->clk)) {
dev_err(&pdev->dev, "Can not get rng_clk\n");
return PTR_ERR(rngc->clk);
@@ -255,27 +255,14 @@ static int imx_rngc_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- ret = clk_prepare_enable(rngc->clk);
- if (ret)
- return ret;
-
ver_id = readl(rngc->base + RNGC_VER_ID);
rng_type = ver_id >> RNGC_TYPE_SHIFT;
/*
* This driver supports only RNGC and RNGB. (There's a different
* driver for RNGA.)
*/
- if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) {
- ret = -ENODEV;
- goto err;
- }
-
- ret = devm_request_irq(&pdev->dev,
- irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
- if (ret) {
- dev_err(rngc->dev, "Can't get interrupt working.\n");
- goto err;
- }
+ if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB)
+ return -ENODEV;
init_completion(&rngc->rng_op_done);
@@ -290,18 +277,25 @@ static int imx_rngc_probe(struct platform_device *pdev)
imx_rngc_irq_mask_clear(rngc);
+ ret = devm_request_irq(&pdev->dev,
+ irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
+ if (ret) {
+ dev_err(rngc->dev, "Can't get interrupt working.\n");
+ return ret;
+ }
+
if (self_test) {
ret = imx_rngc_self_test(rngc);
if (ret) {
dev_err(rngc->dev, "self test failed\n");
- goto err;
+ return ret;
}
}
- ret = hwrng_register(&rngc->rng);
+ ret = devm_hwrng_register(&pdev->dev, &rngc->rng);
if (ret) {
dev_err(&pdev->dev, "hwrng registration failed\n");
- goto err;
+ return ret;
}
dev_info(&pdev->dev,
@@ -309,22 +303,6 @@ static int imx_rngc_probe(struct platform_device *pdev)
rng_type == RNGC_TYPE_RNGB ? 'B' : 'C',
(ver_id >> RNGC_VER_MAJ_SHIFT) & 0xff, ver_id & 0xff);
return 0;
-
-err:
- clk_disable_unprepare(rngc->clk);
-
- return ret;
-}
-
-static int __exit imx_rngc_remove(struct platform_device *pdev)
-{
- struct imx_rngc *rngc = platform_get_drvdata(pdev);
-
- hwrng_unregister(&rngc->rng);
-
- clk_disable_unprepare(rngc->clk);
-
- return 0;
}
static int __maybe_unused imx_rngc_suspend(struct device *dev)
@@ -355,11 +333,10 @@ MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
static struct platform_driver imx_rngc_driver = {
.driver = {
- .name = "imx_rngc",
+ .name = KBUILD_MODNAME,
.pm = &imx_rngc_pm_ops,
.of_match_table = imx_rngc_dt_ids,
},
- .remove = __exit_p(imx_rngc_remove),
};
module_platform_driver_probe(imx_rngc_driver, imx_rngc_probe);
diff --git a/drivers/char/hw_random/iproc-rng200.c b/drivers/char/hw_random/iproc-rng200.c
index a43743887db1..06bc060534d8 100644
--- a/drivers/char/hw_random/iproc-rng200.c
+++ b/drivers/char/hw_random/iproc-rng200.c
@@ -1,14 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Broadcom Corporation
*
-* This program is free software; you can redistribute it and/or
-* modify it under the terms of the GNU General Public License as
-* published by the Free Software Foundation version 2.
-*
-* This program is distributed "as is" WITHOUT ANY WARRANTY of any
-* kind, whether express or implied; without even the implied warranty
-* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU General Public License for more details.
*/
/*
* DESCRIPTION: The Broadcom iProc RNG200 Driver
diff --git a/drivers/char/hw_random/mpfs-rng.c b/drivers/char/hw_random/mpfs-rng.c
new file mode 100644
index 000000000000..5813da617a48
--- /dev/null
+++ b/drivers/char/hw_random/mpfs-rng.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip PolarFire SoC (MPFS) hardware random driver
+ *
+ * Copyright (c) 2020-2022 Microchip Corporation. All rights reserved.
+ *
+ * Author: Conor Dooley <conor.dooley@microchip.com>
+ */
+
+#include <linux/module.h>
+#include <linux/hw_random.h>
+#include <linux/platform_device.h>
+#include <soc/microchip/mpfs.h>
+
+#define CMD_OPCODE 0x21
+#define CMD_DATA_SIZE 0U
+#define CMD_DATA NULL
+#define MBOX_OFFSET 0U
+#define RESP_OFFSET 0U
+#define RNG_RESP_BYTES 32U
+
+struct mpfs_rng {
+ struct mpfs_sys_controller *sys_controller;
+ struct hwrng rng;
+};
+
+static int mpfs_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct mpfs_rng *rng_priv = container_of(rng, struct mpfs_rng, rng);
+ u32 response_msg[RNG_RESP_BYTES / sizeof(u32)];
+ unsigned int count = 0, copy_size_bytes;
+ int ret;
+
+ struct mpfs_mss_response response = {
+ .resp_status = 0U,
+ .resp_msg = (u32 *)response_msg,
+ .resp_size = RNG_RESP_BYTES
+ };
+ struct mpfs_mss_msg msg = {
+ .cmd_opcode = CMD_OPCODE,
+ .cmd_data_size = CMD_DATA_SIZE,
+ .response = &response,
+ .cmd_data = CMD_DATA,
+ .mbox_offset = MBOX_OFFSET,
+ .resp_offset = RESP_OFFSET
+ };
+
+ while (count < max) {
+ ret = mpfs_blocking_transaction(rng_priv->sys_controller, &msg);
+ if (ret)
+ return ret;
+
+ copy_size_bytes = max - count > RNG_RESP_BYTES ? RNG_RESP_BYTES : max - count;
+ memcpy(buf + count, response_msg, copy_size_bytes);
+
+ count += copy_size_bytes;
+ if (!wait)
+ break;
+ }
+
+ return count;
+}
+
+static int mpfs_rng_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mpfs_rng *rng_priv;
+ int ret;
+
+ rng_priv = devm_kzalloc(dev, sizeof(*rng_priv), GFP_KERNEL);
+ if (!rng_priv)
+ return -ENOMEM;
+
+ rng_priv->sys_controller = mpfs_sys_controller_get(&pdev->dev);
+ if (IS_ERR(rng_priv->sys_controller))
+ return dev_err_probe(dev, PTR_ERR(rng_priv->sys_controller),
+ "Failed to register system controller hwrng sub device\n");
+
+ rng_priv->rng.read = mpfs_rng_read;
+ rng_priv->rng.name = pdev->name;
+ rng_priv->rng.quality = 1024;
+
+ platform_set_drvdata(pdev, rng_priv);
+
+ ret = devm_hwrng_register(&pdev->dev, &rng_priv->rng);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to register MPFS hwrng\n");
+
+ dev_info(&pdev->dev, "Registered MPFS hwrng\n");
+
+ return 0;
+}
+
+static struct platform_driver mpfs_rng_driver = {
+ .driver = {
+ .name = "mpfs-rng",
+ },
+ .probe = mpfs_rng_probe,
+};
+module_platform_driver(mpfs_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>");
+MODULE_DESCRIPTION("PolarFire SoC (MPFS) hardware random driver");
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index 67947a19aa22..e8f9621e7954 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -65,14 +65,14 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
out_release:
amba_release_regions(dev);
out_clk:
- clk_disable(rng_clk);
+ clk_disable_unprepare(rng_clk);
return ret;
}
static void nmk_rng_remove(struct amba_device *dev)
{
amba_release_regions(dev);
- clk_disable(rng_clk);
+ clk_disable_unprepare(rng_clk);
}
static const struct amba_id nmk_rng_ids[] = {
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
index e0d77fa048fb..f06e4f95114f 100644
--- a/drivers/char/hw_random/omap3-rom-rng.c
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -92,7 +92,7 @@ static int __maybe_unused omap_rom_rng_runtime_resume(struct device *dev)
r = ddata->rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT);
if (r != 0) {
- clk_disable(ddata->clk);
+ clk_disable_unprepare(ddata->clk);
dev_err(dev, "HW init failed: %d\n", r);
return -EIO;
diff --git a/drivers/char/hw_random/optee-rng.c b/drivers/char/hw_random/optee-rng.c
index 135a82590923..96b5d546d136 100644
--- a/drivers/char/hw_random/optee-rng.c
+++ b/drivers/char/hw_random/optee-rng.c
@@ -115,7 +115,7 @@ static size_t get_optee_rng_data(struct optee_rng_private *pvt_data,
static int optee_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
struct optee_rng_private *pvt_data = to_optee_rng_private(rng);
- size_t read = 0, rng_size = 0;
+ size_t read = 0, rng_size;
int timeout = 1;
u8 *data = buf;
@@ -145,10 +145,10 @@ static int optee_rng_init(struct hwrng *rng)
struct optee_rng_private *pvt_data = to_optee_rng_private(rng);
struct tee_shm *entropy_shm_pool = NULL;
- entropy_shm_pool = tee_shm_alloc(pvt_data->ctx, MAX_ENTROPY_REQ_SZ,
- TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+ entropy_shm_pool = tee_shm_alloc_kernel_buf(pvt_data->ctx,
+ MAX_ENTROPY_REQ_SZ);
if (IS_ERR(entropy_shm_pool)) {
- dev_err(pvt_data->dev, "tee_shm_alloc failed\n");
+ dev_err(pvt_data->dev, "tee_shm_alloc_kernel_buf failed\n");
return PTR_ERR(entropy_shm_pool);
}
diff --git a/drivers/char/hw_random/powernv-rng.c b/drivers/char/hw_random/powernv-rng.c
index 8da1d7917bdc..429e956f34e1 100644
--- a/drivers/char/hw_random/powernv-rng.c
+++ b/drivers/char/hw_random/powernv-rng.c
@@ -23,7 +23,7 @@ static int powernv_rng_read(struct hwrng *rng, void *data, size_t max, bool wait
buf = (unsigned long *)data;
for (i = 0; i < len; i++)
- powernv_get_random_long(buf++);
+ pnv_get_random_long(buf++);
return len * sizeof(unsigned long);
}
diff --git a/drivers/char/hw_random/s390-trng.c b/drivers/char/hw_random/s390-trng.c
index 2beaa35c0d74..795853dfc46b 100644
--- a/drivers/char/hw_random/s390-trng.c
+++ b/drivers/char/hw_random/s390-trng.c
@@ -108,7 +108,6 @@ static ssize_t trng_counter_show(struct device *dev,
{
u64 dev_counter = atomic64_read(&trng_dev_counter);
u64 hwrng_counter = atomic64_read(&trng_hwrng_counter);
-#if IS_ENABLED(CONFIG_ARCH_RANDOM)
u64 arch_counter = atomic64_read(&s390_arch_random_counter);
return sysfs_emit(buf,
@@ -118,14 +117,6 @@ static ssize_t trng_counter_show(struct device *dev,
"total: %llu\n",
dev_counter, hwrng_counter, arch_counter,
dev_counter + hwrng_counter + arch_counter);
-#else
- return sysfs_emit(buf,
- "trng: %llu\n"
- "hwrng: %llu\n"
- "total: %llu\n",
- dev_counter, hwrng_counter,
- dev_counter + hwrng_counter);
-#endif
}
static DEVICE_ATTR(byte_counter, 0444, trng_counter_show, NULL);
@@ -261,5 +252,5 @@ static void __exit trng_exit(void)
trng_debug_exit();
}
-module_cpu_feature_match(MSA, trng_init);
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, trng_init);
module_exit(trng_exit);
diff --git a/drivers/char/hw_random/tx4939-rng.c b/drivers/char/hw_random/tx4939-rng.c
deleted file mode 100644
index c8bd34e740fd..000000000000
--- a/drivers/char/hw_random/tx4939-rng.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * RNG driver for TX4939 Random Number Generators (RNG)
- *
- * Copyright (C) 2009 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/hw_random.h>
-#include <linux/gfp.h>
-
-#define TX4939_RNG_RCSR 0x00000000
-#define TX4939_RNG_ROR(n) (0x00000018 + (n) * 8)
-
-#define TX4939_RNG_RCSR_INTE 0x00000008
-#define TX4939_RNG_RCSR_RST 0x00000004
-#define TX4939_RNG_RCSR_FIN 0x00000002
-#define TX4939_RNG_RCSR_ST 0x00000001
-
-struct tx4939_rng {
- struct hwrng rng;
- void __iomem *base;
- u64 databuf[3];
- unsigned int data_avail;
-};
-
-static void rng_io_start(void)
-{
-#ifndef CONFIG_64BIT
- /*
- * readq is reading a 64-bit register using a 64-bit load. On
- * a 32-bit kernel however interrupts or any other processor
- * exception would clobber the upper 32-bit of the processor
- * register so interrupts need to be disabled.
- */
- local_irq_disable();
-#endif
-}
-
-static void rng_io_end(void)
-{
-#ifndef CONFIG_64BIT
- local_irq_enable();
-#endif
-}
-
-static u64 read_rng(void __iomem *base, unsigned int offset)
-{
- return ____raw_readq(base + offset);
-}
-
-static void write_rng(u64 val, void __iomem *base, unsigned int offset)
-{
- return ____raw_writeq(val, base + offset);
-}
-
-static int tx4939_rng_data_present(struct hwrng *rng, int wait)
-{
- struct tx4939_rng *rngdev = container_of(rng, struct tx4939_rng, rng);
- int i;
-
- if (rngdev->data_avail)
- return rngdev->data_avail;
- for (i = 0; i < 20; i++) {
- rng_io_start();
- if (!(read_rng(rngdev->base, TX4939_RNG_RCSR)
- & TX4939_RNG_RCSR_ST)) {
- rngdev->databuf[0] =
- read_rng(rngdev->base, TX4939_RNG_ROR(0));
- rngdev->databuf[1] =
- read_rng(rngdev->base, TX4939_RNG_ROR(1));
- rngdev->databuf[2] =
- read_rng(rngdev->base, TX4939_RNG_ROR(2));
- rngdev->data_avail =
- sizeof(rngdev->databuf) / sizeof(u32);
- /* Start RNG */
- write_rng(TX4939_RNG_RCSR_ST,
- rngdev->base, TX4939_RNG_RCSR);
- wait = 0;
- }
- rng_io_end();
- if (!wait)
- break;
- /* 90 bus clock cycles by default for generation */
- ndelay(90 * 5);
- }
- return rngdev->data_avail;
-}
-
-static int tx4939_rng_data_read(struct hwrng *rng, u32 *buffer)
-{
- struct tx4939_rng *rngdev = container_of(rng, struct tx4939_rng, rng);
-
- rngdev->data_avail--;
- *buffer = *((u32 *)&rngdev->databuf + rngdev->data_avail);
- return sizeof(u32);
-}
-
-static int __init tx4939_rng_probe(struct platform_device *dev)
-{
- struct tx4939_rng *rngdev;
- int i;
-
- rngdev = devm_kzalloc(&dev->dev, sizeof(*rngdev), GFP_KERNEL);
- if (!rngdev)
- return -ENOMEM;
- rngdev->base = devm_platform_ioremap_resource(dev, 0);
- if (IS_ERR(rngdev->base))
- return PTR_ERR(rngdev->base);
-
- rngdev->rng.name = dev_name(&dev->dev);
- rngdev->rng.data_present = tx4939_rng_data_present;
- rngdev->rng.data_read = tx4939_rng_data_read;
-
- rng_io_start();
- /* Reset RNG */
- write_rng(TX4939_RNG_RCSR_RST, rngdev->base, TX4939_RNG_RCSR);
- write_rng(0, rngdev->base, TX4939_RNG_RCSR);
- /* Start RNG */
- write_rng(TX4939_RNG_RCSR_ST, rngdev->base, TX4939_RNG_RCSR);
- rng_io_end();
- /*
- * Drop first two results. From the datasheet:
- * The quality of the random numbers generated immediately
- * after reset can be insufficient. Therefore, do not use
- * random numbers obtained from the first and second
- * generations; use the ones from the third or subsequent
- * generation.
- */
- for (i = 0; i < 2; i++) {
- rngdev->data_avail = 0;
- if (!tx4939_rng_data_present(&rngdev->rng, 1))
- return -EIO;
- }
-
- platform_set_drvdata(dev, rngdev);
- return devm_hwrng_register(&dev->dev, &rngdev->rng);
-}
-
-static struct platform_driver tx4939_rng_driver = {
- .driver = {
- .name = "tx4939-rng",
- },
-};
-
-module_platform_driver_probe(tx4939_rng_driver, tx4939_rng_probe);
-
-MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver for TX4939");
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 7444cc146e86..a9a0a3b09c8b 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -145,7 +145,7 @@ static int via_rng_init(struct hwrng *rng)
}
/* Control the RNG via MSR. Tread lightly and pay very close
- * close attention to values written, as the reserved fields
+ * attention to values written, as the reserved fields
* are documented to be "undefined and unpredictable"; but it
* does not say to write them as zero, so I make a guess that
* we restore the values we find in the register.
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 0a7dde135db1..a6f3a8a2aca6 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -159,6 +159,8 @@ static int probe_common(struct virtio_device *vdev)
goto err_find;
}
+ virtio_device_ready(vdev);
+
/* we always have a pending entropy request */
request_entropy(vi);
@@ -179,9 +181,9 @@ static void remove_common(struct virtio_device *vdev)
vi->data_avail = 0;
vi->data_idx = 0;
complete(&vi->have_data);
- vdev->config->reset(vdev);
if (vi->hwrng_register_done)
hwrng_unregister(&vi->hwrng);
+ virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
ida_simple_remove(&rng_index_ida, vi->index);
kfree(vi);
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index b061e6b513ed..39565cf74b2c 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -119,13 +119,13 @@ config ASPEED_KCS_IPMI_BMC
provides the access of KCS IO space for BMC side.
config NPCM7XX_KCS_IPMI_BMC
- depends on ARCH_NPCM7XX || COMPILE_TEST
+ depends on ARCH_NPCM || COMPILE_TEST
select IPMI_KCS_BMC
select REGMAP_MMIO
- tristate "NPCM7xx KCS IPMI BMC driver"
+ tristate "NPCM KCS IPMI BMC driver"
help
Provides a driver for the KCS (Keyboard Controller Style) IPMI
- interface found on Nuvoton NPCM7xx SOCs.
+ interface found on Nuvoton NPCM SOCs.
The driver implements the BMC side of the KCS contorller, it
provides the access of KCS IO space for BMC side.
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
index 49b8f22fdcf0..a0e9e80d92ee 100644
--- a/drivers/char/ipmi/ipmb_dev_int.c
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -299,8 +299,7 @@ static int ipmb_slave_cb(struct i2c_client *client,
return 0;
}
-static int ipmb_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ipmb_probe(struct i2c_client *client)
{
struct ipmb_dev *ipmb_dev;
int ret;
@@ -342,14 +341,12 @@ static int ipmb_probe(struct i2c_client *client,
return 0;
}
-static int ipmb_remove(struct i2c_client *client)
+static void ipmb_remove(struct i2c_client *client)
{
struct ipmb_dev *ipmb_dev = i2c_get_clientdata(client);
i2c_slave_unregister(client);
misc_deregister(&ipmb_dev->miscdev);
-
- return 0;
}
static const struct i2c_device_id ipmb_id[] = {
@@ -369,7 +366,7 @@ static struct i2c_driver ipmb_driver = {
.name = "ipmb-dev",
.acpi_match_table = ACPI_PTR(acpi_ipmb_id),
},
- .probe = ipmb_probe,
+ .probe_new = ipmb_probe,
.remove = ipmb_remove,
.id_table = ipmb_id,
};
diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c
index ba0c2d2c6bbe..7c1aee5e11b7 100644
--- a/drivers/char/ipmi/ipmi_ipmb.c
+++ b/drivers/char/ipmi/ipmi_ipmb.c
@@ -39,6 +39,7 @@ MODULE_PARM_DESC(max_retries, "Max resends of a command before timing out.");
struct ipmi_ipmb_dev {
struct ipmi_smi *intf;
struct i2c_client *client;
+ struct i2c_client *slave;
struct ipmi_smi_handlers handlers;
@@ -217,8 +218,8 @@ static void ipmi_ipmb_send_response(struct ipmi_ipmb_dev *iidev,
{
if ((msg->data[0] >> 2) & 1) {
/*
- * It's a response being sent, we needto return a
- * response response. Fake a send msg command
+ * It's a response being sent, we need to return a
+ * response to the response. Fake a send msg command
* response with channel 0. This will always be ipmb
* direct.
*/
@@ -257,7 +258,7 @@ static void ipmi_ipmb_format_for_xmit(struct ipmi_ipmb_dev *iidev,
memcpy(iidev->xmitmsg + 5, msg->data + 1, msg->data_size - 1);
iidev->xmitlen = msg->data_size + 4;
}
- iidev->xmitmsg[3] = iidev->client->addr << 1;
+ iidev->xmitmsg[3] = iidev->slave->addr << 1;
if (((msg->data[0] >> 2) & 1) == 0)
/* If it's a command, put in our own sequence number. */
iidev->xmitmsg[4] = ((iidev->xmitmsg[4] & 0x03) |
@@ -423,24 +424,33 @@ static void ipmi_ipmb_request_events(void *send_info)
/* We don't fetch events here. */
}
-static int ipmi_ipmb_remove(struct i2c_client *client)
+static void ipmi_ipmb_cleanup(struct ipmi_ipmb_dev *iidev)
{
- struct ipmi_ipmb_dev *iidev = i2c_get_clientdata(client);
-
- if (iidev->client) {
- iidev->client = NULL;
- i2c_slave_unregister(client);
+ if (iidev->slave) {
+ i2c_slave_unregister(iidev->slave);
+ if (iidev->slave != iidev->client)
+ i2c_unregister_device(iidev->slave);
}
+ iidev->slave = NULL;
+ iidev->client = NULL;
ipmi_ipmb_stop_thread(iidev);
+}
- return 0;
+static void ipmi_ipmb_remove(struct i2c_client *client)
+{
+ struct ipmi_ipmb_dev *iidev = i2c_get_clientdata(client);
+
+ ipmi_ipmb_cleanup(iidev);
+ ipmi_unregister_smi(iidev->intf);
}
-static int ipmi_ipmb_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ipmi_ipmb_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ipmi_ipmb_dev *iidev;
+ struct device_node *slave_np;
+ struct i2c_adapter *slave_adap = NULL;
+ struct i2c_client *slave = NULL;
int rv;
iidev = devm_kzalloc(&client->dev, sizeof(*iidev), GFP_KERNEL);
@@ -464,14 +474,46 @@ static int ipmi_ipmb_probe(struct i2c_client *client,
&iidev->max_retries) != 0)
iidev->max_retries = max_retries;
+ slave_np = of_parse_phandle(dev->of_node, "slave-dev", 0);
+ if (slave_np) {
+ slave_adap = of_get_i2c_adapter_by_node(slave_np);
+ of_node_put(slave_np);
+ if (!slave_adap) {
+ dev_notice(&client->dev,
+ "Could not find slave adapter\n");
+ return -EINVAL;
+ }
+ }
+
+ iidev->client = client;
+
+ if (slave_adap) {
+ struct i2c_board_info binfo;
+
+ memset(&binfo, 0, sizeof(binfo));
+ strscpy(binfo.type, "ipmb-slave", I2C_NAME_SIZE);
+ binfo.addr = client->addr;
+ binfo.flags = I2C_CLIENT_SLAVE;
+ slave = i2c_new_client_device(slave_adap, &binfo);
+ i2c_put_adapter(slave_adap);
+ if (IS_ERR(slave)) {
+ rv = PTR_ERR(slave);
+ dev_notice(&client->dev,
+ "Could not allocate slave device: %d\n", rv);
+ return rv;
+ }
+ i2c_set_clientdata(slave, iidev);
+ } else {
+ slave = client;
+ }
i2c_set_clientdata(client, iidev);
- client->flags |= I2C_CLIENT_SLAVE;
+ slave->flags |= I2C_CLIENT_SLAVE;
- rv = i2c_slave_register(client, ipmi_ipmb_slave_cb);
+ rv = i2c_slave_register(slave, ipmi_ipmb_slave_cb);
if (rv)
- return rv;
-
- iidev->client = client;
+ goto out_err;
+ iidev->slave = slave;
+ slave = NULL;
iidev->handlers.flags = IPMI_SMI_CAN_HANDLE_IPMB_DIRECT;
iidev->handlers.start_processing = ipmi_ipmb_start_processing;
@@ -502,7 +544,9 @@ static int ipmi_ipmb_probe(struct i2c_client *client,
return 0;
out_err:
- ipmi_ipmb_remove(client);
+ if (slave && slave != client)
+ i2c_unregister_device(slave);
+ ipmi_ipmb_cleanup(iidev);
return rv;
}
@@ -528,7 +572,7 @@ static struct i2c_driver ipmi_ipmb_driver = {
.name = DEVICE_NAME,
.of_match_table = of_ipmi_ipmb_match,
},
- .probe = ipmi_ipmb_probe,
+ .probe_new = ipmi_ipmb_probe,
.remove = ipmi_ipmb_remove,
.id_table = ipmi_ipmb_id,
};
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index c837d5416e0e..49a1707693c9 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -11,8 +11,8 @@
* Copyright 2002 MontaVista Software Inc.
*/
-#define pr_fmt(fmt) "%s" fmt, "IPMI message handler: "
-#define dev_fmt pr_fmt
+#define pr_fmt(fmt) "IPMI message handler: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
#include <linux/module.h>
#include <linux/errno.h>
@@ -145,6 +145,18 @@ module_param(default_max_retries, uint, 0644);
MODULE_PARM_DESC(default_max_retries,
"The time (milliseconds) between retry sends in maintenance mode");
+/* The default maximum number of users that may register. */
+static unsigned int max_users = 30;
+module_param(max_users, uint, 0644);
+MODULE_PARM_DESC(max_users,
+ "The most users that may use the IPMI stack at one time.");
+
+/* The default maximum number of messages a user may have outstanding. */
+static unsigned int max_msgs_per_user = 100;
+module_param(max_msgs_per_user, uint, 0644);
+MODULE_PARM_DESC(max_msgs_per_user,
+		 "The most messages a user may have outstanding.");
+
/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME 1000
@@ -187,6 +199,8 @@ struct ipmi_user {
/* Does this interface receive IPMI events? */
bool gets_events;
+ atomic_t nr_msgs;
+
/* Free must run in process context for RCU cleanup. */
struct work_struct remove_work;
};
@@ -442,6 +456,10 @@ struct ipmi_smi {
*/
struct list_head users;
struct srcu_struct users_srcu;
+ atomic_t nr_users;
+ struct device_attribute nr_users_devattr;
+ struct device_attribute nr_msgs_devattr;
+
/* Used for wake ups at startup. */
wait_queue_head_t waitq;
@@ -718,12 +736,6 @@ static void intf_free(struct kref *ref)
kfree(intf);
}
-struct watcher_entry {
- int intf_num;
- struct ipmi_smi *intf;
- struct list_head link;
-};
-
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
struct ipmi_smi *intf;
@@ -927,11 +939,13 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
* risk. At this moment, simply skip it in that case.
*/
ipmi_free_recv_msg(msg);
+ atomic_dec(&msg->user->nr_msgs);
} else {
int index;
struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
if (user) {
+ atomic_dec(&user->nr_msgs);
user->handler->ipmi_recv_hndl(msg, user->handler_data);
release_ipmi_user(user, index);
} else {
@@ -1230,6 +1244,11 @@ int ipmi_create_user(unsigned int if_num,
goto out_kfree;
found:
+ if (atomic_add_return(1, &intf->nr_users) > max_users) {
+ rv = -EBUSY;
+ goto out_kfree;
+ }
+
INIT_WORK(&new_user->remove_work, free_user_work);
rv = init_srcu_struct(&new_user->release_barrier);
@@ -1244,6 +1263,7 @@ int ipmi_create_user(unsigned int if_num,
/* Note that each existing user holds a refcount to the interface. */
kref_get(&intf->refcount);
+ atomic_set(&new_user->nr_msgs, 0);
kref_init(&new_user->refcount);
new_user->handler = handler;
new_user->handler_data = handler_data;
@@ -1262,6 +1282,7 @@ int ipmi_create_user(unsigned int if_num,
return 0;
out_kfree:
+ atomic_dec(&intf->nr_users);
srcu_read_unlock(&ipmi_interfaces_srcu, index);
vfree(new_user);
return rv;
@@ -1336,6 +1357,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
/* Remove the user from the interface's sequence table. */
spin_lock_irqsave(&intf->seq_lock, flags);
list_del_rcu(&user->link);
+ atomic_dec(&intf->nr_users);
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
if (intf->seq_table[i].inuse
@@ -2284,6 +2306,14 @@ static int i_ipmi_request(struct ipmi_user *user,
struct ipmi_recv_msg *recv_msg;
int rv = 0;
+ if (user) {
+ if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
+ /* Decrement will happen at the end of the routine. */
+ rv = -EBUSY;
+ goto out;
+ }
+ }
+
if (supplied_recv)
recv_msg = supplied_recv;
else {
@@ -2296,7 +2326,7 @@ static int i_ipmi_request(struct ipmi_user *user,
recv_msg->user_msg_data = user_msg_data;
if (supplied_smi)
- smi_msg = (struct ipmi_smi_msg *) supplied_smi;
+ smi_msg = supplied_smi;
else {
smi_msg = ipmi_alloc_smi_msg();
if (smi_msg == NULL) {
@@ -2348,13 +2378,16 @@ out_err:
ipmi_free_smi_msg(smi_msg);
ipmi_free_recv_msg(recv_msg);
} else {
- pr_debug("Send: %*ph\n", smi_msg->data_size, smi_msg->data);
+ dev_dbg(intf->si_dev, "Send: %*ph\n",
+ smi_msg->data_size, smi_msg->data);
smi_send(intf, intf->handlers, smi_msg, priority);
}
rcu_read_unlock();
out:
+ if (rv && user)
+ atomic_dec(&user->nr_msgs);
return rv;
}
@@ -3031,7 +3064,7 @@ cleanup_bmc_device(struct kref *ref)
* with removing the device attributes while reading a device
* attribute.
*/
- schedule_work(&bmc->remove_work);
+ queue_work(remove_work_wq, &bmc->remove_work);
}
/*
@@ -3471,6 +3504,36 @@ void ipmi_poll_interface(struct ipmi_user *user)
}
EXPORT_SYMBOL(ipmi_poll_interface);
+static ssize_t nr_users_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipmi_smi *intf = container_of(attr,
+ struct ipmi_smi, nr_users_devattr);
+
+ return sysfs_emit(buf, "%d\n", atomic_read(&intf->nr_users));
+}
+static DEVICE_ATTR_RO(nr_users);
+
+static ssize_t nr_msgs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipmi_smi *intf = container_of(attr,
+ struct ipmi_smi, nr_msgs_devattr);
+ struct ipmi_user *user;
+ int index;
+ unsigned int count = 0;
+
+ index = srcu_read_lock(&intf->users_srcu);
+ list_for_each_entry_rcu(user, &intf->users, link)
+ count += atomic_read(&user->nr_msgs);
+ srcu_read_unlock(&intf->users_srcu, index);
+
+ return sysfs_emit(buf, "%u\n", count);
+}
+static DEVICE_ATTR_RO(nr_msgs);
+
static void redo_bmc_reg(struct work_struct *work)
{
struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
@@ -3529,6 +3592,7 @@ int ipmi_add_smi(struct module *owner,
if (slave_addr != 0)
intf->addrinfo[0].address = slave_addr;
INIT_LIST_HEAD(&intf->users);
+ atomic_set(&intf->nr_users, 0);
intf->handlers = handlers;
intf->send_info = send_info;
spin_lock_init(&intf->seq_lock);
@@ -3592,6 +3656,20 @@ int ipmi_add_smi(struct module *owner,
if (rv)
goto out_err_bmc_reg;
+ intf->nr_users_devattr = dev_attr_nr_users;
+ sysfs_attr_init(&intf->nr_users_devattr.attr);
+ rv = device_create_file(intf->si_dev, &intf->nr_users_devattr);
+ if (rv)
+ goto out_err_bmc_reg;
+
+ intf->nr_msgs_devattr = dev_attr_nr_msgs;
+ sysfs_attr_init(&intf->nr_msgs_devattr.attr);
+ rv = device_create_file(intf->si_dev, &intf->nr_msgs_devattr);
+ if (rv) {
+ device_remove_file(intf->si_dev, &intf->nr_users_devattr);
+ goto out_err_bmc_reg;
+ }
+
/*
* Keep memory order straight for RCU readers. Make
* sure everything else is committed to memory before
@@ -3677,8 +3755,11 @@ static void cleanup_smi_msgs(struct ipmi_smi *intf)
void ipmi_unregister_smi(struct ipmi_smi *intf)
{
struct ipmi_smi_watcher *w;
- int intf_num = intf->intf_num, index;
+ int intf_num, index;
+ if (!intf)
+ return;
+ intf_num = intf->intf_num;
mutex_lock(&ipmi_interfaces_mutex);
intf->intf_num = -1;
intf->in_shutdown = true;
@@ -3688,6 +3769,9 @@ void ipmi_unregister_smi(struct ipmi_smi *intf)
/* At this point no users can be added to the interface. */
+ device_remove_file(intf->si_dev, &intf->nr_msgs_devattr);
+ device_remove_file(intf->si_dev, &intf->nr_users_devattr);
+
/*
* Call all the watcher interfaces to tell them that
* an interface is going away.
@@ -3836,7 +3920,8 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
msg->data[10] = ipmb_checksum(&msg->data[6], 4);
msg->data_size = 11;
- pr_debug("Invalid command: %*ph\n", msg->data_size, msg->data);
+ dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
+ msg->data_size, msg->data);
rcu_read_lock();
if (!intf->in_shutdown) {
@@ -3989,10 +4074,10 @@ static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf,
struct ipmi_recv_msg *recv_msg;
struct ipmi_ipmb_direct_addr *daddr;
- recv_msg = (struct ipmi_recv_msg *) msg->user_data;
+ recv_msg = msg->user_data;
if (recv_msg == NULL) {
dev_warn(intf->si_dev,
- "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
+ "IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
return 0;
}
@@ -4266,7 +4351,7 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
/*
* The message starts at byte 4 which follows the
- * the Channel Byte in the "GET MESSAGE" command
+ * Channel Byte in the "GET MESSAGE" command
*/
recv_msg->msg.data_len = msg->rsp_size - 4;
memcpy(recv_msg->msg_data, &msg->rsp[4],
@@ -4407,10 +4492,10 @@ static int handle_bmc_rsp(struct ipmi_smi *intf,
struct ipmi_recv_msg *recv_msg;
struct ipmi_system_interface_addr *smi_addr;
- recv_msg = (struct ipmi_recv_msg *) msg->user_data;
+ recv_msg = msg->user_data;
if (recv_msg == NULL) {
dev_warn(intf->si_dev,
- "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
+ "IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
return 0;
}
@@ -4444,7 +4529,7 @@ static int handle_one_recv_msg(struct ipmi_smi *intf,
unsigned char cc;
bool is_cmd = !((msg->rsp[0] >> 2) & 1);
- pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp);
+ dev_dbg(intf->si_dev, "Recv: %*ph\n", msg->rsp_size, msg->rsp);
if (msg->rsp_size < 2) {
/* Message is too small to be correct. */
@@ -4518,6 +4603,8 @@ return_unspecified:
} else
/* The message was sent, start the timer. */
intf_start_seq_timer(intf, msg->msgid);
+ requeue = 0;
+ goto out;
} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
|| (msg->rsp[1] != msg->data[1])) {
/*
@@ -4826,7 +4913,8 @@ smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
smi_msg->data_size = recv_msg->msg.data_len;
smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
- pr_debug("Resend: %*ph\n", smi_msg->data_size, smi_msg->data);
+ dev_dbg(intf->si_dev, "Resend: %*ph\n",
+ smi_msg->data_size, smi_msg->data);
return smi_msg;
}
@@ -5392,22 +5480,27 @@ static int ipmi_init_msghandler(void)
if (initialized)
goto out;
- init_srcu_struct(&ipmi_interfaces_srcu);
-
- timer_setup(&ipmi_timer, ipmi_timeout, 0);
- mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
-
- atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+ rv = init_srcu_struct(&ipmi_interfaces_srcu);
+ if (rv)
+ goto out;
remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
if (!remove_work_wq) {
pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
rv = -ENOMEM;
- goto out;
+ goto out_wq;
}
+ timer_setup(&ipmi_timer, ipmi_timeout, 0);
+ mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+
+ atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+
initialized = true;
+out_wq:
+ if (rv)
+ cleanup_srcu_struct(&ipmi_interfaces_srcu);
out:
mutex_unlock(&ipmi_interfaces_mutex);
return rv;
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index bc3a18daf97a..163ec9749e55 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -94,12 +94,8 @@ static void dummy_recv_free(struct ipmi_recv_msg *msg)
{
atomic_dec(&dummy_count);
}
-static struct ipmi_smi_msg halt_smi_msg = {
- .done = dummy_smi_free
-};
-static struct ipmi_recv_msg halt_recv_msg = {
- .done = dummy_recv_free
-};
+static struct ipmi_smi_msg halt_smi_msg = INIT_IPMI_SMI_MSG(dummy_smi_free);
+static struct ipmi_recv_msg halt_recv_msg = INIT_IPMI_RECV_MSG(dummy_recv_free);
/*
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 64dedb3ef8ec..6e357ad76f2e 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -264,15 +264,16 @@ static void cleanup_one_si(struct smi_info *smi_info);
static void cleanup_ipmi_si(void);
#ifdef DEBUG_TIMING
-void debug_timestamp(char *msg)
+void debug_timestamp(struct smi_info *smi_info, char *msg)
{
struct timespec64 t;
ktime_get_ts64(&t);
- pr_debug("**%s: %lld.%9.9ld\n", msg, t.tv_sec, t.tv_nsec);
+ dev_dbg(smi_info->io.dev, "**%s: %lld.%9.9ld\n",
+ msg, t.tv_sec, t.tv_nsec);
}
#else
-#define debug_timestamp(x)
+#define debug_timestamp(smi_info, x)
#endif
static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
@@ -318,7 +319,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
smi_info->curr_msg = smi_info->waiting_msg;
smi_info->waiting_msg = NULL;
- debug_timestamp("Start2");
+ debug_timestamp(smi_info, "Start2");
err = atomic_notifier_call_chain(&xaction_notifier_list,
0, smi_info);
if (err & NOTIFY_STOP_MASK) {
@@ -538,7 +539,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
{
struct ipmi_smi_msg *msg;
- debug_timestamp("Done");
+ debug_timestamp(smi_info, "Done");
switch (smi_info->si_state) {
case SI_NORMAL:
if (!smi_info->curr_msg)
@@ -901,7 +902,7 @@ static void sender(void *send_info,
struct smi_info *smi_info = send_info;
unsigned long flags;
- debug_timestamp("Enqueue");
+ debug_timestamp(smi_info, "Enqueue");
if (smi_info->run_to_completion) {
/*
@@ -1079,7 +1080,7 @@ static void smi_timeout(struct timer_list *t)
long timeout;
spin_lock_irqsave(&(smi_info->si_lock), flags);
- debug_timestamp("Timer");
+ debug_timestamp(smi_info, "Timer");
jiffies_now = jiffies;
time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
@@ -1128,7 +1129,7 @@ irqreturn_t ipmi_si_irq_handler(int irq, void *data)
smi_inc_stat(smi_info, interrupts);
- debug_timestamp("Interrupt");
+ debug_timestamp(smi_info, "Interrupt");
smi_event_handler(smi_info, 0);
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
@@ -2220,10 +2221,7 @@ static void cleanup_one_si(struct smi_info *smi_info)
return;
list_del(&smi_info->link);
-
- if (smi_info->intf)
- ipmi_unregister_smi(smi_info->intf);
-
+ ipmi_unregister_smi(smi_info->intf);
kfree(smi_info);
}
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 0c62e578749e..e1072809fe31 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -814,6 +814,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
break;
case SSIF_GETTING_EVENTS:
+ if (!msg) {
+ /* Should never happen, but just in case. */
+ dev_warn(&ssif_info->client->dev,
+ "No message set while getting events\n");
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ break;
+ }
+
if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
/* Error getting event, probably done. */
msg->done(msg);
@@ -838,6 +846,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
break;
case SSIF_GETTING_MESSAGES:
+ if (!msg) {
+ /* Should never happen, but just in case. */
+ dev_warn(&ssif_info->client->dev,
+ "No message set while getting messages\n");
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ break;
+ }
+
if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
/* Error getting event, probably done. */
msg->done(msg);
@@ -861,6 +877,13 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
deliver_recv_msg(ssif_info, msg);
}
break;
+
+ default:
+ /* Should never happen, but just in case. */
+ dev_warn(&ssif_info->client->dev,
+ "Invalid state in message done handling: %d\n",
+ ssif_info->ssif_state);
+ ipmi_ssif_unlock_cond(ssif_info, flags);
}
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
@@ -1053,7 +1076,7 @@ static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags)
static void sender(void *send_info,
struct ipmi_smi_msg *msg)
{
- struct ssif_info *ssif_info = (struct ssif_info *) send_info;
+ struct ssif_info *ssif_info = send_info;
unsigned long oflags, *flags;
BUG_ON(ssif_info->waiting_msg);
@@ -1090,7 +1113,7 @@ static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
*/
static void request_events(void *send_info)
{
- struct ssif_info *ssif_info = (struct ssif_info *) send_info;
+ struct ssif_info *ssif_info = send_info;
unsigned long oflags, *flags;
if (!ssif_info->has_event_buffer)
@@ -1107,7 +1130,7 @@ static void request_events(void *send_info)
*/
static void ssif_set_need_watch(void *send_info, unsigned int watch_mask)
{
- struct ssif_info *ssif_info = (struct ssif_info *) send_info;
+ struct ssif_info *ssif_info = send_info;
unsigned long oflags, *flags;
long timeout = 0;
@@ -1258,13 +1281,13 @@ static void shutdown_ssif(void *send_info)
}
}
-static int ssif_remove(struct i2c_client *client)
+static void ssif_remove(struct i2c_client *client)
{
struct ssif_info *ssif_info = i2c_get_clientdata(client);
struct ssif_addr_info *addr_info;
if (!ssif_info)
- return 0;
+ return;
/*
* After this point, we won't deliver anything asychronously
@@ -1280,8 +1303,6 @@ static int ssif_remove(struct i2c_client *client)
}
kfree(ssif_info);
-
- return 0;
}
static int read_response(struct i2c_client *client, unsigned char *resp)
@@ -1354,7 +1375,7 @@ static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info)
if (rv)
rv = -ENODEV;
else
- strlcpy(info->type, DEVICE_NAME, I2C_NAME_SIZE);
+ strscpy(info->type, DEVICE_NAME, I2C_NAME_SIZE);
kfree(resp);
return rv;
}
@@ -1619,13 +1640,13 @@ static int ssif_check_and_remove(struct i2c_client *client,
return 0;
}
-static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int ssif_probe(struct i2c_client *client)
{
unsigned char msg[3];
unsigned char *resp;
struct ssif_info *ssif_info;
int rv = 0;
- int len;
+ int len = 0;
int i;
u8 slave_addr = 0;
struct ssif_addr_info *addr_info = NULL;
@@ -1659,6 +1680,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
}
+ ssif_info->client = client;
+ i2c_set_clientdata(client, ssif_info);
+
rv = ssif_check_and_remove(client, ssif_info);
/* If rv is 0 and addr source is not SI_ACPI, continue probing */
if (!rv && ssif_info->addr_source == SI_ACPI) {
@@ -1679,9 +1703,6 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
ipmi_addr_src_to_str(ssif_info->addr_source),
client->addr, client->adapter->name, slave_addr);
- ssif_info->client = client;
- i2c_set_clientdata(client, ssif_info);
-
/* Now check for system interface capabilities */
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD;
@@ -1881,6 +1902,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
dev_err(&ssif_info->client->dev,
"Unable to start IPMI SSIF: %d\n", rv);
+ i2c_set_clientdata(client, NULL);
kfree(ssif_info);
}
kfree(resp);
@@ -2036,7 +2058,7 @@ static struct i2c_driver ssif_i2c_driver = {
.driver = {
.name = DEVICE_NAME
},
- .probe = ssif_probe,
+ .probe_new = ssif_probe,
.remove = ssif_remove,
.alert = ssif_alert,
.id_table = ssif_id,
@@ -2076,7 +2098,7 @@ static struct platform_driver ipmi_driver = {
.id_table = ssif_plat_ids
};
-static int init_ipmi_ssif(void)
+static int __init init_ipmi_ssif(void)
{
int i;
int rv;
@@ -2118,7 +2140,7 @@ static int init_ipmi_ssif(void)
}
module_init(init_ipmi_ssif);
-static void cleanup_ipmi_ssif(void)
+static void __exit cleanup_ipmi_ssif(void)
{
if (!initialized)
return;
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 883b4a341012..5b4e677929ca 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -354,12 +354,8 @@ static void msg_free_recv(struct ipmi_recv_msg *msg)
complete(&msg_wait);
}
}
-static struct ipmi_smi_msg smi_msg = {
- .done = msg_free_smi
-};
-static struct ipmi_recv_msg recv_msg = {
- .done = msg_free_recv
-};
+static struct ipmi_smi_msg smi_msg = INIT_IPMI_SMI_MSG(msg_free_smi);
+static struct ipmi_recv_msg recv_msg = INIT_IPMI_RECV_MSG(msg_free_recv);
static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
struct ipmi_recv_msg *recv_msg,
@@ -475,12 +471,10 @@ static void panic_recv_free(struct ipmi_recv_msg *msg)
atomic_dec(&panic_done_count);
}
-static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg = {
- .done = panic_smi_free
-};
-static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg = {
- .done = panic_recv_free
-};
+static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg =
+ INIT_IPMI_SMI_MSG(panic_smi_free);
+static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg =
+ INIT_IPMI_RECV_MSG(panic_recv_free);
static void panic_halt_ipmi_heartbeat(void)
{
@@ -516,12 +510,10 @@ static void panic_halt_ipmi_heartbeat(void)
atomic_sub(2, &panic_done_count);
}
-static struct ipmi_smi_msg panic_halt_smi_msg = {
- .done = panic_smi_free
-};
-static struct ipmi_recv_msg panic_halt_recv_msg = {
- .done = panic_recv_free
-};
+static struct ipmi_smi_msg panic_halt_smi_msg =
+ INIT_IPMI_SMI_MSG(panic_smi_free);
+static struct ipmi_recv_msg panic_halt_recv_msg =
+ INIT_IPMI_RECV_MSG(panic_recv_free);
/*
* Special call, doesn't claim any locks. This is only to be called
@@ -668,7 +660,7 @@ static int ipmi_heartbeat(void)
return rv;
}
-static struct watchdog_info ident = {
+static const struct watchdog_info ident = {
.options = 0, /* WDIOF_SETTIMEOUT, */
.firmware_version = 1,
.identity = "IPMI"
diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c
index 92a37b33494c..19c32bf50e0e 100644
--- a/drivers/char/ipmi/kcs_bmc_aspeed.c
+++ b/drivers/char/ipmi/kcs_bmc_aspeed.c
@@ -128,11 +128,6 @@ struct aspeed_kcs_bmc {
} obe;
};
-struct aspeed_kcs_of_ops {
- int (*get_channel)(struct platform_device *pdev);
- int (*get_io_address)(struct platform_device *pdev, u32 addrs[2]);
-};
-
static inline struct aspeed_kcs_bmc *to_aspeed_kcs_bmc(struct kcs_bmc_device *kcs_bmc)
{
return container_of(kcs_bmc, struct aspeed_kcs_bmc, kcs_bmc);
@@ -212,17 +207,24 @@ static void aspeed_kcs_updateb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask,
}
/*
- * AST_usrGuide_KCS.pdf
- * 2. Background:
- * we note D for Data, and C for Cmd/Status, default rules are
- * A. KCS1 / KCS2 ( D / C:X / X+4 )
- * D / C : CA0h / CA4h
- * D / C : CA8h / CACh
- * B. KCS3 ( D / C:XX2h / XX3h )
- * D / C : CA2h / CA3h
- * D / C : CB2h / CB3h
- * C. KCS4
- * D / C : CA4h / CA5h
+ * We note D for Data, and C for Cmd/Status, default rules are
+ *
+ * 1. Only the D address is given:
+ * A. KCS1/KCS2 (D/C: X/X+4)
+ * D/C: CA0h/CA4h
+ * D/C: CA8h/CACh
+ * B. KCS3 (D/C: XX2/XX3h)
+ * D/C: CA2h/CA3h
+ * C. KCS4 (D/C: X/X+1)
+ * D/C: CA4h/CA5h
+ *
+ * 2. Both the D/C addresses are given:
+ * A. KCS1/KCS2/KCS4 (D/C: X/Y)
+ * D/C: CA0h/CA1h
+ * D/C: CA8h/CA9h
+ * D/C: CA4h/CA5h
+ * B. KCS3 (D/C: XX2/XX3h)
+ * D/C: CA2h/CA3h
*/
static int aspeed_kcs_set_address(struct kcs_bmc_device *kcs_bmc, u32 addrs[2], int nr_addrs)
{
@@ -475,38 +477,7 @@ static const struct kcs_ioreg ast_kcs_bmc_ioregs[KCS_CHANNEL_MAX] = {
{ .idr = LPC_IDR4, .odr = LPC_ODR4, .str = LPC_STR4 },
};
-static int aspeed_kcs_of_v1_get_channel(struct platform_device *pdev)
-{
- struct device_node *np;
- u32 channel;
- int rc;
-
- np = pdev->dev.of_node;
-
- rc = of_property_read_u32(np, "kcs_chan", &channel);
- if ((rc != 0) || (channel == 0 || channel > KCS_CHANNEL_MAX)) {
- dev_err(&pdev->dev, "no valid 'kcs_chan' configured\n");
- return -EINVAL;
- }
-
- return channel;
-}
-
-static int
-aspeed_kcs_of_v1_get_io_address(struct platform_device *pdev, u32 addrs[2])
-{
- int rc;
-
- rc = of_property_read_u32(pdev->dev.of_node, "kcs_addr", addrs);
- if (rc || addrs[0] > 0xffff) {
- dev_err(&pdev->dev, "no valid 'kcs_addr' configured\n");
- return -EINVAL;
- }
-
- return 1;
-}
-
-static int aspeed_kcs_of_v2_get_channel(struct platform_device *pdev)
+static int aspeed_kcs_of_get_channel(struct platform_device *pdev)
{
struct device_node *np;
struct kcs_ioreg ioreg;
@@ -535,12 +506,11 @@ static int aspeed_kcs_of_v2_get_channel(struct platform_device *pdev)
if (!memcmp(&ast_kcs_bmc_ioregs[i], &ioreg, sizeof(ioreg)))
return i + 1;
}
-
return -EINVAL;
}
static int
-aspeed_kcs_of_v2_get_io_address(struct platform_device *pdev, u32 addrs[2])
+aspeed_kcs_of_get_io_address(struct platform_device *pdev, u32 addrs[2])
{
int rc;
@@ -567,7 +537,6 @@ aspeed_kcs_of_v2_get_io_address(struct platform_device *pdev, u32 addrs[2])
static int aspeed_kcs_probe(struct platform_device *pdev)
{
- const struct aspeed_kcs_of_ops *ops;
struct kcs_bmc_device *kcs_bmc;
struct aspeed_kcs_bmc *priv;
struct device_node *np;
@@ -585,15 +554,11 @@ static int aspeed_kcs_probe(struct platform_device *pdev)
return -ENODEV;
}
- ops = of_device_get_match_data(&pdev->dev);
- if (!ops)
- return -EINVAL;
-
- channel = ops->get_channel(pdev);
+ channel = aspeed_kcs_of_get_channel(pdev);
if (channel < 0)
return channel;
- nr_addrs = ops->get_io_address(pdev, addrs);
+ nr_addrs = aspeed_kcs_of_get_io_address(pdev, addrs);
if (nr_addrs < 0)
return nr_addrs;
@@ -678,21 +643,10 @@ static int aspeed_kcs_remove(struct platform_device *pdev)
return 0;
}
-static const struct aspeed_kcs_of_ops of_v1_ops = {
- .get_channel = aspeed_kcs_of_v1_get_channel,
- .get_io_address = aspeed_kcs_of_v1_get_io_address,
-};
-
-static const struct aspeed_kcs_of_ops of_v2_ops = {
- .get_channel = aspeed_kcs_of_v2_get_channel,
- .get_io_address = aspeed_kcs_of_v2_get_io_address,
-};
-
static const struct of_device_id ast_kcs_bmc_match[] = {
- { .compatible = "aspeed,ast2400-kcs-bmc", .data = &of_v1_ops },
- { .compatible = "aspeed,ast2500-kcs-bmc", .data = &of_v1_ops },
- { .compatible = "aspeed,ast2400-kcs-bmc-v2", .data = &of_v2_ops },
- { .compatible = "aspeed,ast2500-kcs-bmc-v2", .data = &of_v2_ops },
+ { .compatible = "aspeed,ast2400-kcs-bmc-v2" },
+ { .compatible = "aspeed,ast2500-kcs-bmc-v2" },
+ { .compatible = "aspeed,ast2600-kcs-bmc" },
{ }
};
MODULE_DEVICE_TABLE(of, ast_kcs_bmc_match);
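The single-address rules spelled out in the new comment above aspeed_kcs_set_address() reduce to a fixed offset per channel when only the data (D) port is given. The helper below is purely illustrative and not part of the patch; the integer channel argument is a hypothetical stand-in for however the caller identifies KCS1-KCS4.

/* Illustrative only: derive the cmd/status (C) port from a data-only (D) port. */
static unsigned int kcs_cmd_addr_from_data(int channel, unsigned int d_addr)
{
	switch (channel) {
	case 1:
	case 2:
		return d_addr + 4;	/* KCS1/KCS2: D/C = X/X+4, e.g. CA0h/CA4h */
	case 3:
		return d_addr + 1;	/* KCS3: D/C = XX2h/XX3h, e.g. CA2h/CA3h */
	case 4:
		return d_addr + 1;	/* KCS4: D/C = X/X+1, e.g. CA4h/CA5h */
	default:
		return 0;		/* unknown channel */
	}
}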
diff --git a/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c b/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c
index 486834a962c3..cf670e891966 100644
--- a/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c
+++ b/drivers/char/ipmi/kcs_bmc_cdev_ipmi.c
@@ -548,7 +548,7 @@ static struct kcs_bmc_driver kcs_bmc_ipmi_driver = {
.ops = &kcs_bmc_ipmi_driver_ops,
};
-static int kcs_bmc_ipmi_init(void)
+static int __init kcs_bmc_ipmi_init(void)
{
kcs_bmc_register_driver(&kcs_bmc_ipmi_driver);
@@ -556,7 +556,7 @@ static int kcs_bmc_ipmi_init(void)
}
module_init(kcs_bmc_ipmi_init);
-static void kcs_bmc_ipmi_exit(void)
+static void __exit kcs_bmc_ipmi_exit(void)
{
kcs_bmc_unregister_driver(&kcs_bmc_ipmi_driver);
}
diff --git a/drivers/char/ipmi/kcs_bmc_serio.c b/drivers/char/ipmi/kcs_bmc_serio.c
index 7e2067628a6c..1793358be782 100644
--- a/drivers/char/ipmi/kcs_bmc_serio.c
+++ b/drivers/char/ipmi/kcs_bmc_serio.c
@@ -140,7 +140,7 @@ static struct kcs_bmc_driver kcs_bmc_serio_driver = {
.ops = &kcs_bmc_serio_driver_ops,
};
-static int kcs_bmc_serio_init(void)
+static int __init kcs_bmc_serio_init(void)
{
kcs_bmc_register_driver(&kcs_bmc_serio_driver);
@@ -148,7 +148,7 @@ static int kcs_bmc_serio_init(void)
}
module_init(kcs_bmc_serio_init);
-static void kcs_bmc_serio_exit(void)
+static void __exit kcs_bmc_serio_exit(void)
{
kcs_bmc_unregister_driver(&kcs_bmc_serio_driver);
}
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 0e22e3b0a04e..38aad99ebb61 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -1019,7 +1019,7 @@ static struct parport_driver lp_driver = {
static int __init lp_init(void)
{
- int i, err = 0;
+ int i, err;
if (parport_nr[0] == LP_PARPORT_OFF)
return 0;
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index cc296f0823bd..5611d127363e 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -101,7 +101,7 @@ static inline bool should_stop_iteration(void)
{
if (need_resched())
cond_resched();
- return fatal_signal_pending(current);
+ return signal_pending(current);
}
/*
@@ -480,6 +480,11 @@ static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
+static int uring_cmd_null(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
+{
+ return 0;
+}
+
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
size_t written = 0;
@@ -663,6 +668,7 @@ static const struct file_operations null_fops = {
.read_iter = read_iter_null,
.write_iter = write_iter_null,
.splice_write = splice_write_null,
+ .uring_cmd = uring_cmd_null,
};
static const struct file_operations __maybe_unused port_fops = {
@@ -706,8 +712,8 @@ static const struct memdev {
#endif
[5] = { "zero", 0666, &zero_fops, FMODE_NOWAIT },
[7] = { "full", 0666, &full_fops, 0 },
- [8] = { "random", 0666, &random_fops, 0 },
- [9] = { "urandom", 0666, &urandom_fops, 0 },
+ [8] = { "random", 0666, &random_fops, FMODE_NOWAIT },
+ [9] = { "urandom", 0666, &urandom_fops, FMODE_NOWAIT },
#ifdef CONFIG_PRINTK
[11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index ca5141ed5ef3..cba19bfdc44d 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -100,17 +100,18 @@ static const struct seq_operations misc_seq_ops = {
static int misc_open(struct inode *inode, struct file *file)
{
int minor = iminor(inode);
- struct miscdevice *c;
+ struct miscdevice *c = NULL, *iter;
int err = -ENODEV;
const struct file_operations *new_fops = NULL;
mutex_lock(&misc_mtx);
- list_for_each_entry(c, &misc_list, list) {
- if (c->minor == minor) {
- new_fops = fops_get(c->fops);
- break;
- }
+ list_for_each_entry(iter, &misc_list, list) {
+ if (iter->minor != minor)
+ continue;
+ c = iter;
+ new_fops = fops_get(iter->fops);
+ break;
}
if (!new_fops) {
@@ -118,11 +119,12 @@ static int misc_open(struct inode *inode, struct file *file)
request_module("char-major-%d-%d", MISC_MAJOR, minor);
mutex_lock(&misc_mtx);
- list_for_each_entry(c, &misc_list, list) {
- if (c->minor == minor) {
- new_fops = fops_get(c->fops);
- break;
- }
+ list_for_each_entry(iter, &misc_list, list) {
+ if (iter->minor != minor)
+ continue;
+ c = iter;
+ new_fops = fops_get(iter->fops);
+ break;
}
if (!new_fops)
goto fail;
diff --git a/drivers/char/mwave/3780i.h b/drivers/char/mwave/3780i.h
index 9ccb6b270b07..95164246afd1 100644
--- a/drivers/char/mwave/3780i.h
+++ b/drivers/char/mwave/3780i.h
@@ -68,7 +68,7 @@ typedef struct {
unsigned char ClockControl:1; /* RW: Clock control: 0=normal, 1=stop 3780i clocks */
unsigned char SoftReset:1; /* RW: Soft reset 0=normal, 1=soft reset active */
unsigned char ConfigMode:1; /* RW: Configuration mode, 0=normal, 1=config mode */
- unsigned char Reserved:5; /* 0: Reserved */
+ unsigned short Reserved:13; /* 0: Reserved */
} DSP_ISA_SLAVE_CONTROL;
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 78baba55a8b5..b2735be81ab2 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -922,7 +922,7 @@ static void rx_ready_async(MGSLPC_INFO *info, int tcd)
// BIT7:parity error
// BIT6:framing error
- if (status & (BIT7 + BIT6)) {
+ if (status & (BIT7 | BIT6)) {
if (status & BIT7)
icount->parity++;
else
@@ -1418,7 +1418,11 @@ static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
/* byte size and parity */
-
+ if ((cflag & CSIZE) != CS8) {
+ cflag &= ~CSIZE;
+ cflag |= CS7;
+ tty->termios.c_cflag = cflag;
+ }
info->params.data_bits = tty_get_char_size(cflag);
if (cflag & CSTOPB)
@@ -1432,10 +1436,8 @@ static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
info->params.parity = ASYNC_PARITY_ODD;
else
info->params.parity = ASYNC_PARITY_EVEN;
-#ifdef CMSPAR
if (cflag & CMSPAR)
info->params.parity = ASYNC_PARITY_SPACE;
-#endif
}
/* calculate number of jiffies to transmit a full
@@ -2272,7 +2274,8 @@ static int mgslpc_ioctl(struct tty_struct *tty,
* tty pointer to tty structure
* termios pointer to buffer to hold returned old termios
*/
-static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+static void mgslpc_set_termios(struct tty_struct *tty,
+ const struct ktermios *old_termios)
{
MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
unsigned long flags;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 605969ed0f96..69754155300e 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,310 +1,26 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
- * random.c -- A strong random number generator
- *
- * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
- * Rights Reserved.
- *
+ * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
* Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
- *
- * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
- * rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, and the entire permission notice in its entirety,
- * including the disclaimer of warranties.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote
- * products derived from this software without specific prior
- * written permission.
- *
- * ALTERNATIVELY, this product may be distributed under the terms of
- * the GNU General Public License, in which case the provisions of the GPL are
- * required INSTEAD OF the above restrictions. (This clause is
- * necessary due to a potential bad interaction between the GPL and
- * the restrictions contained in a BSD-style copyright.)
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
- * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
- * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- */
-
-/*
- * (now, with legal B.S. out of the way.....)
- *
- * This routine gathers environmental noise from device drivers, etc.,
- * and returns good random numbers, suitable for cryptographic use.
- * Besides the obvious cryptographic uses, these numbers are also good
- * for seeding TCP sequence numbers, and other places where it is
- * desirable to have numbers which are not only random, but hard to
- * predict by an attacker.
- *
- * Theory of operation
- * ===================
- *
- * Computers are very predictable devices. Hence it is extremely hard
- * to produce truly random numbers on a computer --- as opposed to
- * pseudo-random numbers, which can easily generated by using a
- * algorithm. Unfortunately, it is very easy for attackers to guess
- * the sequence of pseudo-random number generators, and for some
- * applications this is not acceptable. So instead, we must try to
- * gather "environmental noise" from the computer's environment, which
- * must be hard for outside attackers to observe, and use that to
- * generate random numbers. In a Unix environment, this is best done
- * from inside the kernel.
- *
- * Sources of randomness from the environment include inter-keyboard
- * timings, inter-interrupt timings from some interrupts, and other
- * events which are both (a) non-deterministic and (b) hard for an
- * outside observer to measure. Randomness from these sources are
- * added to an "entropy pool", which is mixed using a CRC-like function.
- * This is not cryptographically strong, but it is adequate assuming
- * the randomness is not chosen maliciously, and it is fast enough that
- * the overhead of doing it on every interrupt is very reasonable.
- * As random bytes are mixed into the entropy pool, the routines keep
- * an *estimate* of how many bits of randomness have been stored into
- * the random number generator's internal state.
- *
- * When random bytes are desired, they are obtained by taking the SHA
- * hash of the contents of the "entropy pool". The SHA hash avoids
- * exposing the internal state of the entropy pool. It is believed to
- * be computationally infeasible to derive any useful information
- * about the input of SHA from its output. Even if it is possible to
- * analyze SHA in some clever way, as long as the amount of data
- * returned from the generator is less than the inherent entropy in
- * the pool, the output data is totally unpredictable. For this
- * reason, the routine decreases its internal estimate of how many
- * bits of "true randomness" are contained in the entropy pool as it
- * outputs random numbers.
- *
- * If this estimate goes to zero, the routine can still generate
- * random numbers; however, an attacker may (at least in theory) be
- * able to infer the future output of the generator from prior
- * outputs. This requires successful cryptanalysis of SHA, which is
- * not believed to be feasible, but there is a remote possibility.
- * Nonetheless, these numbers should be useful for the vast majority
- * of purposes.
- *
- * Exported interfaces ---- output
- * ===============================
- *
- * There are four exported interfaces; two for use within the kernel,
- * and two or use from userspace.
- *
- * Exported interfaces ---- userspace output
- * -----------------------------------------
- *
- * The userspace interfaces are two character devices /dev/random and
- * /dev/urandom. /dev/random is suitable for use when very high
- * quality randomness is desired (for example, for key generation or
- * one-time pads), as it will only return a maximum of the number of
- * bits of randomness (as estimated by the random number generator)
- * contained in the entropy pool.
- *
- * The /dev/urandom device does not have this limit, and will return
- * as many bytes as are requested. As more and more random bytes are
- * requested without giving time for the entropy pool to recharge,
- * this will result in random numbers that are merely cryptographically
- * strong. For many applications, however, this is acceptable.
- *
- * Exported interfaces ---- kernel output
- * --------------------------------------
- *
- * The primary kernel interface is
- *
- * void get_random_bytes(void *buf, int nbytes);
- *
- * This interface will return the requested number of random bytes,
- * and place it in the requested buffer. This is equivalent to a
- * read from /dev/urandom.
- *
- * For less critical applications, there are the functions:
- *
- * u32 get_random_u32()
- * u64 get_random_u64()
- * unsigned int get_random_int()
- * unsigned long get_random_long()
- *
- * These are produced by a cryptographic RNG seeded from get_random_bytes,
- * and so do not deplete the entropy pool as much. These are recommended
- * for most in-kernel operations *if the result is going to be stored in
- * the kernel*.
- *
- * Specifically, the get_random_int() family do not attempt to do
- * "anti-backtracking". If you capture the state of the kernel (e.g.
- * by snapshotting the VM), you can figure out previous get_random_int()
- * return values. But if the value is stored in the kernel anyway,
- * this is not a problem.
- *
- * It *is* safe to expose get_random_int() output to attackers (e.g. as
- * network cookies); given outputs 1..n, it's not feasible to predict
- * outputs 0 or n+1. The only concern is an attacker who breaks into
- * the kernel later; the get_random_int() engine is not reseeded as
- * often as the get_random_bytes() one.
- *
- * get_random_bytes() is needed for keys that need to stay secret after
- * they are erased from the kernel. For example, any key that will
- * be wrapped and stored encrypted. And session encryption keys: we'd
- * like to know that after the session is closed and the keys erased,
- * the plaintext is unrecoverable to someone who recorded the ciphertext.
- *
- * But for network ports/cookies, stack canaries, PRNG seeds, address
- * space layout randomization, session *authentication* keys, or other
- * applications where the sensitive data is stored in the kernel in
- * plaintext for as long as it's sensitive, the get_random_int() family
- * is just fine.
- *
- * Consider ASLR. We want to keep the address space secret from an
- * outside attacker while the process is running, but once the address
- * space is torn down, it's of no use to an attacker any more. And it's
- * stored in kernel data structures as long as it's alive, so worrying
- * about an attacker's ability to extrapolate it from the get_random_int()
- * CRNG is silly.
- *
- * Even some cryptographic keys are safe to generate with get_random_int().
- * In particular, keys for SipHash are generally fine. Here, knowledge
- * of the key authorizes you to do something to a kernel object (inject
- * packets to a network connection, or flood a hash table), and the
- * key is stored with the object being protected. Once it goes away,
- * we no longer care if anyone knows the key.
- *
- * prandom_u32()
- * -------------
- *
- * For even weaker applications, see the pseudorandom generator
- * prandom_u32(), prandom_max(), and prandom_bytes(). If the random
- * numbers aren't security-critical at all, these are *far* cheaper.
- * Useful for self-tests, random error simulation, randomized backoffs,
- * and any other application where you trust that nobody is trying to
- * maliciously mess with you by guessing the "random" numbers.
- *
- * Exported interfaces ---- input
- * ==============================
- *
- * The current exported interfaces for gathering environmental noise
- * from the devices are:
- *
- * void add_device_randomness(const void *buf, unsigned int size);
- * void add_input_randomness(unsigned int type, unsigned int code,
- * unsigned int value);
- * void add_interrupt_randomness(int irq, int irq_flags);
- * void add_disk_randomness(struct gendisk *disk);
- *
- * add_device_randomness() is for adding data to the random pool that
- * is likely to differ between two devices (or possibly even per boot).
- * This would be things like MAC addresses or serial numbers, or the
- * read-out of the RTC. This does *not* add any actual entropy to the
- * pool, but it initializes the pool to different values for devices
- * that might otherwise be identical and have very little entropy
- * available to them (particularly common in the embedded world).
- *
- * add_input_randomness() uses the input layer interrupt timing, as well as
- * the event type information from the hardware.
- *
- * add_interrupt_randomness() uses the interrupt timing as random
- * inputs to the entropy pool. Using the cycle counters and the irq source
- * as inputs, it feeds the randomness roughly once a second.
- *
- * add_disk_randomness() uses what amounts to the seek time of block
- * layer request events, on a per-disk_devt basis, as input to the
- * entropy pool. Note that high-speed solid state drives with very low
- * seek times do not make for good sources of entropy, as their seek
- * times are usually fairly consistent.
- *
- * All of these routines try to estimate how many bits of randomness a
- * particular randomness source. They do this by keeping track of the
- * first and second order deltas of the event timings.
- *
- * Ensuring unpredictability at system startup
- * ============================================
- *
- * When any operating system starts up, it will go through a sequence
- * of actions that are fairly predictable by an adversary, especially
- * if the start-up does not involve interaction with a human operator.
- * This reduces the actual number of bits of unpredictability in the
- * entropy pool below the value in entropy_count. In order to
- * counteract this effect, it helps to carry information in the
- * entropy pool across shut-downs and start-ups. To do this, put the
- * following lines an appropriate script which is run during the boot
- * sequence:
- *
- * echo "Initializing random number generator..."
- * random_seed=/var/run/random-seed
- * # Carry a random seed from start-up to start-up
- * # Load and then save the whole entropy pool
- * if [ -f $random_seed ]; then
- * cat $random_seed >/dev/urandom
- * else
- * touch $random_seed
- * fi
- * chmod 600 $random_seed
- * dd if=/dev/urandom of=$random_seed count=1 bs=512
- *
- * and the following lines in an appropriate script which is run as
- * the system is shutdown:
- *
- * # Carry a random seed from shut-down to start-up
- * # Save the whole entropy pool
- * echo "Saving random seed..."
- * random_seed=/var/run/random-seed
- * touch $random_seed
- * chmod 600 $random_seed
- * dd if=/dev/urandom of=$random_seed count=1 bs=512
- *
- * For example, on most modern systems using the System V init
- * scripts, such code fragments would be found in
- * /etc/rc.d/init.d/random. On older Linux systems, the correct script
- * location might be in /etc/rcb.d/rc.local or /etc/rc.d/rc.0.
- *
- * Effectively, these commands cause the contents of the entropy pool
- * to be saved at shut-down time and reloaded into the entropy pool at
- * start-up. (The 'dd' in the addition to the bootup script is to
- * make sure that /etc/random-seed is different for every start-up,
- * even if the system crashes without executing rc.0.) Even with
- * complete knowledge of the start-up activities, predicting the state
- * of the entropy pool requires knowledge of the previous history of
- * the system.
- *
- * Configuring the /dev/random driver under Linux
- * ==============================================
- *
- * The /dev/random driver under Linux uses minor numbers 8 and 9 of
- * the /dev/mem major number (#1). So if your system does not have
- * /dev/random and /dev/urandom created already, they can be created
- * by using the commands:
- *
- * mknod /dev/random c 1 8
- * mknod /dev/urandom c 1 9
- *
- * Acknowledgements:
- * =================
- *
- * Ideas for constructing this random number generator were derived
- * from Pretty Good Privacy's random number generator, and from private
- * discussions with Phil Karn. Colin Plumb provided a faster random
- * number generator, which speed up the mixing function of the entropy
- * pool, taken from PGPfone. Dale Worley has also contributed many
- * useful ideas and suggestions to improve this driver.
- *
- * Any flaws in the design are solely my responsibility, and should
- * not be attributed to the Phil, Colin, or any of authors of PGP.
- *
- * Further background information on this topic may be obtained from
- * RFC 1750, "Randomness Recommendations for Security", by Donald
- * Eastlake, Steve Crocker, and Jeff Schiller.
+ * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
+ *
+ * This driver produces cryptographically secure pseudorandom data. It is divided
+ * into roughly six sections, each with a section header:
+ *
+ * - Initialization and readiness waiting.
+ * - Fast key erasure RNG, the "crng".
+ * - Entropy accumulation and extraction routines.
+ * - Entropy collection routines.
+ * - Userspace reader/writer interfaces.
+ * - Sysctl interface.
+ *
+ * The high level overview is that there is one input pool, into which
+ * various pieces of data are hashed. Prior to initialization, some of that
+ * data is then "credited" as having a certain number of bits of entropy.
+ * When enough bits of entropy are available, the hash is finalized and
+ * handed as a key to a stream cipher that expands it indefinitely for
+ * various consumers. This key is periodically refreshed as the various
+ * entropy collectors, described below, add data to the input pool.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -320,14 +36,13 @@
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
-#include <linux/genhd.h>
+#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
-#include <linux/fips.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
@@ -335,773 +50,803 @@
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
+#include <linux/uaccess.h>
+#include <linux/suspend.h>
+#include <linux/siphash.h>
#include <crypto/chacha.h>
-#include <crypto/sha1.h>
-
+#include <crypto/blake2s.h>
#include <asm/processor.h>
-#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>
-#define CREATE_TRACE_POINTS
-#include <trace/events/random.h>
-
-/* #define ADD_INTERRUPT_BENCH */
+/*********************************************************************
+ *
+ * Initialization and readiness waiting.
+ *
+ * Much of the RNG infrastructure is devoted to various dependencies
+ * being able to wait until the RNG has collected enough entropy and
+ * is ready for safe consumption.
+ *
+ *********************************************************************/
/*
- * Configuration information
+ * crng_init is protected by base_crng->lock, and only increases
+ * its value (from empty->early->ready).
*/
-#define INPUT_POOL_SHIFT 12
-#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
-#define OUTPUT_POOL_SHIFT 10
-#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
-#define EXTRACT_SIZE 10
-
+static enum {
+ CRNG_EMPTY = 0, /* Little to no entropy collected */
+ CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
+ CRNG_READY = 2 /* Fully initialized with POOL_READY_BITS collected */
+} crng_init __read_mostly = CRNG_EMPTY;
+static DEFINE_STATIC_KEY_FALSE(crng_is_ready);
+#define crng_ready() (static_branch_likely(&crng_is_ready) || crng_init >= CRNG_READY)
+/* Various types of waiters for crng_init->CRNG_READY transition. */
+static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
+static struct fasync_struct *fasync;
-#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
+/* Control how we warn userspace. */
+static struct ratelimit_state urandom_warning =
+ RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE);
+static int ratelimit_disable __read_mostly =
+ IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
+module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
+MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
/*
- * To allow fractional bits to be tracked, the entropy_count field is
- * denominated in units of 1/8th bits.
+ * Returns whether or not the input pool has been seeded and thus guaranteed
+ * to supply cryptographically secure random numbers. This applies to: the
+ * /dev/urandom device, the get_random_bytes function, and the get_random_{u8,
+ * u16,u32,u64,long} family of functions.
*
- * 2*(ENTROPY_SHIFT + poolbitshift) must <= 31, or the multiply in
- * credit_entropy_bits() needs to be 64 bits wide.
+ * Returns: true if the input pool has been seeded.
+ * false if the input pool has not been seeded.
*/
-#define ENTROPY_SHIFT 3
-#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
+bool rng_is_initialized(void)
+{
+ return crng_ready();
+}
+EXPORT_SYMBOL(rng_is_initialized);
-/*
- * If the entropy count falls under this number of bits, then we
- * should wake up processes which are selecting or polling on write
- * access to /dev/random.
- */
-static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
+static void __cold crng_set_ready(struct work_struct *work)
+{
+ static_branch_enable(&crng_is_ready);
+}
-/*
- * Originally, we used a primitive polynomial of degree .poolwords
- * over GF(2). The taps for various sizes are defined below. They
- * were chosen to be evenly spaced except for the last tap, which is 1
- * to get the twisting happening as fast as possible.
- *
- * For the purposes of better mixing, we use the CRC-32 polynomial as
- * well to make a (modified) twisted Generalized Feedback Shift
- * Register. (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR
- * generators. ACM Transactions on Modeling and Computer Simulation
- * 2(3):179-194. Also see M. Matsumoto & Y. Kurita, 1994. Twisted
- * GFSR generators II. ACM Transactions on Modeling and Computer
- * Simulation 4:254-266)
- *
- * Thanks to Colin Plumb for suggesting this.
- *
- * The mixing operation is much less sensitive than the output hash,
- * where we use SHA-1. All that we want of mixing operation is that
- * it be a good non-cryptographic hash; i.e. it not produce collisions
- * when fed "random" data of the sort we expect to see. As long as
- * the pool state differs for different inputs, we have preserved the
- * input entropy and done a good job. The fact that an intelligent
- * attacker can construct inputs that will produce controlled
- * alterations to the pool's state is not important because we don't
- * consider such inputs to contribute any randomness. The only
- * property we need with respect to them is that the attacker can't
- * increase his/her knowledge of the pool's state. Since all
- * additions are reversible (knowing the final state and the input,
- * you can reconstruct the initial state), if an attacker has any
- * uncertainty about the initial state, he/she can only shuffle that
- * uncertainty about, but never cause any collisions (which would
- * decrease the uncertainty).
- *
- * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and
- * Videau in their paper, "The Linux Pseudorandom Number Generator
- * Revisited" (see: http://eprint.iacr.org/2012/251.pdf). In their
- * paper, they point out that we are not using a true Twisted GFSR,
- * since Matsumoto & Kurita used a trinomial feedback polynomial (that
- * is, with only three taps, instead of the six that we are using).
- * As a result, the resulting polynomial is neither primitive nor
- * irreducible, and hence does not have a maximal period over
- * GF(2**32). They suggest a slight change to the generator
- * polynomial which improves the resulting TGFSR polynomial to be
- * irreducible, which we have made here.
- */
-static const struct poolinfo {
- int poolbitshift, poolwords, poolbytes, poolfracbits;
-#define S(x) ilog2(x)+5, (x), (x)*4, (x) << (ENTROPY_SHIFT+5)
- int tap1, tap2, tap3, tap4, tap5;
-} poolinfo_table[] = {
- /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
- /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
- { S(128), 104, 76, 51, 25, 1 },
-};
+/* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
+static void try_to_generate_entropy(void);
/*
- * Static global variables
+ * Wait for the input pool to be seeded and thus guaranteed to supply
+ * cryptographically secure random numbers. This applies to: the /dev/urandom
+ * device, the get_random_bytes function, and the get_random_{u8,u16,u32,u64,
+ * int,long} family of functions. Using any of these functions without first
+ * calling this function forfeits the guarantee of security.
+ *
+ * Returns: 0 if the input pool has been seeded.
+ * -ERESTARTSYS if the function was interrupted by a signal.
*/
-static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
-static struct fasync_struct *fasync;
+int wait_for_random_bytes(void)
+{
+ while (!crng_ready()) {
+ int ret;
-static DEFINE_SPINLOCK(random_ready_list_lock);
-static LIST_HEAD(random_ready_list);
+ try_to_generate_entropy();
+ ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
+ if (ret)
+ return ret > 0 ? 0 : ret;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(wait_for_random_bytes);
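A hedged usage sketch for in-kernel callers of the interface above; the helper name is hypothetical, but the two calls are the ones exported by this file:

        #include <linux/random.h>

        /* Hypothetical consumer: derive a long-lived key only once the pool is seeded. */
        static int example_generate_key(u8 key[32])
        {
                int ret = wait_for_random_bytes();      /* may return -ERESTARTSYS on a signal */

                if (ret)
                        return ret;
                get_random_bytes(key, 32);              /* now guaranteed to be well seeded */
                return 0;
        }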
-struct crng_state {
- __u32 state[16];
- unsigned long init_time;
- spinlock_t lock;
-};
+#define warn_unseeded_randomness() \
+ if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
+ printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
+ __func__, (void *)_RET_IP_, crng_init)
-static struct crng_state primary_crng = {
- .lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
-};
-/*
- * crng_init = 0 --> Uninitialized
- * 1 --> Initialized
- * 2 --> Initialized from input_pool
+/*********************************************************************
*
- * crng_init is protected by primary_crng->lock, and only increases
- * its value (from 0->1->2).
- */
-static int crng_init = 0;
-#define crng_ready() (likely(crng_init > 1))
-static int crng_init_cnt = 0;
-static unsigned long crng_global_init_time = 0;
-#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE)
-static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA_BLOCK_SIZE]);
-static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA_BLOCK_SIZE], int used);
-static void process_random_ready_list(void);
-static void _get_random_bytes(void *buf, int nbytes);
-
-static struct ratelimit_state unseeded_warning =
- RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
-static struct ratelimit_state urandom_warning =
- RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
-
-static int ratelimit_disable __read_mostly;
-
-module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
-MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
-
-/**********************************************************************
+ * Fast key erasure RNG, the "crng".
*
- * OS independent entropy store. Here are the functions which handle
- * storing entropy in an entropy pool.
+ * These functions expand entropy from the entropy extractor into
+ * long streams for external consumption using the "fast key erasure"
+ * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
*
- **********************************************************************/
+ * There are a few exported interfaces for use by other drivers:
+ *
+ * void get_random_bytes(void *buf, size_t len)
+ * u8 get_random_u8()
+ * u16 get_random_u16()
+ * u32 get_random_u32()
+ * u64 get_random_u64()
+ * unsigned long get_random_long()
+ *
+ * These interfaces will return the requested number of random bytes
+ * into the given buffer or as a return value. This is equivalent to
+ * a read from /dev/urandom. The u8, u16, u32, u64, long family of
+ * functions may be higher performance for one-off random integers,
+ * because they do a bit of buffering and do not invoke reseeding
+ * until the buffer is emptied.
+ *
+ *********************************************************************/
-struct entropy_store;
-struct entropy_store {
- /* read-only data: */
- const struct poolinfo *poolinfo;
- __u32 *pool;
- const char *name;
+enum {
+ CRNG_RESEED_START_INTERVAL = HZ,
+ CRNG_RESEED_INTERVAL = 60 * HZ
+};
- /* read-write data: */
+static struct {
+ u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
+ unsigned long birth;
+ unsigned long generation;
spinlock_t lock;
- unsigned short add_ptr;
- unsigned short input_rotate;
- int entropy_count;
- unsigned int last_data_init:1;
- __u8 last_data[EXTRACT_SIZE];
+} base_crng = {
+ .lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
};
-static ssize_t extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int min, int rsvd);
-static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int fips);
-
-static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
-static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
-
-static struct entropy_store input_pool = {
- .poolinfo = &poolinfo_table[0],
- .name = "input",
- .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
- .pool = input_pool_data
+struct crng {
+ u8 key[CHACHA_KEY_SIZE];
+ unsigned long generation;
+ local_lock_t lock;
};
-static __u32 const twist_table[8] = {
- 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
- 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
-
-/*
- * This function adds bytes into the entropy "pool". It does not
- * update the entropy estimate. The caller should call
- * credit_entropy_bits if this is appropriate.
- *
- * The pool is stirred with a primitive polynomial of the appropriate
- * degree, and then twisted. We twist by three bits at a time because
- * it's cheap to do so and helps slightly in the expected case where
- * the entropy is concentrated in the low-order bits.
- */
-static void _mix_pool_bytes(struct entropy_store *r, const void *in,
- int nbytes)
-{
- unsigned long i, tap1, tap2, tap3, tap4, tap5;
- int input_rotate;
- int wordmask = r->poolinfo->poolwords - 1;
- const char *bytes = in;
- __u32 w;
-
- tap1 = r->poolinfo->tap1;
- tap2 = r->poolinfo->tap2;
- tap3 = r->poolinfo->tap3;
- tap4 = r->poolinfo->tap4;
- tap5 = r->poolinfo->tap5;
-
- input_rotate = r->input_rotate;
- i = r->add_ptr;
-
- /* mix one byte at a time to simplify size handling and churn faster */
- while (nbytes--) {
- w = rol32(*bytes++, input_rotate);
- i = (i - 1) & wordmask;
-
- /* XOR in the various taps */
- w ^= r->pool[i];
- w ^= r->pool[(i + tap1) & wordmask];
- w ^= r->pool[(i + tap2) & wordmask];
- w ^= r->pool[(i + tap3) & wordmask];
- w ^= r->pool[(i + tap4) & wordmask];
- w ^= r->pool[(i + tap5) & wordmask];
-
- /* Mix the result back in with a twist */
- r->pool[i] = (w >> 3) ^ twist_table[w & 7];
-
- /*
- * Normally, we add 7 bits of rotation to the pool.
- * At the beginning of the pool, add an extra 7 bits
- * rotation, so that successive passes spread the
- * input bits across the pool evenly.
- */
- input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
- }
-
- r->input_rotate = input_rotate;
- r->add_ptr = i;
-}
+static DEFINE_PER_CPU(struct crng, crngs) = {
+ .generation = ULONG_MAX,
+ .lock = INIT_LOCAL_LOCK(crngs.lock),
+};
-static void __mix_pool_bytes(struct entropy_store *r, const void *in,
- int nbytes)
-{
- trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
- _mix_pool_bytes(r, in, nbytes);
-}
+/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
+static void extract_entropy(void *buf, size_t len);
-static void mix_pool_bytes(struct entropy_store *r, const void *in,
- int nbytes)
+/* This extracts a new crng key from the input pool. */
+static void crng_reseed(void)
{
unsigned long flags;
+ unsigned long next_gen;
+ u8 key[CHACHA_KEY_SIZE];
- trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
- spin_lock_irqsave(&r->lock, flags);
- _mix_pool_bytes(r, in, nbytes);
- spin_unlock_irqrestore(&r->lock, flags);
-}
+ extract_entropy(key, sizeof(key));
-struct fast_pool {
- __u32 pool[4];
- unsigned long last;
- unsigned short reg_idx;
- unsigned char count;
-};
+ /*
+ * We copy the new key into the base_crng, overwriting the old one,
+ * and update the generation counter. We avoid hitting ULONG_MAX,
+ * because the per-cpu crngs are initialized to ULONG_MAX, so this
+ * forces new CPUs that come online to always initialize.
+ */
+ spin_lock_irqsave(&base_crng.lock, flags);
+ memcpy(base_crng.key, key, sizeof(base_crng.key));
+ next_gen = base_crng.generation + 1;
+ if (next_gen == ULONG_MAX)
+ ++next_gen;
+ WRITE_ONCE(base_crng.generation, next_gen);
+ WRITE_ONCE(base_crng.birth, jiffies);
+ if (!static_branch_likely(&crng_is_ready))
+ crng_init = CRNG_READY;
+ spin_unlock_irqrestore(&base_crng.lock, flags);
+ memzero_explicit(key, sizeof(key));
+}
/*
- * This is a fast mixing routine used by the interrupt randomness
- * collector. It's hardcoded for an 128 bit pool and assumes that any
- * locks that might be needed are taken by the caller.
+ * This generates a ChaCha block using the provided key, and then
+ * immediately overwrites that key with half the block. It returns
+ * the resultant ChaCha state to the user, along with the second
+ * half of the block containing 32 bytes of random data that may
+ * be used; random_data_len may not be greater than 32.
+ *
+ * The returned ChaCha state contains within it a copy of the old
+ * key value, at index 4, so the state should always be zeroed out
+ * immediately after using in order to maintain forward secrecy.
+ * If the state cannot be erased in a timely manner, then it is
+ * safer to set the random_data parameter to &chacha_state[4] so
+ * that this function overwrites it before returning.
*/
-static void fast_mix(struct fast_pool *f)
+static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
+ u32 chacha_state[CHACHA_STATE_WORDS],
+ u8 *random_data, size_t random_data_len)
{
- __u32 a = f->pool[0], b = f->pool[1];
- __u32 c = f->pool[2], d = f->pool[3];
+ u8 first_block[CHACHA_BLOCK_SIZE];
- a += b; c += d;
- b = rol32(b, 6); d = rol32(d, 27);
- d ^= a; b ^= c;
+ BUG_ON(random_data_len > 32);
- a += b; c += d;
- b = rol32(b, 16); d = rol32(d, 14);
- d ^= a; b ^= c;
+ chacha_init_consts(chacha_state);
+ memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
+ memset(&chacha_state[12], 0, sizeof(u32) * 4);
+ chacha20_block(chacha_state, first_block);
- a += b; c += d;
- b = rol32(b, 6); d = rol32(d, 27);
- d ^= a; b ^= c;
-
- a += b; c += d;
- b = rol32(b, 16); d = rol32(d, 14);
- d ^= a; b ^= c;
-
- f->pool[0] = a; f->pool[1] = b;
- f->pool[2] = c; f->pool[3] = d;
- f->count++;
+ memcpy(key, first_block, CHACHA_KEY_SIZE);
+ memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
+ memzero_explicit(first_block, sizeof(first_block));
}
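A sketch of the calling pattern the comment above prescribes; it is illustrative only and ignores the locking that the real callers hold around the key:

        /* Take 32 bytes and erase the expanded state afterwards for forward secrecy. */
        static void example_fast_key_erasure_call(u8 key[CHACHA_KEY_SIZE], u8 out[32])
        {
                u32 chacha_state[CHACHA_STATE_WORDS];

                crng_fast_key_erasure(key, chacha_state, out, 32);
                /* ... optionally run chacha20_block(chacha_state, ...) for more data ... */
                memzero_explicit(chacha_state, sizeof(chacha_state));
        }

If the state cannot be erased promptly, the comment's alternative is to pass &chacha_state[4] as the random_data argument, so the embedded key copy is overwritten before the function returns.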
-static void process_random_ready_list(void)
+/*
+ * Return the interval until the next reseeding, which is normally
+ * CRNG_RESEED_INTERVAL, but during early boot, it is at an interval
+ * proportional to the uptime.
+ */
+static unsigned int crng_reseed_interval(void)
{
- unsigned long flags;
- struct random_ready_callback *rdy, *tmp;
-
- spin_lock_irqsave(&random_ready_list_lock, flags);
- list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
- struct module *owner = rdy->owner;
-
- list_del_init(&rdy->list);
- rdy->func(rdy);
- module_put(owner);
+ static bool early_boot = true;
+
+ if (unlikely(READ_ONCE(early_boot))) {
+ time64_t uptime = ktime_get_seconds();
+ if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
+ WRITE_ONCE(early_boot, false);
+ else
+ return max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
+ (unsigned int)uptime / 2 * HZ);
}
- spin_unlock_irqrestore(&random_ready_list_lock, flags);
+ return CRNG_RESEED_INTERVAL;
}
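Worked through in HZ-independent terms: CRNG_RESEED_INTERVAL is 60 seconds, so the early-boot branch is taken until roughly 120 seconds of uptime; at, say, 10 seconds of uptime the function returns max(1 s, 10/2 s) = 5 s, and once uptime reaches 120 s it settles at the steady-state 60 s interval.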
/*
- * Credit (or debit) the entropy store with n bits of entropy.
- * Use credit_entropy_bits_safe() if the value comes from userspace
- * or otherwise should be checked for extreme values.
+ * This function returns a ChaCha state that you may use for generating
+ * random data. It also returns up to 32 bytes on its own of random data
+ * that may be used; random_data_len may not be greater than 32.
*/
-static void credit_entropy_bits(struct entropy_store *r, int nbits)
+static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
+ u8 *random_data, size_t random_data_len)
{
- int entropy_count, orig;
- const int pool_size = r->poolinfo->poolfracbits;
- int nfrac = nbits << ENTROPY_SHIFT;
+ unsigned long flags;
+ struct crng *crng;
- if (!nbits)
- return;
+ BUG_ON(random_data_len > 32);
-retry:
- entropy_count = orig = READ_ONCE(r->entropy_count);
- if (nfrac < 0) {
- /* Debit */
- entropy_count += nfrac;
- } else {
- /*
- * Credit: we have to account for the possibility of
- * overwriting already present entropy. Even in the
- * ideal case of pure Shannon entropy, new contributions
- * approach the full value asymptotically:
- *
- * entropy <- entropy + (pool_size - entropy) *
- * (1 - exp(-add_entropy/pool_size))
- *
- * For add_entropy <= pool_size/2 then
- * (1 - exp(-add_entropy/pool_size)) >=
- * (add_entropy/pool_size)*0.7869...
- * so we can approximate the exponential with
- * 3/4*add_entropy/pool_size and still be on the
- * safe side by adding at most pool_size/2 at a time.
- *
- * The use of pool_size-2 in the while statement is to
- * prevent rounding artifacts from making the loop
- * arbitrarily long; this limits the loop to log2(pool_size)*2
- * turns no matter how large nbits is.
- */
- int pnfrac = nfrac;
- const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
- /* The +2 corresponds to the /4 in the denominator */
-
- do {
- unsigned int anfrac = min(pnfrac, pool_size/2);
- unsigned int add =
- ((pool_size - entropy_count)*anfrac*3) >> s;
-
- entropy_count += add;
- pnfrac -= anfrac;
- } while (unlikely(entropy_count < pool_size-2 && pnfrac));
+ /*
+ * For the fast path, we check whether we're ready, unlocked first, and
+ * then re-check once locked later. In the case where we're really not
+ * ready, we do fast key erasure with the base_crng directly, extracting
+ * when crng_init is CRNG_EMPTY.
+ */
+ if (!crng_ready()) {
+ bool ready;
+
+ spin_lock_irqsave(&base_crng.lock, flags);
+ ready = crng_ready();
+ if (!ready) {
+ if (crng_init == CRNG_EMPTY)
+ extract_entropy(base_crng.key, sizeof(base_crng.key));
+ crng_fast_key_erasure(base_crng.key, chacha_state,
+ random_data, random_data_len);
+ }
+ spin_unlock_irqrestore(&base_crng.lock, flags);
+ if (!ready)
+ return;
}
- if (WARN_ON(entropy_count < 0)) {
- pr_warn("negative entropy/overflow: pool %s count %d\n",
- r->name, entropy_count);
- entropy_count = 0;
- } else if (entropy_count > pool_size)
- entropy_count = pool_size;
- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
- goto retry;
-
- trace_credit_entropy_bits(r->name, nbits,
- entropy_count >> ENTROPY_SHIFT, _RET_IP_);
+ /*
+ * If the base_crng is old enough, we reseed, which in turn bumps the
+ * generation counter that we check below.
+ */
+ if (unlikely(time_is_before_jiffies(READ_ONCE(base_crng.birth) + crng_reseed_interval())))
+ crng_reseed();
- if (r == &input_pool) {
- int entropy_bits = entropy_count >> ENTROPY_SHIFT;
+ local_lock_irqsave(&crngs.lock, flags);
+ crng = raw_cpu_ptr(&crngs);
- if (crng_init < 2 && entropy_bits >= 128)
- crng_reseed(&primary_crng, r);
+ /*
+ * If our per-cpu crng is older than the base_crng, then it means
+ * somebody reseeded the base_crng. In that case, we do fast key
+ * erasure on the base_crng, and use its output as the new key
+ * for our per-cpu crng. This brings us up to date with base_crng.
+ */
+ if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
+ spin_lock(&base_crng.lock);
+ crng_fast_key_erasure(base_crng.key, chacha_state,
+ crng->key, sizeof(crng->key));
+ crng->generation = base_crng.generation;
+ spin_unlock(&base_crng.lock);
}
+
+ /*
+ * Finally, when we've made it this far, our per-cpu crng has an up
+ * to date key, and we can do fast key erasure with it to produce
+ * some random data and a ChaCha state for the caller. All other
+ * branches of this function are "unlikely", so most of the time we
+ * should wind up here immediately.
+ */
+ crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
+ local_unlock_irqrestore(&crngs.lock, flags);
}
-static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
+static void _get_random_bytes(void *buf, size_t len)
{
- const int nbits_max = r->poolinfo->poolwords * 32;
-
- if (nbits < 0)
- return -EINVAL;
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ u8 tmp[CHACHA_BLOCK_SIZE];
+ size_t first_block_len;
- /* Cap the value to avoid overflows */
- nbits = min(nbits, nbits_max);
+ if (!len)
+ return;
- credit_entropy_bits(r, nbits);
- return 0;
-}
+ first_block_len = min_t(size_t, 32, len);
+ crng_make_state(chacha_state, buf, first_block_len);
+ len -= first_block_len;
+ buf += first_block_len;
-/*********************************************************************
- *
- * CRNG using CHACHA20
- *
- *********************************************************************/
+ while (len) {
+ if (len < CHACHA_BLOCK_SIZE) {
+ chacha20_block(chacha_state, tmp);
+ memcpy(buf, tmp, len);
+ memzero_explicit(tmp, sizeof(tmp));
+ break;
+ }
-#define CRNG_RESEED_INTERVAL (300*HZ)
+ chacha20_block(chacha_state, buf);
+ if (unlikely(chacha_state[12] == 0))
+ ++chacha_state[13];
+ len -= CHACHA_BLOCK_SIZE;
+ buf += CHACHA_BLOCK_SIZE;
+ }
-static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
+ memzero_explicit(chacha_state, sizeof(chacha_state));
+}
-#ifdef CONFIG_NUMA
/*
- * Hack to deal with crazy userspace progams when they are all trying
- * to access /dev/urandom in parallel. The programs are almost
- * certainly doing something terribly wrong, but we'll work around
- * their brain damage.
+ * This function is the exported kernel interface. It returns some number of
+ * good random numbers, suitable for key generation, seeding TCP sequence
+ * numbers, etc. In order to ensure that the randomness returned by this
+ * function is okay, the function wait_for_random_bytes() should be called and
+ * return 0 at least once at any point prior.
*/
-static struct crng_state **crng_node_pool __read_mostly;
-#endif
-
-static void invalidate_batched_entropy(void);
-static void numa_crng_init(void);
-
-static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
-static int __init parse_trust_cpu(char *arg)
+void get_random_bytes(void *buf, size_t len)
{
- return kstrtobool(arg, &trust_cpu);
+ warn_unseeded_randomness();
+ _get_random_bytes(buf, len);
}
-early_param("random.trust_cpu", parse_trust_cpu);
+EXPORT_SYMBOL(get_random_bytes);
-static bool crng_init_try_arch(struct crng_state *crng)
+static ssize_t get_random_bytes_user(struct iov_iter *iter)
{
- int i;
- bool arch_init = true;
- unsigned long rv;
-
- for (i = 4; i < 16; i++) {
- if (!arch_get_random_seed_long(&rv) &&
- !arch_get_random_long(&rv)) {
- rv = random_get_entropy();
- arch_init = false;
- }
- crng->state[i] ^= rv;
+ u32 chacha_state[CHACHA_STATE_WORDS];
+ u8 block[CHACHA_BLOCK_SIZE];
+ size_t ret = 0, copied;
+
+ if (unlikely(!iov_iter_count(iter)))
+ return 0;
+
+ /*
+ * Immediately overwrite the ChaCha key at index 4 with random
+ * bytes, in case userspace causes copy_to_iter() below to sleep
+ * forever, so that we still retain forward secrecy in that case.
+ */
+ crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
+ /*
+ * However, if we're doing a read of len <= 32, we don't need to
+ * use chacha_state after, so we can simply return those bytes to
+ * the user directly.
+ */
+ if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
+ ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
+ goto out_zero_chacha;
}
- return arch_init;
-}
+ for (;;) {
+ chacha20_block(chacha_state, block);
+ if (unlikely(chacha_state[12] == 0))
+ ++chacha_state[13];
-static bool __init crng_init_try_arch_early(struct crng_state *crng)
-{
- int i;
- bool arch_init = true;
- unsigned long rv;
-
- for (i = 4; i < 16; i++) {
- if (!arch_get_random_seed_long_early(&rv) &&
- !arch_get_random_long_early(&rv)) {
- rv = random_get_entropy();
- arch_init = false;
+ copied = copy_to_iter(block, sizeof(block), iter);
+ ret += copied;
+ if (!iov_iter_count(iter) || copied != sizeof(block))
+ break;
+
+ BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
+ if (ret % PAGE_SIZE == 0) {
+ if (signal_pending(current))
+ break;
+ cond_resched();
}
- crng->state[i] ^= rv;
}
- return arch_init;
+ memzero_explicit(block, sizeof(block));
+out_zero_chacha:
+ memzero_explicit(chacha_state, sizeof(chacha_state));
+ return ret ? ret : -EFAULT;
}
-static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
-{
- chacha_init_consts(crng->state);
- _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
- crng_init_try_arch(crng);
- crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
-}
+/*
+ * Batched entropy returns random integers. The quality of the random
+ * numbers is as good as /dev/urandom. In order to ensure that the randomness
+ * provided by this function is okay, the function wait_for_random_bytes()
+ * should be called and return 0 at least once at any point prior.
+ */
-static void __init crng_initialize_primary(struct crng_state *crng)
+#define DEFINE_BATCHED_ENTROPY(type) \
+struct batch_ ##type { \
+ /* \
+ * We make this 1.5x a ChaCha block, so that we get the \
+ * remaining 32 bytes from fast key erasure, plus one full \
+ * block from the detached ChaCha state. We can increase \
+ * the size of this later if needed so long as we keep the \
+ * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE. \
+ */ \
+ type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))]; \
+ local_lock_t lock; \
+ unsigned long generation; \
+ unsigned int position; \
+}; \
+ \
+static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = { \
+ .lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock), \
+ .position = UINT_MAX \
+}; \
+ \
+type get_random_ ##type(void) \
+{ \
+ type ret; \
+ unsigned long flags; \
+ struct batch_ ##type *batch; \
+ unsigned long next_gen; \
+ \
+ warn_unseeded_randomness(); \
+ \
+ if (!crng_ready()) { \
+ _get_random_bytes(&ret, sizeof(ret)); \
+ return ret; \
+ } \
+ \
+ local_lock_irqsave(&batched_entropy_ ##type.lock, flags); \
+ batch = raw_cpu_ptr(&batched_entropy_##type); \
+ \
+ next_gen = READ_ONCE(base_crng.generation); \
+ if (batch->position >= ARRAY_SIZE(batch->entropy) || \
+ next_gen != batch->generation) { \
+ _get_random_bytes(batch->entropy, sizeof(batch->entropy)); \
+ batch->position = 0; \
+ batch->generation = next_gen; \
+ } \
+ \
+ ret = batch->entropy[batch->position]; \
+ batch->entropy[batch->position] = 0; \
+ ++batch->position; \
+ local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags); \
+ return ret; \
+} \
+EXPORT_SYMBOL(get_random_ ##type);
+
+DEFINE_BATCHED_ENTROPY(u8)
+DEFINE_BATCHED_ENTROPY(u16)
+DEFINE_BATCHED_ENTROPY(u32)
+DEFINE_BATCHED_ENTROPY(u64)
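With CHACHA_BLOCK_SIZE = 64, the (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE sizing above gives a 96-byte per-cpu batch for each type: 96 u8 values, 48 u16, 24 u32, or 12 u64 entries before _get_random_bytes() has to refill it.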
+
+#ifdef CONFIG_SMP
+/*
+ * This function is called when the CPU is coming up, with entry
+ * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
+ */
+int __cold random_prepare_cpu(unsigned int cpu)
{
- chacha_init_consts(crng->state);
- _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
- if (crng_init_try_arch_early(crng) && trust_cpu) {
- invalidate_batched_entropy();
- numa_crng_init();
- crng_init = 2;
- pr_notice("crng done (trusting CPU's manufacturer)\n");
- }
- crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
+ /*
+ * When the cpu comes back online, immediately invalidate both
+ * the per-cpu crng and all batches, so that we serve fresh
+ * randomness.
+ */
+ per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
+ per_cpu_ptr(&batched_entropy_u8, cpu)->position = UINT_MAX;
+ per_cpu_ptr(&batched_entropy_u16, cpu)->position = UINT_MAX;
+ per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
+ per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
+ return 0;
}
+#endif
-#ifdef CONFIG_NUMA
-static void do_numa_crng_init(struct work_struct *work)
-{
- int i;
- struct crng_state *crng;
- struct crng_state **pool;
-
- pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
- for_each_online_node(i) {
- crng = kmalloc_node(sizeof(struct crng_state),
- GFP_KERNEL | __GFP_NOFAIL, i);
- spin_lock_init(&crng->lock);
- crng_initialize_secondary(crng);
- pool[i] = crng;
- }
- mb();
- if (cmpxchg(&crng_node_pool, NULL, pool)) {
- for_each_node(i)
- kfree(pool[i]);
- kfree(pool);
- }
-}
-static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
+/**********************************************************************
+ *
+ * Entropy accumulation and extraction routines.
+ *
+ * Callers may add entropy via:
+ *
+ * static void mix_pool_bytes(const void *buf, size_t len)
+ *
+ * After which, if added entropy should be credited:
+ *
+ * static void credit_init_bits(size_t bits)
+ *
+ * Finally, extract entropy via:
+ *
+ * static void extract_entropy(void *buf, size_t len)
+ *
+ **********************************************************************/
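A hedged sketch of the calling order those three routines expect, using the file's own static helpers; the sample source and the 8-bit credit are made up:

        static void example_accumulate(void)
        {
                unsigned long sample = random_get_entropy();    /* e.g. a cycle counter */
                u8 key[CHACHA_KEY_SIZE];

                mix_pool_bytes(&sample, sizeof(sample));        /* always safe to call */
                credit_init_bits(8);            /* only if the bits are actually trustworthy */
                extract_entropy(key, sizeof(key));      /* later, e.g. from crng_reseed() */
        }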
-static void numa_crng_init(void)
+enum {
+ POOL_BITS = BLAKE2S_HASH_SIZE * 8,
+ POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */
+ POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */
+};
+
+static struct {
+ struct blake2s_state hash;
+ spinlock_t lock;
+ unsigned int init_bits;
+} input_pool = {
+ .hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
+ BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
+ BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
+ .hash.outlen = BLAKE2S_HASH_SIZE,
+ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
+};
+
+static void _mix_pool_bytes(const void *buf, size_t len)
{
- schedule_work(&numa_crng_init_work);
+ blake2s_update(&input_pool.hash, buf, len);
}
-#else
-static void numa_crng_init(void) {}
-#endif
/*
- * crng_fast_load() can be called by code in the interrupt service
- * path. So we can't afford to dilly-dally.
+ * This function adds bytes into the input pool. It does not
+ * update the initialization bit counter; the caller should call
+ * credit_init_bits if this is appropriate.
*/
-static int crng_fast_load(const char *cp, size_t len)
+static void mix_pool_bytes(const void *buf, size_t len)
{
unsigned long flags;
- char *p;
- if (!spin_trylock_irqsave(&primary_crng.lock, flags))
- return 0;
- if (crng_init != 0) {
- spin_unlock_irqrestore(&primary_crng.lock, flags);
- return 0;
- }
- p = (unsigned char *) &primary_crng.state[4];
- while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
- p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
- cp++; crng_init_cnt++; len--;
- }
- spin_unlock_irqrestore(&primary_crng.lock, flags);
- if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
- invalidate_batched_entropy();
- crng_init = 1;
- pr_notice("fast init done\n");
- }
- return 1;
+ spin_lock_irqsave(&input_pool.lock, flags);
+ _mix_pool_bytes(buf, len);
+ spin_unlock_irqrestore(&input_pool.lock, flags);
}
/*
- * crng_slow_load() is called by add_device_randomness, which has two
- * attributes. (1) We can't trust the buffer passed to it is
- * guaranteed to be unpredictable (so it might not have any entropy at
- * all), and (2) it doesn't have the performance constraints of
- * crng_fast_load().
- *
- * So we do something more comprehensive which is guaranteed to touch
- * all of the primary_crng's state, and which uses a LFSR with a
- * period of 255 as part of the mixing algorithm. Finally, we do
- * *not* advance crng_init_cnt since buffer we may get may be something
- * like a fixed DMI table (for example), which might very well be
- * unique to the machine, but is otherwise unvarying.
+ * This is an HKDF-like construction for using the hashed collected entropy
+ * as a PRF key, that's then expanded block-by-block.
*/
-static int crng_slow_load(const char *cp, size_t len)
+static void extract_entropy(void *buf, size_t len)
{
- unsigned long flags;
- static unsigned char lfsr = 1;
- unsigned char tmp;
- unsigned i, max = CHACHA_KEY_SIZE;
- const char * src_buf = cp;
- char * dest_buf = (char *) &primary_crng.state[4];
-
- if (!spin_trylock_irqsave(&primary_crng.lock, flags))
- return 0;
- if (crng_init != 0) {
- spin_unlock_irqrestore(&primary_crng.lock, flags);
- return 0;
+ unsigned long flags;
+ u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
+ struct {
+ unsigned long rdseed[32 / sizeof(long)];
+ size_t counter;
+ } block;
+ size_t i, longs;
+
+ for (i = 0; i < ARRAY_SIZE(block.rdseed);) {
+ longs = arch_get_random_seed_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
+ if (longs) {
+ i += longs;
+ continue;
+ }
+ longs = arch_get_random_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
+ if (longs) {
+ i += longs;
+ continue;
+ }
+ block.rdseed[i++] = random_get_entropy();
}
- if (len > max)
- max = len;
-
- for (i = 0; i < max ; i++) {
- tmp = lfsr;
- lfsr >>= 1;
- if (tmp & 1)
- lfsr ^= 0xE1;
- tmp = dest_buf[i % CHACHA_KEY_SIZE];
- dest_buf[i % CHACHA_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
- lfsr += (tmp << 3) | (tmp >> 5);
+
+ spin_lock_irqsave(&input_pool.lock, flags);
+
+ /* seed = HASHPRF(last_key, entropy_input) */
+ blake2s_final(&input_pool.hash, seed);
+
+ /* next_key = HASHPRF(seed, RDSEED || 0) */
+ block.counter = 0;
+ blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
+ blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));
+
+ spin_unlock_irqrestore(&input_pool.lock, flags);
+ memzero_explicit(next_key, sizeof(next_key));
+
+ while (len) {
+ i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
+ /* output = HASHPRF(seed, RDSEED || ++counter) */
+ ++block.counter;
+ blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
+ len -= i;
+ buf += i;
}
- spin_unlock_irqrestore(&primary_crng.lock, flags);
- return 1;
+
+ memzero_explicit(seed, sizeof(seed));
+ memzero_explicit(&block, sizeof(block));
}
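Restating the construction above in one place (all values are 32 bytes; RDSEED stands for the freshly gathered arch-random or cycle-counter block, and the pool starts out unkeyed on the very first extraction):

        seed      = BLAKE2s_keyed(prev_next_key; all bytes mixed since the last extraction)
        next_key  = BLAKE2s_keyed(seed; RDSEED || 0)       -> re-keys the input pool
        output[i] = BLAKE2s_keyed(seed; RDSEED || i)       for i = 1, 2, ... until len is covered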
-static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
+#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
+
+static void __cold _credit_init_bits(size_t bits)
{
- unsigned long flags;
- int i, num;
- union {
- __u8 block[CHACHA_BLOCK_SIZE];
- __u32 key[8];
- } buf;
-
- if (r) {
- num = extract_entropy(r, &buf, 32, 16, 0);
- if (num == 0)
- return;
- } else {
- _extract_crng(&primary_crng, buf.block);
- _crng_backtrack_protect(&primary_crng, buf.block,
- CHACHA_KEY_SIZE);
- }
- spin_lock_irqsave(&crng->lock, flags);
- for (i = 0; i < 8; i++) {
- unsigned long rv;
- if (!arch_get_random_seed_long(&rv) &&
- !arch_get_random_long(&rv))
- rv = random_get_entropy();
- crng->state[i+4] ^= buf.key[i] ^ rv;
- }
- memzero_explicit(&buf, sizeof(buf));
- crng->init_time = jiffies;
- spin_unlock_irqrestore(&crng->lock, flags);
- if (crng == &primary_crng && crng_init < 2) {
- invalidate_batched_entropy();
- numa_crng_init();
- crng_init = 2;
- process_random_ready_list();
+ static struct execute_work set_ready;
+ unsigned int new, orig, add;
+ unsigned long flags;
+
+ if (!bits)
+ return;
+
+ add = min_t(size_t, bits, POOL_BITS);
+
+ orig = READ_ONCE(input_pool.init_bits);
+ do {
+ new = min_t(unsigned int, POOL_BITS, orig + add);
+ } while (!try_cmpxchg(&input_pool.init_bits, &orig, new));
+
+ if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
+ crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
+ if (static_key_initialized)
+ execute_in_process_context(crng_set_ready, &set_ready);
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
pr_notice("crng init done\n");
- if (unseeded_warning.missed) {
- pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
- unseeded_warning.missed);
- unseeded_warning.missed = 0;
- }
- if (urandom_warning.missed) {
+ if (urandom_warning.missed)
pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
urandom_warning.missed);
- urandom_warning.missed = 0;
+ } else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
+ spin_lock_irqsave(&base_crng.lock, flags);
+ /* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
+ if (crng_init == CRNG_EMPTY) {
+ extract_entropy(base_crng.key, sizeof(base_crng.key));
+ crng_init = CRNG_EARLY;
}
+ spin_unlock_irqrestore(&base_crng.lock, flags);
}
}
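Concretely, with POOL_BITS = BLAKE2S_HASH_SIZE * 8 = 256: the first time the credited total crosses POOL_EARLY_BITS = 128, the base_crng key is seeded once and crng_init moves to CRNG_EARLY; crossing POOL_READY_BITS = 256 triggers crng_reseed(), flips crng_init to CRNG_READY, and wakes anything sleeping on crng_init_wait.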
-static void _extract_crng(struct crng_state *crng,
- __u8 out[CHACHA_BLOCK_SIZE])
+
+/**********************************************************************
+ *
+ * Entropy collection routines.
+ *
+ * The following exported functions are used for pushing entropy into
+ * the above entropy accumulation routines:
+ *
+ * void add_device_randomness(const void *buf, size_t len);
+ * void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
+ * void add_bootloader_randomness(const void *buf, size_t len);
+ * void add_vmfork_randomness(const void *unique_vm_id, size_t len);
+ * void add_interrupt_randomness(int irq);
+ * void add_input_randomness(unsigned int type, unsigned int code, unsigned int value);
+ * void add_disk_randomness(struct gendisk *disk);
+ *
+ * add_device_randomness() adds data to the input pool that
+ * is likely to differ between two devices (or possibly even per boot).
+ * This would be things like MAC addresses or serial numbers, or the
+ * read-out of the RTC. This does *not* credit any actual entropy to
+ * the pool, but it initializes the pool to different values for devices
+ * that might otherwise be identical and have very little entropy
+ * available to them (particularly common in the embedded world).
+ *
+ * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
+ * entropy as specified by the caller. If the entropy pool is full it will
+ * block until more entropy is needed.
+ *
+ * add_bootloader_randomness() is called by bootloader drivers, such as EFI
+ * and device tree, and credits its input depending on whether or not the
+ * configuration option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
+ *
+ * add_vmfork_randomness() adds a unique (but not necessarily secret) ID
+ * representing the current instance of a VM to the pool, without crediting,
+ * and then force-reseeds the crng so that it takes effect immediately.
+ *
+ * add_interrupt_randomness() uses the interrupt timing as random
+ * inputs to the entropy pool. Using the cycle counters and the irq source
+ * as inputs, it feeds the input pool roughly once a second or after 1024
+ * interrupts, whichever comes first, crediting 1 bit of entropy for every
+ * 64 interrupts in the flushed batch.
+ *
+ * add_input_randomness() uses the input layer interrupt timing, as well
+ * as the event type information from the hardware.
+ *
+ * add_disk_randomness() uses what amounts to the seek time of block
+ * layer request events, on a per-disk_devt basis, as input to the
+ * entropy pool. Note that high-speed solid state drives with very low
+ * seek times do not make for good sources of entropy, as their seek
+ * times are usually fairly consistent.
+ *
+ * The last two routines try to estimate how many bits of entropy
+ * to credit. They do this by keeping track of the first and second
+ * order deltas of the event timings.
+ *
+ **********************************************************************/
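
To make the first of these interfaces concrete, here is a minimal illustrative sketch of a driver mixing non-secret, device-specific identifiers into the pool at probe time; the example_dev structure and its fields are hypothetical and not part of this patch.

#include <linux/etherdevice.h>
#include <linux/random.h>

/* Hypothetical per-device data, for illustration only. */
struct example_dev {
	u8 mac[ETH_ALEN];
	u64 serial;
};

static int example_probe(struct example_dev *dev)
{
	/* Non-secret identifiers: mixed into the input pool, no entropy credited. */
	add_device_randomness(dev->mac, sizeof(dev->mac));
	add_device_randomness(&dev->serial, sizeof(dev->serial));
	return 0;
}
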
+
+static bool trust_cpu __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
+static bool trust_bootloader __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
+static int __init parse_trust_cpu(char *arg)
{
- unsigned long v, flags;
-
- if (crng_ready() &&
- (time_after(crng_global_init_time, crng->init_time) ||
- time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
- crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
- spin_lock_irqsave(&crng->lock, flags);
- if (arch_get_random_long(&v))
- crng->state[14] ^= v;
- chacha20_block(&crng->state[0], out);
- if (crng->state[12] == 0)
- crng->state[13]++;
- spin_unlock_irqrestore(&crng->lock, flags);
+ return kstrtobool(arg, &trust_cpu);
}
-
-static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
+static int __init parse_trust_bootloader(char *arg)
{
- struct crng_state *crng = NULL;
-
-#ifdef CONFIG_NUMA
- if (crng_node_pool)
- crng = crng_node_pool[numa_node_id()];
- if (crng == NULL)
-#endif
- crng = &primary_crng;
- _extract_crng(crng, out);
+ return kstrtobool(arg, &trust_bootloader);
}
+early_param("random.trust_cpu", parse_trust_cpu);
+early_param("random.trust_bootloader", parse_trust_bootloader);
-/*
- * Use the leftover bytes from the CRNG block output (if there is
- * enough) to mutate the CRNG key to provide backtracking protection.
- */
-static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA_BLOCK_SIZE], int used)
+static int random_pm_notification(struct notifier_block *nb, unsigned long action, void *data)
{
- unsigned long flags;
- __u32 *s, *d;
- int i;
-
- used = round_up(used, sizeof(__u32));
- if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) {
- extract_crng(tmp);
- used = 0;
+ unsigned long flags, entropy = random_get_entropy();
+
+ /*
+ * Encode a representation of how long the system has been suspended,
+ * in a way that is distinct from prior system suspends.
+ */
+ ktime_t stamps[] = { ktime_get(), ktime_get_boottime(), ktime_get_real() };
+
+ spin_lock_irqsave(&input_pool.lock, flags);
+ _mix_pool_bytes(&action, sizeof(action));
+ _mix_pool_bytes(stamps, sizeof(stamps));
+ _mix_pool_bytes(&entropy, sizeof(entropy));
+ spin_unlock_irqrestore(&input_pool.lock, flags);
+
+ if (crng_ready() && (action == PM_RESTORE_PREPARE ||
+ (action == PM_POST_SUSPEND && !IS_ENABLED(CONFIG_PM_AUTOSLEEP) &&
+ !IS_ENABLED(CONFIG_PM_USERSPACE_AUTOSLEEP)))) {
+ crng_reseed();
+ pr_notice("crng reseeded on system resumption\n");
}
- spin_lock_irqsave(&crng->lock, flags);
- s = (__u32 *) &tmp[used];
- d = &crng->state[4];
- for (i=0; i < 8; i++)
- *d++ ^= *s++;
- spin_unlock_irqrestore(&crng->lock, flags);
+ return 0;
}
-static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
+static struct notifier_block pm_notifier = { .notifier_call = random_pm_notification };
+
+/*
+ * This is called extremely early, before time keeping functionality is
+ * available, but arch randomness is. Interrupts are not yet enabled.
+ */
+void __init random_init_early(const char *command_line)
{
- struct crng_state *crng = NULL;
+ unsigned long entropy[BLAKE2S_BLOCK_SIZE / sizeof(long)];
+ size_t i, longs, arch_bits;
-#ifdef CONFIG_NUMA
- if (crng_node_pool)
- crng = crng_node_pool[numa_node_id()];
- if (crng == NULL)
+#if defined(LATENT_ENTROPY_PLUGIN)
+ static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
+ _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
#endif
- crng = &primary_crng;
- _crng_backtrack_protect(crng, tmp, used);
-}
-static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
-{
- ssize_t ret = 0, i = CHACHA_BLOCK_SIZE;
- __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
- int large_request = (nbytes > 256);
-
- while (nbytes) {
- if (large_request && need_resched()) {
- if (signal_pending(current)) {
- if (ret == 0)
- ret = -ERESTARTSYS;
- break;
- }
- schedule();
+ for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) {
+ longs = arch_get_random_seed_longs_early(entropy, ARRAY_SIZE(entropy) - i);
+ if (longs) {
+ _mix_pool_bytes(entropy, sizeof(*entropy) * longs);
+ i += longs;
+ continue;
}
-
- extract_crng(tmp);
- i = min_t(int, nbytes, CHACHA_BLOCK_SIZE);
- if (copy_to_user(buf, tmp, i)) {
- ret = -EFAULT;
- break;
+ longs = arch_get_random_longs_early(entropy, ARRAY_SIZE(entropy) - i);
+ if (longs) {
+ _mix_pool_bytes(entropy, sizeof(*entropy) * longs);
+ i += longs;
+ continue;
}
-
- nbytes -= i;
- buf += i;
- ret += i;
+ arch_bits -= sizeof(*entropy) * 8;
+ ++i;
}
- crng_backtrack_protect(tmp, i);
- /* Wipe data just written to memory */
- memzero_explicit(tmp, sizeof(tmp));
+ _mix_pool_bytes(init_utsname(), sizeof(*(init_utsname())));
+ _mix_pool_bytes(command_line, strlen(command_line));
- return ret;
+ /* Reseed if already seeded by earlier phases. */
+ if (crng_ready())
+ crng_reseed();
+ else if (trust_cpu)
+ _credit_init_bits(arch_bits);
}
+/*
+ * This is called a little bit after the prior function, and now there is
+ * access to timestamp counters. Interrupts are not yet enabled.
+ */
+void __init random_init(void)
+{
+ unsigned long entropy = random_get_entropy();
+ ktime_t now = ktime_get_real();
-/*********************************************************************
- *
- * Entropy input management
- *
- *********************************************************************/
+ _mix_pool_bytes(&now, sizeof(now));
+ _mix_pool_bytes(&entropy, sizeof(entropy));
+ add_latent_entropy();
-/* There is one of these per entropy source */
-struct timer_rand_state {
- cycles_t last_time;
- long last_delta, last_delta2;
-};
+ /*
+ * If we were initialized by the cpu or bootloader before jump labels
+ * are initialized, then we should enable the static branch here, where
+ * it's guaranteed that jump labels have been initialized.
+ */
+ if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
+ crng_set_ready(NULL);
+
+ /* Reseed if already seeded by earlier phases. */
+ if (crng_ready())
+ crng_reseed();
-#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };
+ WARN_ON(register_pm_notifier(&pm_notifier));
+
+ WARN(!entropy, "Missing cycle counter and fallback timer; RNG "
+ "entropy collection will consequently suffer.");
+}
/*
* Add device- or boot-specific data to the input pool to help
@@ -1111,447 +856,328 @@ struct timer_rand_state {
* the entropy pool having similar initial state across largely
* identical devices.
*/
-void add_device_randomness(const void *buf, unsigned int size)
+void add_device_randomness(const void *buf, size_t len)
{
- unsigned long time = random_get_entropy() ^ jiffies;
+ unsigned long entropy = random_get_entropy();
unsigned long flags;
- if (!crng_ready() && size)
- crng_slow_load(buf, size);
-
- trace_add_device_randomness(size, _RET_IP_);
spin_lock_irqsave(&input_pool.lock, flags);
- _mix_pool_bytes(&input_pool, buf, size);
- _mix_pool_bytes(&input_pool, &time, sizeof(time));
+ _mix_pool_bytes(&entropy, sizeof(entropy));
+ _mix_pool_bytes(buf, len);
spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);
-static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
-
/*
- * This function adds entropy to the entropy "pool" by using timing
- * delays. It uses the timer_rand_state structure to make an estimate
- * of how many bits of entropy this call has added to the pool.
- *
- * The number "num" is also added to the pool - it should somehow describe
- * the type of event which just happened. This is currently 0-255 for
- * keyboard scan codes, and 256 upwards for interrupts.
- *
+ * Interface for in-kernel drivers of true hardware RNGs.
+ * Those devices may produce endless random bits and will be throttled
+ * when our pool is full.
*/
-static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy)
{
- struct entropy_store *r;
- struct {
- long jiffies;
- unsigned cycles;
- unsigned num;
- } sample;
- long delta, delta2, delta3;
-
- sample.jiffies = jiffies;
- sample.cycles = random_get_entropy();
- sample.num = num;
- r = &input_pool;
- mix_pool_bytes(r, &sample, sizeof(sample));
+ mix_pool_bytes(buf, len);
+ credit_init_bits(entropy);
/*
- * Calculate number of bits of randomness we probably added.
- * We take into account the first, second and third-order deltas
- * in order to make our estimate.
+ * Throttle writing to once every reseed interval, unless we're not yet
+ * initialized or no entropy is credited.
*/
- delta = sample.jiffies - READ_ONCE(state->last_time);
- WRITE_ONCE(state->last_time, sample.jiffies);
-
- delta2 = delta - READ_ONCE(state->last_delta);
- WRITE_ONCE(state->last_delta, delta);
-
- delta3 = delta2 - READ_ONCE(state->last_delta2);
- WRITE_ONCE(state->last_delta2, delta2);
-
- if (delta < 0)
- delta = -delta;
- if (delta2 < 0)
- delta2 = -delta2;
- if (delta3 < 0)
- delta3 = -delta3;
- if (delta > delta2)
- delta = delta2;
- if (delta > delta3)
- delta = delta3;
-
- /*
- * delta is now minimum absolute delta.
- * Round down by 1 bit on general principles,
- * and limit entropy estimate to 12 bits.
- */
- credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
+ if (!kthread_should_stop() && (crng_ready() || !entropy))
+ schedule_timeout_interruptible(crng_reseed_interval());
}
+EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
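
In practice the main caller of this interface is the hw_random core's fill thread; the following is a rough, hypothetical sketch of such a feeder, where the hardware read helper and the 4-bits-of-entropy-per-byte figure are illustrative assumptions rather than anything defined by this patch.

#include <linux/kthread.h>
#include <linux/random.h>

size_t example_read_hwrng(u8 *buf, size_t len);	/* assumed hardware read helper */

static int example_hwrng_fill(void *unused)
{
	u8 buf[32];

	while (!kthread_should_stop()) {
		size_t n = example_read_hwrng(buf, sizeof(buf));

		if (!n)
			continue;
		/* Credit conservatively, e.g. 4 bits of entropy per byte read. */
		add_hwgenerator_randomness(buf, n, n * 4);
	}
	return 0;
}
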
-void add_input_randomness(unsigned int type, unsigned int code,
- unsigned int value)
+/*
+ * Handle random seed passed by bootloader, and credit it if
+ * CONFIG_RANDOM_TRUST_BOOTLOADER is set.
+ */
+void __init add_bootloader_randomness(const void *buf, size_t len)
{
- static unsigned char last_value;
-
- /* ignore autorepeat and the like */
- if (value == last_value)
- return;
-
- last_value = value;
- add_timer_randomness(&input_timer_state,
- (type << 4) ^ code ^ (code >> 4) ^ value);
- trace_add_input_randomness(ENTROPY_BITS(&input_pool));
+ mix_pool_bytes(buf, len);
+ if (trust_bootloader)
+ credit_init_bits(len * 8);
}
-EXPORT_SYMBOL_GPL(add_input_randomness);
-
-static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
-
-#ifdef ADD_INTERRUPT_BENCH
-static unsigned long avg_cycles, avg_deviation;
-#define AVG_SHIFT 8 /* Exponential average factor k=1/256 */
-#define FIXED_1_2 (1 << (AVG_SHIFT-1))
+#if IS_ENABLED(CONFIG_VMGENID)
+static BLOCKING_NOTIFIER_HEAD(vmfork_chain);
-static void add_interrupt_bench(cycles_t start)
+/*
+ * Handle a new VM ID, which is unique but not secret, so we
+ * don't credit it, but we do immediately force a reseed after so
+ * that it's used by the crng posthaste.
+ */
+void __cold add_vmfork_randomness(const void *unique_vm_id, size_t len)
{
- long delta = random_get_entropy() - start;
-
- /* Use a weighted moving average */
- delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
- avg_cycles += delta;
- /* And average deviation */
- delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
- avg_deviation += delta;
+ add_device_randomness(unique_vm_id, len);
+ if (crng_ready()) {
+ crng_reseed();
+ pr_notice("crng reseeded due to virtual machine fork\n");
+ }
+ blocking_notifier_call_chain(&vmfork_chain, 0, NULL);
}
-#else
-#define add_interrupt_bench(x)
+#if IS_MODULE(CONFIG_VMGENID)
+EXPORT_SYMBOL_GPL(add_vmfork_randomness);
#endif
-static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
+int __cold register_random_vmfork_notifier(struct notifier_block *nb)
{
- __u32 *ptr = (__u32 *) regs;
- unsigned int idx;
-
- if (regs == NULL)
- return 0;
- idx = READ_ONCE(f->reg_idx);
- if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
- idx = 0;
- ptr += idx++;
- WRITE_ONCE(f->reg_idx, idx);
- return *ptr;
+ return blocking_notifier_chain_register(&vmfork_chain, nb);
}
+EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);
-void add_interrupt_randomness(int irq, int irq_flags)
+int __cold unregister_random_vmfork_notifier(struct notifier_block *nb)
{
- struct entropy_store *r;
- struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
- struct pt_regs *regs = get_irq_regs();
- unsigned long now = jiffies;
- cycles_t cycles = random_get_entropy();
- __u32 c_high, j_high;
- __u64 ip;
-
- if (cycles == 0)
- cycles = get_reg(fast_pool, regs);
- c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
- j_high = (sizeof(now) > 4) ? now >> 32 : 0;
- fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
- fast_pool->pool[1] ^= now ^ c_high;
- ip = regs ? instruction_pointer(regs) : _RET_IP_;
- fast_pool->pool[2] ^= ip;
- fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
- get_reg(fast_pool, regs);
-
- fast_mix(fast_pool);
- add_interrupt_bench(cycles);
-
- if (unlikely(crng_init == 0)) {
- if ((fast_pool->count >= 64) &&
- crng_fast_load((char *) fast_pool->pool,
- sizeof(fast_pool->pool))) {
- fast_pool->count = 0;
- fast_pool->last = now;
- }
- return;
- }
-
- if ((fast_pool->count < 64) &&
- !time_after(now, fast_pool->last + HZ))
- return;
-
- r = &input_pool;
- if (!spin_trylock(&r->lock))
- return;
-
- fast_pool->last = now;
- __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
- spin_unlock(&r->lock);
+ return blocking_notifier_chain_unregister(&vmfork_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier);
+#endif
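
A kernel consumer that caches state which must not survive a VM fork (session keys, nonces, counters) can subscribe to the chain above when CONFIG_VMGENID is enabled; a minimal sketch, with the callback body left as an assumption:

#include <linux/notifier.h>
#include <linux/random.h>

static int example_vmfork_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	/* e.g. drop or rekey any state cloned from the parent VM image */
	return NOTIFY_DONE;
}

static struct notifier_block example_vmfork_nb = {
	.notifier_call = example_vmfork_notify,
};

/* At init time: register_random_vmfork_notifier(&example_vmfork_nb); */
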
- fast_pool->count = 0;
+struct fast_pool {
+ unsigned long pool[4];
+ unsigned long last;
+ unsigned int count;
+ struct timer_list mix;
+};
- /* award one bit for the contents of the fast pool */
- credit_entropy_bits(r, 1);
-}
-EXPORT_SYMBOL_GPL(add_interrupt_randomness);
+static void mix_interrupt_randomness(struct timer_list *work);
-#ifdef CONFIG_BLOCK
-void add_disk_randomness(struct gendisk *disk)
-{
- if (!disk || !disk->random)
- return;
- /* first major is 1, so we get >= 0x200 here */
- add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
- trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
-}
-EXPORT_SYMBOL_GPL(add_disk_randomness);
+static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
+#ifdef CONFIG_64BIT
+#define FASTMIX_PERM SIPHASH_PERMUTATION
+ .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 },
+#else
+#define FASTMIX_PERM HSIPHASH_PERMUTATION
+ .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 },
#endif
-
-/*********************************************************************
- *
- * Entropy extraction routines
- *
- *********************************************************************/
+ .mix = __TIMER_INITIALIZER(mix_interrupt_randomness, 0)
+};
/*
- * This function decides how many bytes to actually take from the
- * given pool, and also debits the entropy count accordingly.
+ * This is [Half]SipHash-1-x, starting from an empty key. Because
+ * the key is fixed, it assumes that its inputs are non-malicious,
+ * and therefore this has no security on its own. s represents the
+ * four-word SipHash state, while v represents a two-word input.
*/
-static size_t account(struct entropy_store *r, size_t nbytes, int min,
- int reserved)
+static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
{
- int entropy_count, orig, have_bytes;
- size_t ibytes, nfrac;
-
- BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
-
- /* Can we pull enough? */
-retry:
- entropy_count = orig = READ_ONCE(r->entropy_count);
- ibytes = nbytes;
- /* never pull more than available */
- have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
-
- if ((have_bytes -= reserved) < 0)
- have_bytes = 0;
- ibytes = min_t(size_t, ibytes, have_bytes);
- if (ibytes < min)
- ibytes = 0;
-
- if (WARN_ON(entropy_count < 0)) {
- pr_warn("negative entropy count: pool %s count %d\n",
- r->name, entropy_count);
- entropy_count = 0;
- }
- nfrac = ibytes << (ENTROPY_SHIFT + 3);
- if ((size_t) entropy_count > nfrac)
- entropy_count -= nfrac;
- else
- entropy_count = 0;
-
- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
- goto retry;
-
- trace_debit_entropy(r->name, 8 * ibytes);
- if (ibytes && ENTROPY_BITS(r) < random_write_wakeup_bits) {
- wake_up_interruptible(&random_write_wait);
- kill_fasync(&fasync, SIGIO, POLL_OUT);
- }
-
- return ibytes;
+ s[3] ^= v1;
+ FASTMIX_PERM(s[0], s[1], s[2], s[3]);
+ s[0] ^= v1;
+ s[3] ^= v2;
+ FASTMIX_PERM(s[0], s[1], s[2], s[3]);
+ s[0] ^= v2;
}
+#ifdef CONFIG_SMP
/*
- * This function does the actual extraction for extract_entropy.
- *
- * Note: we assume that .poolwords is a multiple of 16 words.
+ * This function is called when the CPU has just come online, with
+ * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
*/
-static void extract_buf(struct entropy_store *r, __u8 *out)
+int __cold random_online_cpu(unsigned int cpu)
{
- int i;
- union {
- __u32 w[5];
- unsigned long l[LONGS(20)];
- } hash;
- __u32 workspace[SHA1_WORKSPACE_WORDS];
- unsigned long flags;
-
/*
- * If we have an architectural hardware random number
- * generator, use it for SHA's initial vector
+ * During CPU shutdown and before CPU onlining, add_interrupt_
+ * randomness() may schedule mix_interrupt_randomness(), and
+ * set the MIX_INFLIGHT flag. However, because the worker can
+ * be scheduled on a different CPU during this period, that
+ * flag will never be cleared. For that reason, we zero out
+ * the flag here, which runs just after workqueues are onlined
+ * for the CPU again. This also has the effect of setting the
+ * irq randomness count to zero so that new accumulated irqs
+ * are fresh.
*/
- sha1_init(hash.w);
- for (i = 0; i < LONGS(20); i++) {
- unsigned long v;
- if (!arch_get_random_long(&v))
- break;
- hash.l[i] = v;
- }
-
- /* Generate a hash across the pool, 16 words (512 bits) at a time */
- spin_lock_irqsave(&r->lock, flags);
- for (i = 0; i < r->poolinfo->poolwords; i += 16)
- sha1_transform(hash.w, (__u8 *)(r->pool + i), workspace);
+ per_cpu_ptr(&irq_randomness, cpu)->count = 0;
+ return 0;
+}
+#endif
+static void mix_interrupt_randomness(struct timer_list *work)
+{
+ struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
/*
- * We mix the hash back into the pool to prevent backtracking
- * attacks (where the attacker knows the state of the pool
- * plus the current outputs, and attempts to find previous
- * ouputs), unless the hash function can be inverted. By
- * mixing at least a SHA1 worth of hash data back, we make
- * brute-forcing the feedback as hard as brute-forcing the
- * hash.
+ * The size of the copied stack pool is explicitly 2 longs so that we
+ * only ever ingest half of the siphash output each time, retaining
+ * the other half as the next "key" that carries over. The entropy is
+ * supposed to be sufficiently dispersed between bits so on average
+ * we don't wind up "losing" some.
*/
- __mix_pool_bytes(r, hash.w, sizeof(hash.w));
- spin_unlock_irqrestore(&r->lock, flags);
+ unsigned long pool[2];
+ unsigned int count;
- memzero_explicit(workspace, sizeof(workspace));
+ /* Check to see if we're running on the wrong CPU due to hotplug. */
+ local_irq_disable();
+ if (fast_pool != this_cpu_ptr(&irq_randomness)) {
+ local_irq_enable();
+ return;
+ }
/*
- * In case the hash function has some recognizable output
- * pattern, we fold it in half. Thus, we always feed back
- * twice as much data as we output.
+ * Copy the pool to the stack so that the mixer always has a
+ * consistent view, before we reenable irqs again.
*/
- hash.w[0] ^= hash.w[3];
- hash.w[1] ^= hash.w[4];
- hash.w[2] ^= rol32(hash.w[2], 16);
+ memcpy(pool, fast_pool->pool, sizeof(pool));
+ count = fast_pool->count;
+ fast_pool->count = 0;
+ fast_pool->last = jiffies;
+ local_irq_enable();
+
+ mix_pool_bytes(pool, sizeof(pool));
+ credit_init_bits(clamp_t(unsigned int, (count & U16_MAX) / 64, 1, sizeof(pool) * 8));
- memcpy(out, &hash, EXTRACT_SIZE);
- memzero_explicit(&hash, sizeof(hash));
+ memzero_explicit(pool, sizeof(pool));
}
-static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int fips)
+void add_interrupt_randomness(int irq)
{
- ssize_t ret = 0, i;
- __u8 tmp[EXTRACT_SIZE];
- unsigned long flags;
+ enum { MIX_INFLIGHT = 1U << 31 };
+ unsigned long entropy = random_get_entropy();
+ struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
+ struct pt_regs *regs = get_irq_regs();
+ unsigned int new_count;
- while (nbytes) {
- extract_buf(r, tmp);
+ fast_mix(fast_pool->pool, entropy,
+ (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq));
+ new_count = ++fast_pool->count;
- if (fips) {
- spin_lock_irqsave(&r->lock, flags);
- if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
- panic("Hardware RNG duplicated output!\n");
- memcpy(r->last_data, tmp, EXTRACT_SIZE);
- spin_unlock_irqrestore(&r->lock, flags);
- }
- i = min_t(int, nbytes, EXTRACT_SIZE);
- memcpy(buf, tmp, i);
- nbytes -= i;
- buf += i;
- ret += i;
- }
+ if (new_count & MIX_INFLIGHT)
+ return;
- /* Wipe data just returned from memory */
- memzero_explicit(tmp, sizeof(tmp));
+ if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
+ return;
- return ret;
+ fast_pool->count |= MIX_INFLIGHT;
+ if (!timer_pending(&fast_pool->mix)) {
+ fast_pool->mix.expires = jiffies;
+ add_timer_on(&fast_pool->mix, raw_smp_processor_id());
+ }
}
+EXPORT_SYMBOL_GPL(add_interrupt_randomness);
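
As a worked example of the thresholds above: a CPU taking interrupts at a high rate reaches new_count == 1024 well before a second elapses, the per-CPU mix timer fires, and mix_interrupt_randomness() credits clamp(1024 / 64, 1, sizeof(pool) * 8) = 16 bits for that batch; on a mostly idle CPU the one-second path triggers instead, and the clamp floor still guarantees at least 1 bit per flush.
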
+
+/* There is one of these per entropy source */
+struct timer_rand_state {
+ unsigned long last_time;
+ long last_delta, last_delta2;
+};
/*
- * This function extracts randomness from the "entropy pool", and
- * returns it in a buffer.
- *
- * The min parameter specifies the minimum amount we can pull before
- * failing to avoid races that defeat catastrophic reseeding while the
- * reserved parameter indicates how much entropy we must leave in the
- * pool after each pull to avoid starving other readers.
+ * This function adds entropy to the entropy "pool" by using timing
+ * delays. It uses the timer_rand_state structure to make an estimate
+ * of how many bits of entropy this call has added to the pool. The
+ * value "num" is also added to the pool; it should somehow describe
+ * the type of event that just happened.
*/
-static ssize_t extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int min, int reserved)
+static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
{
- __u8 tmp[EXTRACT_SIZE];
- unsigned long flags;
+ unsigned long entropy = random_get_entropy(), now = jiffies, flags;
+ long delta, delta2, delta3;
+ unsigned int bits;
- /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
- if (fips_enabled) {
- spin_lock_irqsave(&r->lock, flags);
- if (!r->last_data_init) {
- r->last_data_init = 1;
- spin_unlock_irqrestore(&r->lock, flags);
- trace_extract_entropy(r->name, EXTRACT_SIZE,
- ENTROPY_BITS(r), _RET_IP_);
- extract_buf(r, tmp);
- spin_lock_irqsave(&r->lock, flags);
- memcpy(r->last_data, tmp, EXTRACT_SIZE);
- }
- spin_unlock_irqrestore(&r->lock, flags);
+ /*
+ * If we're in a hard IRQ, add_interrupt_randomness() will be called
+ * sometime after, so mix into the fast pool.
+ */
+ if (in_hardirq()) {
+ fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num);
+ } else {
+ spin_lock_irqsave(&input_pool.lock, flags);
+ _mix_pool_bytes(&entropy, sizeof(entropy));
+ _mix_pool_bytes(&num, sizeof(num));
+ spin_unlock_irqrestore(&input_pool.lock, flags);
}
- trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
- nbytes = account(r, nbytes, min, reserved);
+ if (crng_ready())
+ return;
- return _extract_entropy(r, buf, nbytes, fips_enabled);
-}
+ /*
+ * Calculate number of bits of randomness we probably added.
+ * We take into account the first, second and third-order deltas
+ * in order to make our estimate.
+ */
+ delta = now - READ_ONCE(state->last_time);
+ WRITE_ONCE(state->last_time, now);
-#define warn_unseeded_randomness(previous) \
- _warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))
+ delta2 = delta - READ_ONCE(state->last_delta);
+ WRITE_ONCE(state->last_delta, delta);
-static void _warn_unseeded_randomness(const char *func_name, void *caller,
- void **previous)
-{
-#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
- const bool print_once = false;
-#else
- static bool print_once __read_mostly;
-#endif
+ delta3 = delta2 - READ_ONCE(state->last_delta2);
+ WRITE_ONCE(state->last_delta2, delta2);
- if (print_once ||
- crng_ready() ||
- (previous && (caller == READ_ONCE(*previous))))
- return;
- WRITE_ONCE(*previous, caller);
-#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
- print_once = true;
-#endif
- if (__ratelimit(&unseeded_warning))
- printk_deferred(KERN_NOTICE "random: %s called from %pS "
- "with crng_init=%d\n", func_name, caller,
- crng_init);
+ if (delta < 0)
+ delta = -delta;
+ if (delta2 < 0)
+ delta2 = -delta2;
+ if (delta3 < 0)
+ delta3 = -delta3;
+ if (delta > delta2)
+ delta = delta2;
+ if (delta > delta3)
+ delta = delta3;
+
+ /*
+ * delta is now minimum absolute delta. Round down by 1 bit
+ * on general principles, and limit entropy estimate to 11 bits.
+ */
+ bits = min(fls(delta >> 1), 11);
+
+ /*
+ * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness()
+ * will run after this, which uses a different crediting scheme of 1 bit
+ * per every 64 interrupts. In order to let that function do accounting
+ * close to the one in this function, we credit a full 64/64 bit per bit,
+ * and then subtract one to account for the extra one added.
+ */
+ if (in_hardirq())
+ this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
+ else
+ _credit_init_bits(bits);
}
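
To make the estimator concrete: if an event sequence yields a first-order delta of 24 jiffies, a second-order delta of 9 and a third-order delta of 5, the minimum absolute delta is 5, so bits = min(fls(5 >> 1), 11) = 2. Outside hard IRQ context those 2 bits are credited directly, while in hard IRQ context max(1, 2 * 64) - 1 = 127 is added to the per-CPU fast pool count, matching the 1-bit-per-64-interrupts accounting described above.
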
-/*
- * This function is the exported kernel interface. It returns some
- * number of good random numbers, suitable for key generation, seeding
- * TCP sequence numbers, etc. It does not rely on the hardware random
- * number generator. For random bytes direct from the hardware RNG
- * (when available), use get_random_bytes_arch(). In order to ensure
- * that the randomness provided by this function is okay, the function
- * wait_for_random_bytes() should be called and return 0 at least once
- * at any point prior.
- */
-static void _get_random_bytes(void *buf, int nbytes)
+void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
{
- __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
+ static unsigned char last_value;
+ static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };
- trace_get_random_bytes(nbytes, _RET_IP_);
+ /* Ignore autorepeat and the like. */
+ if (value == last_value)
+ return;
- while (nbytes >= CHACHA_BLOCK_SIZE) {
- extract_crng(buf);
- buf += CHACHA_BLOCK_SIZE;
- nbytes -= CHACHA_BLOCK_SIZE;
- }
+ last_value = value;
+ add_timer_randomness(&input_timer_state,
+ (type << 4) ^ code ^ (code >> 4) ^ value);
+}
+EXPORT_SYMBOL_GPL(add_input_randomness);
- if (nbytes > 0) {
- extract_crng(tmp);
- memcpy(buf, tmp, nbytes);
- crng_backtrack_protect(tmp, nbytes);
- } else
- crng_backtrack_protect(tmp, CHACHA_BLOCK_SIZE);
- memzero_explicit(tmp, sizeof(tmp));
+#ifdef CONFIG_BLOCK
+void add_disk_randomness(struct gendisk *disk)
+{
+ if (!disk || !disk->random)
+ return;
+ /* First major is 1, so we get >= 0x200 here. */
+ add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
}
+EXPORT_SYMBOL_GPL(add_disk_randomness);
-void get_random_bytes(void *buf, int nbytes)
+void __cold rand_initialize_disk(struct gendisk *disk)
{
- static void *previous;
+ struct timer_rand_state *state;
- warn_unseeded_randomness(&previous);
- _get_random_bytes(buf, nbytes);
+ /*
+ * If kzalloc returns null, we just won't use that entropy
+ * source.
+ */
+ state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
+ if (state) {
+ state->last_time = INITIAL_JIFFIES;
+ disk->random = state;
+ }
}
-EXPORT_SYMBOL(get_random_bytes);
+#endif
+struct entropy_timer_state {
+ unsigned long entropy;
+ struct timer_list timer;
+ unsigned int samples, samples_per_bit;
+};
/*
* Each time the timer fires, we expect that we got an unpredictable
@@ -1566,351 +1192,198 @@ EXPORT_SYMBOL(get_random_bytes);
*
* So the re-arming always happens in the entropy loop itself.
*/
-static void entropy_timer(struct timer_list *t)
+static void __cold entropy_timer(struct timer_list *timer)
{
- credit_entropy_bits(&input_pool, 1);
+ struct entropy_timer_state *state = container_of(timer, struct entropy_timer_state, timer);
+
+ if (++state->samples == state->samples_per_bit) {
+ credit_init_bits(1);
+ state->samples = 0;
+ }
}
/*
* If we have an actual cycle counter, see if we can
* generate enough entropy with timing noise
*/
-static void try_to_generate_entropy(void)
+static void __cold try_to_generate_entropy(void)
{
- struct {
- unsigned long now;
- struct timer_list timer;
- } stack;
-
- stack.now = random_get_entropy();
-
- /* Slow counter - or none. Don't even bother */
- if (stack.now == random_get_entropy())
+ enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 15 };
+ struct entropy_timer_state stack;
+ unsigned int i, num_different = 0;
+ unsigned long last = random_get_entropy();
+
+ for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) {
+ stack.entropy = random_get_entropy();
+ if (stack.entropy != last)
+ ++num_different;
+ last = stack.entropy;
+ }
+ stack.samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1);
+ if (stack.samples_per_bit > MAX_SAMPLES_PER_BIT)
return;
+ stack.samples = 0;
timer_setup_on_stack(&stack.timer, entropy_timer, 0);
- while (!crng_ready()) {
+ while (!crng_ready() && !signal_pending(current)) {
if (!timer_pending(&stack.timer))
- mod_timer(&stack.timer, jiffies+1);
- mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
+ mod_timer(&stack.timer, jiffies);
+ mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
schedule();
- stack.now = random_get_entropy();
+ stack.entropy = random_get_entropy();
}
del_timer_sync(&stack.timer);
destroy_timer_on_stack(&stack.timer);
- mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
+ mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
}
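
For instance, if the 8192 trial reads see the counter change 1023 times, samples_per_bit = DIV_ROUND_UP(8192, 1024) = 8, so one bit is credited on every eighth firing of the on-stack timer; if the counter is so coarse that samples_per_bit would exceed HZ / 15, the function returns without arming the timer at all.
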
-/*
- * Wait for the urandom pool to be seeded and thus guaranteed to supply
- * cryptographically secure random numbers. This applies to: the /dev/urandom
- * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
- * family of functions. Using any of these functions without first calling
- * this function forfeits the guarantee of security.
- *
- * Returns: 0 if the urandom pool has been seeded.
- * -ERESTARTSYS if the function was interrupted by a signal.
- */
-int wait_for_random_bytes(void)
-{
- if (likely(crng_ready()))
- return 0;
- do {
- int ret;
- ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
- if (ret)
- return ret > 0 ? 0 : ret;
-
- try_to_generate_entropy();
- } while (!crng_ready());
-
- return 0;
-}
-EXPORT_SYMBOL(wait_for_random_bytes);
-
-/*
- * Returns whether or not the urandom pool has been seeded and thus guaranteed
- * to supply cryptographically secure random numbers. This applies to: the
- * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
- * ,u64,int,long} family of functions.
+/**********************************************************************
*
- * Returns: true if the urandom pool has been seeded.
- * false if the urandom pool has not been seeded.
- */
-bool rng_is_initialized(void)
-{
- return crng_ready();
-}
-EXPORT_SYMBOL(rng_is_initialized);
-
-/*
- * Add a callback function that will be invoked when the nonblocking
- * pool is initialised.
+ * Userspace reader/writer interfaces.
*
- * returns: 0 if callback is successfully added
- * -EALREADY if pool is already initialised (callback not called)
- * -ENOENT if module for callback is not alive
- */
-int add_random_ready_callback(struct random_ready_callback *rdy)
-{
- struct module *owner;
- unsigned long flags;
- int err = -EALREADY;
-
- if (crng_ready())
- return err;
-
- owner = rdy->owner;
- if (!try_module_get(owner))
- return -ENOENT;
-
- spin_lock_irqsave(&random_ready_list_lock, flags);
- if (crng_ready())
- goto out;
+ * getrandom(2) is the primary modern interface into the RNG and should
+ * be used in preference to anything else.
+ *
+ * Reading from /dev/random has the same functionality as calling
+ * getrandom(2) with flags=0. In earlier versions, however, it had
+ * vastly different semantics and should therefore be avoided, to
+ * prevent backwards compatibility issues.
+ *
+ * Reading from /dev/urandom has the same functionality as calling
+ * getrandom(2) with flags=GRND_INSECURE. Because it does not block
+ * waiting for the RNG to be ready, it should not be used.
+ *
+ * Writing to either /dev/random or /dev/urandom adds entropy to
+ * the input pool but does not credit it.
+ *
+ * Polling on /dev/random indicates when the RNG is initialized, on
+ * the read side, and when it wants new entropy, on the write side.
+ *
+ * Both /dev/random and /dev/urandom have the same set of ioctls for
+ * adding entropy, getting the entropy count, zeroing the count, and
+ * reseeding the crng.
+ *
+ **********************************************************************/
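
From userspace, the preferred interface described above boils down to the following minimal sketch; glibc 2.25 and later expose the getrandom() wrapper via <sys/random.h>.

#include <stdio.h>
#include <sys/random.h>

int main(void)
{
	unsigned char key[32];

	/* Blocks only until the kernel RNG is initialized, never afterwards. */
	if (getrandom(key, sizeof(key), 0) != sizeof(key)) {
		perror("getrandom");
		return 1;
	}
	/* key[] now holds 32 cryptographically secure random bytes. */
	return 0;
}
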
- owner = NULL;
+SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
+{
+ struct iov_iter iter;
+ struct iovec iov;
+ int ret;
- list_add(&rdy->list, &random_ready_list);
- err = 0;
+ if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
+ return -EINVAL;
-out:
- spin_unlock_irqrestore(&random_ready_list_lock, flags);
+ /*
+ * Requesting insecure and blocking randomness at the same time makes
+ * no sense.
+ */
+ if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
+ return -EINVAL;
- module_put(owner);
+ if (!crng_ready() && !(flags & GRND_INSECURE)) {
+ if (flags & GRND_NONBLOCK)
+ return -EAGAIN;
+ ret = wait_for_random_bytes();
+ if (unlikely(ret))
+ return ret;
+ }
- return err;
+ ret = import_single_range(READ, ubuf, len, &iov, &iter);
+ if (unlikely(ret))
+ return ret;
+ return get_random_bytes_user(&iter);
}
-EXPORT_SYMBOL(add_random_ready_callback);
-/*
- * Delete a previously registered readiness callback function.
- */
-void del_random_ready_callback(struct random_ready_callback *rdy)
+static __poll_t random_poll(struct file *file, poll_table *wait)
{
- unsigned long flags;
- struct module *owner = NULL;
-
- spin_lock_irqsave(&random_ready_list_lock, flags);
- if (!list_empty(&rdy->list)) {
- list_del_init(&rdy->list);
- owner = rdy->owner;
- }
- spin_unlock_irqrestore(&random_ready_list_lock, flags);
-
- module_put(owner);
+ poll_wait(file, &crng_init_wait, wait);
+ return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
}
-EXPORT_SYMBOL(del_random_ready_callback);
-/*
- * This function will use the architecture-specific hardware random
- * number generator if it is available. The arch-specific hw RNG will
- * almost certainly be faster than what we can do in software, but it
- * is impossible to verify that it is implemented securely (as
- * opposed, to, say, the AES encryption of a sequence number using a
- * key known by the NSA). So it's useful if we need the speed, but
- * only if we're willing to trust the hardware manufacturer not to
- * have put in a back door.
- *
- * Return number of bytes filled in.
- */
-int __must_check get_random_bytes_arch(void *buf, int nbytes)
+static ssize_t write_pool_user(struct iov_iter *iter)
{
- int left = nbytes;
- char *p = buf;
+ u8 block[BLAKE2S_BLOCK_SIZE];
+ ssize_t ret = 0;
+ size_t copied;
- trace_get_random_bytes_arch(left, _RET_IP_);
- while (left) {
- unsigned long v;
- int chunk = min_t(int, left, sizeof(unsigned long));
+ if (unlikely(!iov_iter_count(iter)))
+ return 0;
- if (!arch_get_random_long(&v))
+ for (;;) {
+ copied = copy_from_iter(block, sizeof(block), iter);
+ ret += copied;
+ mix_pool_bytes(block, copied);
+ if (!iov_iter_count(iter) || copied != sizeof(block))
break;
- memcpy(p, &v, chunk);
- p += chunk;
- left -= chunk;
+ BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
+ if (ret % PAGE_SIZE == 0) {
+ if (signal_pending(current))
+ break;
+ cond_resched();
+ }
}
- return nbytes - left;
+ memzero_explicit(block, sizeof(block));
+ return ret ? ret : -EFAULT;
}
-EXPORT_SYMBOL(get_random_bytes_arch);
-/*
- * init_std_data - initialize pool with system data
- *
- * @r: pool to initialize
- *
- * This function clears the pool's entropy count and mixes some system
- * data into the pool to prepare it for use. The pool is not cleared
- * as that can only decrease the entropy in the pool.
- */
-static void __init init_std_data(struct entropy_store *r)
+static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
- int i;
- ktime_t now = ktime_get_real();
- unsigned long rv;
-
- mix_pool_bytes(r, &now, sizeof(now));
- for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
- if (!arch_get_random_seed_long(&rv) &&
- !arch_get_random_long(&rv))
- rv = random_get_entropy();
- mix_pool_bytes(r, &rv, sizeof(rv));
- }
- mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
+ return write_pool_user(iter);
}
-/*
- * Note that setup_arch() may call add_device_randomness()
- * long before we get here. This allows seeding of the pools
- * with some platform dependent data very early in the boot
- * process. But it limits our options here. We must use
- * statically allocated structures that already have all
- * initializations complete at compile time. We should also
- * take care not to overwrite the precious per platform data
- * we were given.
- */
-int __init rand_initialize(void)
+static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
- init_std_data(&input_pool);
- crng_initialize_primary(&primary_crng);
- crng_global_init_time = jiffies;
- if (ratelimit_disable) {
- urandom_warning.interval = 0;
- unseeded_warning.interval = 0;
- }
- return 0;
-}
-
-#ifdef CONFIG_BLOCK
-void rand_initialize_disk(struct gendisk *disk)
-{
- struct timer_rand_state *state;
+ static int maxwarn = 10;
/*
- * If kzalloc returns null, we just won't use that entropy
- * source.
+ * Opportunistically attempt to initialize the RNG on platforms that
+ * have fast cycle counters, but don't (for now) require it to succeed.
*/
- state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
- if (state) {
- state->last_time = INITIAL_JIFFIES;
- disk->random = state;
- }
-}
-#endif
-
-static ssize_t
-urandom_read_nowarn(struct file *file, char __user *buf, size_t nbytes,
- loff_t *ppos)
-{
- int ret;
-
- nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
- ret = extract_crng_user(buf, nbytes);
- trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
- return ret;
-}
-
-static ssize_t
-urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
-{
- unsigned long flags;
- static int maxwarn = 10;
+ if (!crng_ready())
+ try_to_generate_entropy();
- if (!crng_ready() && maxwarn > 0) {
- maxwarn--;
- if (__ratelimit(&urandom_warning))
- pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
- current->comm, nbytes);
- spin_lock_irqsave(&primary_crng.lock, flags);
- crng_init_cnt = 0;
- spin_unlock_irqrestore(&primary_crng.lock, flags);
+ if (!crng_ready()) {
+ if (!ratelimit_disable && maxwarn <= 0)
+ ++urandom_warning.missed;
+ else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
+ --maxwarn;
+ pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
+ current->comm, iov_iter_count(iter));
+ }
}
- return urandom_read_nowarn(file, buf, nbytes, ppos);
+ return get_random_bytes_user(iter);
}
-static ssize_t
-random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
int ret;
+ if (!crng_ready() &&
+ ((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) ||
+ (kiocb->ki_filp->f_flags & O_NONBLOCK)))
+ return -EAGAIN;
+
ret = wait_for_random_bytes();
if (ret != 0)
return ret;
- return urandom_read_nowarn(file, buf, nbytes, ppos);
-}
-
-static __poll_t
-random_poll(struct file *file, poll_table * wait)
-{
- __poll_t mask;
-
- poll_wait(file, &crng_init_wait, wait);
- poll_wait(file, &random_write_wait, wait);
- mask = 0;
- if (crng_ready())
- mask |= EPOLLIN | EPOLLRDNORM;
- if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
- mask |= EPOLLOUT | EPOLLWRNORM;
- return mask;
-}
-
-static int
-write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
-{
- size_t bytes;
- __u32 t, buf[16];
- const char __user *p = buffer;
-
- while (count > 0) {
- int b, i = 0;
-
- bytes = min(count, sizeof(buf));
- if (copy_from_user(&buf, p, bytes))
- return -EFAULT;
-
- for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
- if (!arch_get_random_int(&t))
- break;
- buf[i] ^= t;
- }
-
- count -= bytes;
- p += bytes;
-
- mix_pool_bytes(r, buf, bytes);
- cond_resched();
- }
-
- return 0;
-}
-
-static ssize_t random_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- size_t ret;
-
- ret = write_pool(&input_pool, buffer, count);
- if (ret)
- return ret;
-
- return (ssize_t)count;
+ return get_random_bytes_user(iter);
}
static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
- int size, ent_count;
int __user *p = (int __user *)arg;
- int retval;
+ int ent_count;
switch (cmd) {
case RNDGETENTCNT:
- /* inherently racy, no point locking */
- ent_count = ENTROPY_BITS(&input_pool);
- if (put_user(ent_count, p))
+ /* Inherently racy, no point locking. */
+ if (put_user(input_pool.init_bits, p))
return -EFAULT;
return 0;
case RNDADDTOENTCNT:
@@ -1918,38 +1391,48 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EPERM;
if (get_user(ent_count, p))
return -EFAULT;
- return credit_entropy_bits_safe(&input_pool, ent_count);
- case RNDADDENTROPY:
+ if (ent_count < 0)
+ return -EINVAL;
+ credit_init_bits(ent_count);
+ return 0;
+ case RNDADDENTROPY: {
+ struct iov_iter iter;
+ struct iovec iov;
+ ssize_t ret;
+ int len;
+
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (get_user(ent_count, p++))
return -EFAULT;
if (ent_count < 0)
return -EINVAL;
- if (get_user(size, p++))
+ if (get_user(len, p++))
+ return -EFAULT;
+ ret = import_single_range(WRITE, p, len, &iov, &iter);
+ if (unlikely(ret))
+ return ret;
+ ret = write_pool_user(&iter);
+ if (unlikely(ret < 0))
+ return ret;
+ /* Since we're crediting, enforce that it was all written into the pool. */
+ if (unlikely(ret != len))
return -EFAULT;
- retval = write_pool(&input_pool, (const char __user *)p,
- size);
- if (retval < 0)
- return retval;
- return credit_entropy_bits_safe(&input_pool, ent_count);
+ credit_init_bits(ent_count);
+ return 0;
+ }
case RNDZAPENTCNT:
case RNDCLEARPOOL:
- /*
- * Clear the entropy pool counters. We no longer clear
- * the entropy pool, as that's silly.
- */
+ /* No longer has any effect. */
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- input_pool.entropy_count = 0;
return 0;
case RNDRESEEDCRNG:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (crng_init < 2)
+ if (!crng_ready())
return -ENODATA;
- crng_reseed(&primary_crng, &input_pool);
- crng_global_init_time = jiffies - 1;
+ crng_reseed();
return 0;
default:
return -EINVAL;
@@ -1962,55 +1445,56 @@ static int random_fasync(int fd, struct file *filp, int on)
}
const struct file_operations random_fops = {
- .read = random_read,
- .write = random_write,
- .poll = random_poll,
+ .read_iter = random_read_iter,
+ .write_iter = random_write_iter,
+ .poll = random_poll,
.unlocked_ioctl = random_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.fasync = random_fasync,
.llseek = noop_llseek,
+ .splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
};
const struct file_operations urandom_fops = {
- .read = urandom_read,
- .write = random_write,
+ .read_iter = urandom_read_iter,
+ .write_iter = random_write_iter,
.unlocked_ioctl = random_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.fasync = random_fasync,
.llseek = noop_llseek,
+ .splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
};
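
The RNDADDENTROPY path handled by random_ioctl() above is how rngd-style daemons feed and credit seed material; a hedged userspace sketch follows (privileged, CAP_SYS_ADMIN required; buffer handling illustrative only).

#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/random.h>

/* Mix len seed bytes into the pool and credit entropy_bits of entropy. */
static int feed_entropy(const void *seed, int len, int entropy_bits)
{
	struct rand_pool_info *req;
	int fd, ret;

	req = malloc(sizeof(*req) + len);
	if (!req)
		return -1;
	req->entropy_count = entropy_bits;
	req->buf_size = len;
	memcpy(req->buf, seed, len);

	fd = open("/dev/urandom", O_RDWR);
	ret = fd < 0 ? -1 : ioctl(fd, RNDADDENTROPY, req);
	if (fd >= 0)
		close(fd);
	free(req);
	return ret;
}
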
-SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
- unsigned int, flags)
-{
- int ret;
-
- if (flags & ~(GRND_NONBLOCK|GRND_RANDOM|GRND_INSECURE))
- return -EINVAL;
-
- /*
- * Requesting insecure and blocking randomness at the same time makes
- * no sense.
- */
- if ((flags & (GRND_INSECURE|GRND_RANDOM)) == (GRND_INSECURE|GRND_RANDOM))
- return -EINVAL;
-
- if (count > INT_MAX)
- count = INT_MAX;
-
- if (!(flags & GRND_INSECURE) && !crng_ready()) {
- if (flags & GRND_NONBLOCK)
- return -EAGAIN;
- ret = wait_for_random_bytes();
- if (unlikely(ret))
- return ret;
- }
- return urandom_read_nowarn(NULL, buf, count, NULL);
-}
/********************************************************************
*
- * Sysctl interface
+ * Sysctl interface.
+ *
+ * These are partly unused legacy knobs with dummy values to not break
+ * userspace and partly still useful things. They are usually accessible
+ * in /proc/sys/kernel/random/ and are as follows:
+ *
+ * - boot_id - a UUID representing the current boot.
+ *
+ * - uuid - a random UUID, different each time the file is read.
+ *
+ * - poolsize - the number of bits of entropy that the input pool can
+ * hold, tied to the POOL_BITS constant.
+ *
+ * - entropy_avail - the number of bits of entropy currently in the
+ * input pool. Always <= poolsize.
+ *
+ * - write_wakeup_threshold - the amount of entropy in the input pool
+ * below which write polls to /dev/random will unblock, requesting
+ * more entropy, tied to the POOL_READY_BITS constant. It is writable
+ * to avoid breaking old userspaces, but writing to it does not
+ * change any behavior of the RNG.
+ *
+ * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
+ * It is writable to avoid breaking old userspaces, but writing
+ * to it does not change any behavior of the RNG.
*
********************************************************************/
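
Under this layout, for example, /proc/sys/kernel/random/entropy_avail simply reports input_pool.init_bits: it climbs monotonically to at most 256 (the poolsize) as the pool initializes and then stays there, rather than fluctuating as entropy is consumed the way it did on older kernels.
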
@@ -2018,25 +1502,28 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
#include <linux/sysctl.h>
-static int min_write_thresh;
-static int max_write_thresh = INPUT_POOL_WORDS * 32;
-static int random_min_urandom_seed = 60;
-static char sysctl_bootid[16];
+static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
+static int sysctl_random_write_wakeup_bits = POOL_READY_BITS;
+static int sysctl_poolsize = POOL_BITS;
+static u8 sysctl_bootid[UUID_SIZE];
/*
* This function is used to return both the bootid UUID, and random
- * UUID. The difference is in whether table->data is NULL; if it is,
+ * UUID. The difference is in whether table->data is NULL; if it is,
* then a new UUID is generated and returned to the user.
- *
- * If the user accesses this via the proc interface, the UUID will be
- * returned as an ASCII string in the standard UUID format; if via the
- * sysctl system call, as 16 bytes of binary data.
*/
-static int proc_do_uuid(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+static int proc_do_uuid(struct ctl_table *table, int write, void *buf,
+ size_t *lenp, loff_t *ppos)
{
- struct ctl_table fake_table;
- unsigned char buf[64], tmp_uuid[16], *uuid;
+ u8 tmp_uuid[UUID_SIZE], *uuid;
+ char uuid_string[UUID_STRING_LEN + 1];
+ struct ctl_table fake_table = {
+ .data = uuid_string,
+ .maxlen = UUID_STRING_LEN
+ };
+
+ if (write)
+ return -EPERM;
uuid = table->data;
if (!uuid) {
@@ -2051,34 +1538,18 @@ static int proc_do_uuid(struct ctl_table *table, int write,
spin_unlock(&bootid_spinlock);
}
- sprintf(buf, "%pU", uuid);
-
- fake_table.data = buf;
- fake_table.maxlen = sizeof(buf);
-
- return proc_dostring(&fake_table, write, buffer, lenp, ppos);
+ snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
+ return proc_dostring(&fake_table, 0, buf, lenp, ppos);
}
-/*
- * Return entropy available scaled to integral bits
- */
-static int proc_do_entropy(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+/* The same as proc_dointvec, but writes don't change anything. */
+static int proc_do_rointvec(struct ctl_table *table, int write, void *buf,
+ size_t *lenp, loff_t *ppos)
{
- struct ctl_table fake_table;
- int entropy_count;
-
- entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
-
- fake_table.data = &entropy_count;
- fake_table.maxlen = sizeof(entropy_count);
-
- return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
+ return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
}
-static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
-extern struct ctl_table random_table[];
-struct ctl_table random_table[] = {
+static struct ctl_table random_table[] = {
{
.procname = "poolsize",
.data = &sysctl_poolsize,
@@ -2088,218 +1559,47 @@ struct ctl_table random_table[] = {
},
{
.procname = "entropy_avail",
+ .data = &input_pool.init_bits,
.maxlen = sizeof(int),
.mode = 0444,
- .proc_handler = proc_do_entropy,
- .data = &input_pool.entropy_count,
+ .proc_handler = proc_dointvec,
},
{
.procname = "write_wakeup_threshold",
- .data = &random_write_wakeup_bits,
+ .data = &sysctl_random_write_wakeup_bits,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &min_write_thresh,
- .extra2 = &max_write_thresh,
+ .proc_handler = proc_do_rointvec,
},
{
.procname = "urandom_min_reseed_secs",
- .data = &random_min_urandom_seed,
+ .data = &sysctl_random_min_urandom_seed,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_do_rointvec,
},
{
.procname = "boot_id",
.data = &sysctl_bootid,
- .maxlen = 16,
.mode = 0444,
.proc_handler = proc_do_uuid,
},
{
.procname = "uuid",
- .maxlen = 16,
.mode = 0444,
.proc_handler = proc_do_uuid,
},
-#ifdef ADD_INTERRUPT_BENCH
- {
- .procname = "add_interrupt_avg_cycles",
- .data = &avg_cycles,
- .maxlen = sizeof(avg_cycles),
- .mode = 0444,
- .proc_handler = proc_doulongvec_minmax,
- },
- {
- .procname = "add_interrupt_avg_deviation",
- .data = &avg_deviation,
- .maxlen = sizeof(avg_deviation),
- .mode = 0444,
- .proc_handler = proc_doulongvec_minmax,
- },
-#endif
{ }
};
-#endif /* CONFIG_SYSCTL */
-
-struct batched_entropy {
- union {
- u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)];
- u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
- };
- unsigned int position;
- spinlock_t batch_lock;
-};
/*
- * Get a random word for internal kernel use only. The quality of the random
- * number is good as /dev/urandom, but there is no backtrack protection, with
- * the goal of being quite fast and not depleting entropy. In order to ensure
- * that the randomness provided by this function is okay, the function
- * wait_for_random_bytes() should be called and return 0 at least once at any
- * point prior.
+ * random_init() is called before sysctl_init(),
+ * so we cannot call register_sysctl_init() in random_init()
*/
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
-};
-
-u64 get_random_u64(void)
+static int __init random_sysctls_init(void)
{
- u64 ret;
- unsigned long flags;
- struct batched_entropy *batch;
- static void *previous;
-
- warn_unseeded_randomness(&previous);
-
- batch = raw_cpu_ptr(&batched_entropy_u64);
- spin_lock_irqsave(&batch->batch_lock, flags);
- if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
- extract_crng((u8 *)batch->entropy_u64);
- batch->position = 0;
- }
- ret = batch->entropy_u64[batch->position++];
- spin_unlock_irqrestore(&batch->batch_lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(get_random_u64);
-
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
-};
-u32 get_random_u32(void)
-{
- u32 ret;
- unsigned long flags;
- struct batched_entropy *batch;
- static void *previous;
-
- warn_unseeded_randomness(&previous);
-
- batch = raw_cpu_ptr(&batched_entropy_u32);
- spin_lock_irqsave(&batch->batch_lock, flags);
- if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
- extract_crng((u8 *)batch->entropy_u32);
- batch->position = 0;
- }
- ret = batch->entropy_u32[batch->position++];
- spin_unlock_irqrestore(&batch->batch_lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(get_random_u32);
-
-/* It's important to invalidate all potential batched entropy that might
- * be stored before the crng is initialized, which we can do lazily by
- * simply resetting the counter to zero so that it's re-extracted on the
- * next usage. */
-static void invalidate_batched_entropy(void)
-{
- int cpu;
- unsigned long flags;
-
- for_each_possible_cpu (cpu) {
- struct batched_entropy *batched_entropy;
-
- batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
- spin_lock_irqsave(&batched_entropy->batch_lock, flags);
- batched_entropy->position = 0;
- spin_unlock(&batched_entropy->batch_lock);
-
- batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
- spin_lock(&batched_entropy->batch_lock);
- batched_entropy->position = 0;
- spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
- }
-}
-
-/**
- * randomize_page - Generate a random, page aligned address
- * @start: The smallest acceptable address the caller will take.
- * @range: The size of the area, starting at @start, within which the
- * random address must fall.
- *
- * If @start + @range would overflow, @range is capped.
- *
- * NOTE: Historical use of randomize_range, which this replaces, presumed that
- * @start was already page aligned. We now align it regardless.
- *
- * Return: A page aligned address within [start, start + range). On error,
- * @start is returned.
- */
-unsigned long
-randomize_page(unsigned long start, unsigned long range)
-{
- if (!PAGE_ALIGNED(start)) {
- range -= PAGE_ALIGN(start) - start;
- start = PAGE_ALIGN(start);
- }
-
- if (start > ULONG_MAX - range)
- range = ULONG_MAX - start;
-
- range >>= PAGE_SHIFT;
-
- if (range == 0)
- return start;
-
- return start + (get_random_long() % range << PAGE_SHIFT);
-}
-
-/* Interface for in-kernel drivers of true hardware RNGs.
- * Those devices may produce endless random bits and will be throttled
- * when our pool is full.
- */
-void add_hwgenerator_randomness(const char *buffer, size_t count,
- size_t entropy)
-{
- struct entropy_store *poolp = &input_pool;
-
- if (unlikely(crng_init == 0)) {
- crng_fast_load(buffer, count);
- return;
- }
-
- /* Suspend writing if we're above the trickle threshold.
- * We'll be woken up again once below random_write_wakeup_thresh,
- * or when the calling thread is about to terminate.
- */
- wait_event_interruptible(random_write_wait, kthread_should_stop() ||
- ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
- mix_pool_bytes(poolp, buffer, count);
- credit_entropy_bits(poolp, entropy);
-}
-EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
-
-/* Handle random seed passed by bootloader.
- * If the seed is trustworthy, it would be regarded as hardware RNGs. Otherwise
- * it would be regarded as device data.
- * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
- */
-void add_bootloader_randomness(const void *buf, unsigned int size)
-{
- if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
- add_hwgenerator_randomness(buf, size, size * 8);
- else
- add_device_randomness(buf, size);
+ register_sysctl_init("kernel/random", random_table);
+ return 0;
}
-EXPORT_SYMBOL_GPL(add_bootloader_randomness);
+device_initcall(random_sysctls_init);
+#endif
diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c
deleted file mode 100644
index 1f36be14978f..000000000000
--- a/drivers/char/tb0219.c
+++ /dev/null
@@ -1,359 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Driver for TANBAC TB0219 base board.
- *
- * Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org>
- */
-#include <linux/platform_device.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/uaccess.h>
-
-#include <asm/io.h>
-#include <asm/reboot.h>
-#include <asm/vr41xx/giu.h>
-#include <asm/vr41xx/tb0219.h>
-
-MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
-MODULE_DESCRIPTION("TANBAC TB0219 base board driver");
-MODULE_LICENSE("GPL");
-
-static int major; /* default is dynamic major device number */
-module_param(major, int, 0);
-MODULE_PARM_DESC(major, "Major device number");
-
-static void (*old_machine_restart)(char *command);
-static void __iomem *tb0219_base;
-static DEFINE_SPINLOCK(tb0219_lock);
-
-#define tb0219_read(offset) readw(tb0219_base + (offset))
-#define tb0219_write(offset, value) writew((value), tb0219_base + (offset))
-
-#define TB0219_START 0x0a000000UL
-#define TB0219_SIZE 0x20UL
-
-#define TB0219_LED 0x00
-#define TB0219_GPIO_INPUT 0x02
-#define TB0219_GPIO_OUTPUT 0x04
-#define TB0219_DIP_SWITCH 0x06
-#define TB0219_MISC 0x08
-#define TB0219_RESET 0x0e
-#define TB0219_PCI_SLOT1_IRQ_STATUS 0x10
-#define TB0219_PCI_SLOT2_IRQ_STATUS 0x12
-#define TB0219_PCI_SLOT3_IRQ_STATUS 0x14
-
-typedef enum {
- TYPE_LED,
- TYPE_GPIO_OUTPUT,
-} tb0219_type_t;
-
-/*
- * Minor device number
- * 0 = 7 segment LED
- *
- * 16 = GPIO IN 0
- * 17 = GPIO IN 1
- * 18 = GPIO IN 2
- * 19 = GPIO IN 3
- * 20 = GPIO IN 4
- * 21 = GPIO IN 5
- * 22 = GPIO IN 6
- * 23 = GPIO IN 7
- *
- * 32 = GPIO OUT 0
- * 33 = GPIO OUT 1
- * 34 = GPIO OUT 2
- * 35 = GPIO OUT 3
- * 36 = GPIO OUT 4
- * 37 = GPIO OUT 5
- * 38 = GPIO OUT 6
- * 39 = GPIO OUT 7
- *
- * 48 = DIP switch 1
- * 49 = DIP switch 2
- * 50 = DIP switch 3
- * 51 = DIP switch 4
- * 52 = DIP switch 5
- * 53 = DIP switch 6
- * 54 = DIP switch 7
- * 55 = DIP switch 8
- */
-
-static inline char get_led(void)
-{
- return (char)tb0219_read(TB0219_LED);
-}
-
-static inline char get_gpio_input_pin(unsigned int pin)
-{
- uint16_t values;
-
- values = tb0219_read(TB0219_GPIO_INPUT);
- if (values & (1 << pin))
- return '1';
-
- return '0';
-}
-
-static inline char get_gpio_output_pin(unsigned int pin)
-{
- uint16_t values;
-
- values = tb0219_read(TB0219_GPIO_OUTPUT);
- if (values & (1 << pin))
- return '1';
-
- return '0';
-}
-
-static inline char get_dip_switch(unsigned int pin)
-{
- uint16_t values;
-
- values = tb0219_read(TB0219_DIP_SWITCH);
- if (values & (1 << pin))
- return '1';
-
- return '0';
-}
-
-static inline int set_led(char command)
-{
- tb0219_write(TB0219_LED, command);
-
- return 0;
-}
-
-static inline int set_gpio_output_pin(unsigned int pin, char command)
-{
- unsigned long flags;
- uint16_t value;
-
- if (command != '0' && command != '1')
- return -EINVAL;
-
- spin_lock_irqsave(&tb0219_lock, flags);
- value = tb0219_read(TB0219_GPIO_OUTPUT);
- if (command == '0')
- value &= ~(1 << pin);
- else
- value |= 1 << pin;
- tb0219_write(TB0219_GPIO_OUTPUT, value);
- spin_unlock_irqrestore(&tb0219_lock, flags);
-
- return 0;
-
-}
-
-static ssize_t tanbac_tb0219_read(struct file *file, char __user *buf, size_t len,
- loff_t *ppos)
-{
- unsigned int minor;
- char value;
-
- minor = iminor(file_inode(file));
- switch (minor) {
- case 0:
- value = get_led();
- break;
- case 16 ... 23:
- value = get_gpio_input_pin(minor - 16);
- break;
- case 32 ... 39:
- value = get_gpio_output_pin(minor - 32);
- break;
- case 48 ... 55:
- value = get_dip_switch(minor - 48);
- break;
- default:
- return -EBADF;
- }
-
- if (len <= 0)
- return -EFAULT;
-
- if (put_user(value, buf))
- return -EFAULT;
-
- return 1;
-}
-
-static ssize_t tanbac_tb0219_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
-{
- unsigned int minor;
- tb0219_type_t type;
- size_t i;
- int retval = 0;
- char c;
-
- minor = iminor(file_inode(file));
- switch (minor) {
- case 0:
- type = TYPE_LED;
- break;
- case 32 ... 39:
- type = TYPE_GPIO_OUTPUT;
- break;
- default:
- return -EBADF;
- }
-
- for (i = 0; i < len; i++) {
- if (get_user(c, data + i))
- return -EFAULT;
-
- switch (type) {
- case TYPE_LED:
- retval = set_led(c);
- break;
- case TYPE_GPIO_OUTPUT:
- retval = set_gpio_output_pin(minor - 32, c);
- break;
- }
-
- if (retval < 0)
- break;
- }
-
- return i;
-}
-
-static int tanbac_tb0219_open(struct inode *inode, struct file *file)
-{
- unsigned int minor;
-
- minor = iminor(inode);
- switch (minor) {
- case 0:
- case 16 ... 23:
- case 32 ... 39:
- case 48 ... 55:
- return stream_open(inode, file);
- default:
- break;
- }
-
- return -EBADF;
-}
-
-static int tanbac_tb0219_release(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
-static const struct file_operations tb0219_fops = {
- .owner = THIS_MODULE,
- .read = tanbac_tb0219_read,
- .write = tanbac_tb0219_write,
- .open = tanbac_tb0219_open,
- .release = tanbac_tb0219_release,
- .llseek = no_llseek,
-};
-
-static void tb0219_restart(char *command)
-{
- tb0219_write(TB0219_RESET, 0);
-}
-
-static void tb0219_pci_irq_init(void)
-{
- /* PCI Slot 1 */
- vr41xx_set_irq_trigger(TB0219_PCI_SLOT1_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH);
- vr41xx_set_irq_level(TB0219_PCI_SLOT1_PIN, IRQ_LEVEL_LOW);
-
- /* PCI Slot 2 */
- vr41xx_set_irq_trigger(TB0219_PCI_SLOT2_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH);
- vr41xx_set_irq_level(TB0219_PCI_SLOT2_PIN, IRQ_LEVEL_LOW);
-
- /* PCI Slot 3 */
- vr41xx_set_irq_trigger(TB0219_PCI_SLOT3_PIN, IRQ_TRIGGER_LEVEL, IRQ_SIGNAL_THROUGH);
- vr41xx_set_irq_level(TB0219_PCI_SLOT3_PIN, IRQ_LEVEL_LOW);
-}
-
-static int tb0219_probe(struct platform_device *dev)
-{
- int retval;
-
- if (request_mem_region(TB0219_START, TB0219_SIZE, "TB0219") == NULL)
- return -EBUSY;
-
- tb0219_base = ioremap(TB0219_START, TB0219_SIZE);
- if (tb0219_base == NULL) {
- release_mem_region(TB0219_START, TB0219_SIZE);
- return -ENOMEM;
- }
-
- retval = register_chrdev(major, "TB0219", &tb0219_fops);
- if (retval < 0) {
- iounmap(tb0219_base);
- tb0219_base = NULL;
- release_mem_region(TB0219_START, TB0219_SIZE);
- return retval;
- }
-
- old_machine_restart = _machine_restart;
- _machine_restart = tb0219_restart;
-
- tb0219_pci_irq_init();
-
- if (major == 0) {
- major = retval;
- printk(KERN_INFO "TB0219: major number %d\n", major);
- }
-
- return 0;
-}
-
-static int tb0219_remove(struct platform_device *dev)
-{
- _machine_restart = old_machine_restart;
-
- iounmap(tb0219_base);
- tb0219_base = NULL;
-
- release_mem_region(TB0219_START, TB0219_SIZE);
-
- return 0;
-}
-
-static struct platform_device *tb0219_platform_device;
-
-static struct platform_driver tb0219_device_driver = {
- .probe = tb0219_probe,
- .remove = tb0219_remove,
- .driver = {
- .name = "TB0219",
- },
-};
-
-static int __init tanbac_tb0219_init(void)
-{
- int retval;
-
- tb0219_platform_device = platform_device_alloc("TB0219", -1);
- if (!tb0219_platform_device)
- return -ENOMEM;
-
- retval = platform_device_add(tb0219_platform_device);
- if (retval < 0) {
- platform_device_put(tb0219_platform_device);
- return retval;
- }
-
- retval = platform_driver_register(&tb0219_device_driver);
- if (retval < 0)
- platform_device_unregister(tb0219_platform_device);
-
- return retval;
-}
-
-static void __exit tanbac_tb0219_exit(void)
-{
- platform_driver_unregister(&tb0219_device_driver);
- platform_device_unregister(tb0219_platform_device);
-}
-
-module_init(tanbac_tb0219_init);
-module_exit(tanbac_tb0219_exit);
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 4a5516406c22..927088b2c3d3 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -74,6 +74,18 @@ config TCG_TIS_SPI_CR50
If you have a H1 secure module running Cr50 firmware on SPI bus,
say Yes and it will be accessible from within Linux.
+config TCG_TIS_I2C
+ tristate "TPM Interface Specification 1.3 Interface / TPM 2.0 FIFO Interface - (I2C - generic)"
+ depends on I2C
+ select CRC_CCITT
+ select TCG_TIS_CORE
+ help
+ If you have a TPM security chip, compliant with the TCG TPM PTP
+ (I2C interface) specification and connected to an I2C bus master,
+ say Yes and it will be accessible from within Linux.
+ To compile this driver as a module, choose M here;
+ the module will be called tpm_tis_i2c.
+
config TCG_TIS_SYNQUACER
tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface (MMIO - SynQuacer)"
depends on ARCH_SYNQUACER || COMPILE_TEST
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 66d39ea6bd10..0222b1ddb310 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -29,6 +29,7 @@ tpm_tis_spi-$(CONFIG_TCG_TIS_SPI_CR50) += tpm_tis_spi_cr50.o
obj-$(CONFIG_TCG_TIS_I2C_CR50) += tpm_tis_i2c_cr50.o
+obj-$(CONFIG_TCG_TIS_I2C) += tpm_tis_i2c.o
obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o
obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
index 7c617edff4ca..a3aa411389e7 100644
--- a/drivers/char/tpm/st33zp24/i2c.c
+++ b/drivers/char/tpm/st33zp24/i2c.c
@@ -264,16 +264,11 @@ static int st33zp24_i2c_probe(struct i2c_client *client,
* @param: client, the i2c_client description (TPM I2C description).
* @return: 0 in case of success.
*/
-static int st33zp24_i2c_remove(struct i2c_client *client)
+static void st33zp24_i2c_remove(struct i2c_client *client)
{
struct tpm_chip *chip = i2c_get_clientdata(client);
- int ret;
- ret = st33zp24_remove(chip);
- if (ret)
- return ret;
-
- return 0;
+ st33zp24_remove(chip);
}
static const struct i2c_device_id st33zp24_i2c_id[] = {
diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c
index a75dafd39445..22d184884694 100644
--- a/drivers/char/tpm/st33zp24/spi.c
+++ b/drivers/char/tpm/st33zp24/spi.c
@@ -381,16 +381,11 @@ static int st33zp24_spi_probe(struct spi_device *dev)
* @param: client, the spi_device description (TPM SPI description).
* @return: 0 in case of success.
*/
-static int st33zp24_spi_remove(struct spi_device *dev)
+static void st33zp24_spi_remove(struct spi_device *dev)
{
struct tpm_chip *chip = spi_get_drvdata(dev);
- int ret;
- ret = st33zp24_remove(chip);
- if (ret)
- return ret;
-
- return 0;
+ st33zp24_remove(chip);
}
static const struct spi_device_id st33zp24_spi_id[] = {
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index 4ec10ab5e576..15b393e92c8e 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -61,9 +61,7 @@ enum tis_defaults {
};
/*
- * clear_interruption clear the pending interrupt.
- * @param: tpm_dev, the tpm device device.
- * @return: the interrupt status value.
+ * clear the pending interrupt.
*/
static u8 clear_interruption(struct st33zp24_dev *tpm_dev)
{
@@ -72,12 +70,10 @@ static u8 clear_interruption(struct st33zp24_dev *tpm_dev)
tpm_dev->ops->recv(tpm_dev->phy_id, TPM_INT_STATUS, &interrupt, 1);
tpm_dev->ops->send(tpm_dev->phy_id, TPM_INT_STATUS, &interrupt, 1);
return interrupt;
-} /* clear_interruption() */
+}
/*
- * st33zp24_cancel, cancel the current command execution or
- * set STS to COMMAND READY.
- * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h
+ * cancel the current command execution or set STS to COMMAND READY.
*/
static void st33zp24_cancel(struct tpm_chip *chip)
{
@@ -86,12 +82,10 @@ static void st33zp24_cancel(struct tpm_chip *chip)
data = TPM_STS_COMMAND_READY;
tpm_dev->ops->send(tpm_dev->phy_id, TPM_STS, &data, 1);
-} /* st33zp24_cancel() */
+}
/*
- * st33zp24_status return the TPM_STS register
- * @param: chip, the tpm chip description
- * @return: the TPM_STS register value.
+ * return the TPM_STS register
*/
static u8 st33zp24_status(struct tpm_chip *chip)
{
@@ -100,12 +94,10 @@ static u8 st33zp24_status(struct tpm_chip *chip)
tpm_dev->ops->recv(tpm_dev->phy_id, TPM_STS, &data, 1);
return data;
-} /* st33zp24_status() */
+}
/*
- * check_locality if the locality is active
- * @param: chip, the tpm chip description
- * @return: true if LOCALITY0 is active, otherwise false
+ * if the locality is active
*/
static bool check_locality(struct tpm_chip *chip)
{
@@ -120,13 +112,8 @@ static bool check_locality(struct tpm_chip *chip)
return true;
return false;
-} /* check_locality() */
+}
-/*
- * request_locality request the TPM locality
- * @param: chip, the chip description
- * @return: the active locality or negative value.
- */
static int request_locality(struct tpm_chip *chip)
{
struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
@@ -153,12 +140,8 @@ static int request_locality(struct tpm_chip *chip)
/* could not get locality */
return -EACCES;
-} /* request_locality() */
+}
-/*
- * release_locality release the active locality
- * @param: chip, the tpm chip description.
- */
static void release_locality(struct tpm_chip *chip)
{
struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
@@ -171,8 +154,6 @@ static void release_locality(struct tpm_chip *chip)
/*
* get_burstcount return the burstcount value
- * @param: chip, the chip description
- * return: the burstcount or negative value.
*/
static int get_burstcount(struct tpm_chip *chip)
{
@@ -200,18 +181,8 @@ static int get_burstcount(struct tpm_chip *chip)
msleep(TPM_TIMEOUT);
} while (time_before(jiffies, stop));
return -EBUSY;
-} /* get_burstcount() */
-
+}
-/*
- * wait_for_tpm_stat_cond
- * @param: chip, chip description
- * @param: mask, expected mask value
- * @param: check_cancel, does the command expected to be canceled ?
- * @param: canceled, did we received a cancel request ?
- * @return: true if status == mask or if the command is canceled.
- * false in other cases.
- */
static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
bool check_cancel, bool *canceled)
{
@@ -228,13 +199,7 @@ static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
}
/*
- * wait_for_stat wait for a TPM_STS value
- * @param: chip, the tpm chip description
- * @param: mask, the value mask to wait
- * @param: timeout, the timeout
- * @param: queue, the wait queue.
- * @param: check_cancel, does the command can be cancelled ?
- * @return: the tpm status, 0 if success, -ETIME if timeout is reached.
+ * wait for a TPM_STS value
*/
static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
wait_queue_head_t *queue, bool check_cancel)
@@ -292,15 +257,8 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
}
return -ETIME;
-} /* wait_for_stat() */
+}
-/*
- * recv_data receive data
- * @param: chip, the tpm chip description
- * @param: buf, the buffer where the data are received
- * @param: count, the number of data to receive
- * @return: the number of bytes read from TPM FIFO.
- */
static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
@@ -325,12 +283,6 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
return size;
}
-/*
- * tpm_ioserirq_handler the serirq irq handler
- * @param: irq, the tpm chip description
- * @param: dev_id, the description of the chip
- * @return: the status of the handler.
- */
static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id)
{
struct tpm_chip *chip = dev_id;
@@ -341,16 +293,10 @@ static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id)
disable_irq_nosync(tpm_dev->irq);
return IRQ_HANDLED;
-} /* tpm_ioserirq_handler() */
+}
/*
- * st33zp24_send send TPM commands through the I2C bus.
- *
- * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h
- * @param: buf, the buffer to send.
- * @param: count, the number of bytes to send.
- * @return: In case of success the number of bytes sent.
- * In other case, a < 0 value describing the issue.
+ * send TPM commands through the I2C bus.
*/
static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf,
size_t len)
@@ -431,14 +377,6 @@ out_err:
return ret;
}
-/*
- * st33zp24_recv received TPM response through TPM phy.
- * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h.
- * @param: buf, the buffer to store datas.
- * @param: count, the number of bytes to send.
- * @return: In case of success the number of bytes received.
- * In other case, a < 0 value describing the issue.
- */
static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
size_t count)
{
@@ -478,12 +416,6 @@ out:
return size;
}
-/*
- * st33zp24_req_canceled
- * @param: chip, the tpm_chip description as specified in driver/char/tpm/tpm.h.
- * @param: status, the TPM status.
- * @return: Does TPM ready to compute a new command ? true.
- */
static bool st33zp24_req_canceled(struct tpm_chip *chip, u8 status)
{
return (status == TPM_STS_COMMAND_READY);
@@ -501,11 +433,7 @@ static const struct tpm_class_ops st33zp24_tpm = {
};
/*
- * st33zp24_probe initialize the TPM device
- * @param: client, the i2c_client description (TPM I2C description).
- * @param: id, the i2c_device_id struct.
- * @return: 0 in case of success.
- * -1 in other case.
+ * initialize the TPM device
*/
int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops,
struct device *dev, int irq, int io_lpcpd)
@@ -583,25 +511,13 @@ _tpm_clean_answer:
}
EXPORT_SYMBOL(st33zp24_probe);
-/*
- * st33zp24_remove remove the TPM device
- * @param: tpm_data, the tpm phy.
- * @return: 0 in case of success.
- */
-int st33zp24_remove(struct tpm_chip *chip)
+void st33zp24_remove(struct tpm_chip *chip)
{
tpm_chip_unregister(chip);
- return 0;
}
EXPORT_SYMBOL(st33zp24_remove);
#ifdef CONFIG_PM_SLEEP
-/*
- * st33zp24_pm_suspend suspend the TPM device
- * @param: tpm_data, the tpm phy.
- * @param: mesg, the power management message.
- * @return: 0 in case of success.
- */
int st33zp24_pm_suspend(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
@@ -615,14 +531,9 @@ int st33zp24_pm_suspend(struct device *dev)
ret = tpm_pm_suspend(dev);
return ret;
-} /* st33zp24_pm_suspend() */
+}
EXPORT_SYMBOL(st33zp24_pm_suspend);
-/*
- * st33zp24_pm_resume resume the TPM device
- * @param: tpm_data, the tpm phy.
- * @return: 0 in case of success.
- */
int st33zp24_pm_resume(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
@@ -640,7 +551,7 @@ int st33zp24_pm_resume(struct device *dev)
tpm1_do_selftest(chip);
}
return ret;
-} /* st33zp24_pm_resume() */
+}
EXPORT_SYMBOL(st33zp24_pm_resume);
#endif
diff --git a/drivers/char/tpm/st33zp24/st33zp24.h b/drivers/char/tpm/st33zp24/st33zp24.h
index 6747be1e2502..b387a476c555 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.h
+++ b/drivers/char/tpm/st33zp24/st33zp24.h
@@ -34,5 +34,5 @@ int st33zp24_pm_resume(struct device *dev);
int st33zp24_probe(void *phy_id, const struct st33zp24_phy_ops *ops,
struct device *dev, int irq, int io_lpcpd);
-int st33zp24_remove(struct tpm_chip *chip);
+void st33zp24_remove(struct tpm_chip *chip);
#endif /* __LOCAL_ST33ZP24_H__ */
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index ddaeceb7e109..783d65fc71f0 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -274,14 +274,6 @@ static void tpm_dev_release(struct device *dev)
kfree(chip);
}
-static void tpm_devs_release(struct device *dev)
-{
- struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs);
-
- /* release the master device reference */
- put_device(&chip->dev);
-}
-
/**
* tpm_class_shutdown() - prepare the TPM device for loss of power.
* @dev: device to which the chip is associated.
@@ -344,7 +336,6 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
chip->dev_num = rc;
device_initialize(&chip->dev);
- device_initialize(&chip->devs);
chip->dev.class = tpm_class;
chip->dev.class->shutdown_pre = tpm_class_shutdown;
@@ -352,39 +343,20 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
chip->dev.parent = pdev;
chip->dev.groups = chip->groups;
- chip->devs.parent = pdev;
- chip->devs.class = tpmrm_class;
- chip->devs.release = tpm_devs_release;
- /* get extra reference on main device to hold on
- * behalf of devs. This holds the chip structure
- * while cdevs is in use. The corresponding put
- * is in the tpm_devs_release (TPM2 only)
- */
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- get_device(&chip->dev);
-
if (chip->dev_num == 0)
chip->dev.devt = MKDEV(MISC_MAJOR, TPM_MINOR);
else
chip->dev.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num);
- chip->devs.devt =
- MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES);
-
rc = dev_set_name(&chip->dev, "tpm%d", chip->dev_num);
if (rc)
goto out;
- rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num);
- if (rc)
- goto out;
if (!pdev)
chip->flags |= TPM_CHIP_FLAG_VIRTUAL;
cdev_init(&chip->cdev, &tpm_fops);
- cdev_init(&chip->cdevs, &tpmrm_fops);
chip->cdev.owner = THIS_MODULE;
- chip->cdevs.owner = THIS_MODULE;
rc = tpm2_init_space(&chip->work_space, TPM2_SPACE_BUFFER_SIZE);
if (rc) {
@@ -396,7 +368,6 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
return chip;
out:
- put_device(&chip->devs);
put_device(&chip->dev);
return ERR_PTR(rc);
}
@@ -444,15 +415,10 @@ static int tpm_add_char_device(struct tpm_chip *chip)
return rc;
}
- if (chip->flags & TPM_CHIP_FLAG_TPM2) {
- rc = cdev_device_add(&chip->cdevs, &chip->devs);
- if (rc) {
- dev_err(&chip->devs,
- "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
- dev_name(&chip->devs), MAJOR(chip->devs.devt),
- MINOR(chip->devs.devt), rc);
- return rc;
- }
+ if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip)) {
+ rc = tpm_devs_add(chip);
+ if (rc)
+ goto err_del_cdev;
}
/* Make the chip available. */
@@ -460,6 +426,10 @@ static int tpm_add_char_device(struct tpm_chip *chip)
idr_replace(&dev_nums_idr, chip, chip->dev_num);
mutex_unlock(&idr_lock);
+ return 0;
+
+err_del_cdev:
+ cdev_device_del(&chip->cdev, &chip->dev);
return rc;
}
@@ -474,13 +444,21 @@ static void tpm_del_char_device(struct tpm_chip *chip)
/* Make the driver uncallable. */
down_write(&chip->ops_sem);
- if (chip->flags & TPM_CHIP_FLAG_TPM2) {
- if (!tpm_chip_start(chip)) {
- tpm2_shutdown(chip, TPM2_SU_CLEAR);
- tpm_chip_stop(chip);
+
+ /*
+ * Check if chip->ops is still valid: if the controller driver's
+ * shutdown handler unregisters the controller in its own shutdown
+ * handler, we are called twice and chip->ops is already NULL.
+ */
+ if (chip->ops) {
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ if (!tpm_chip_start(chip)) {
+ tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ tpm_chip_stop(chip);
+ }
}
+ chip->ops = NULL;
}
- chip->ops = NULL;
up_write(&chip->ops_sem);
}
@@ -488,7 +466,8 @@ static void tpm_del_legacy_sysfs(struct tpm_chip *chip)
{
struct attribute **i;
- if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL))
+ if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL) ||
+ tpm_is_firmware_upgrade(chip))
return;
sysfs_remove_link(&chip->dev.parent->kobj, "ppi");
@@ -506,7 +485,8 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
struct attribute **i;
int rc;
- if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL))
+ if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL) ||
+ tpm_is_firmware_upgrade(chip))
return 0;
rc = compat_only_sysfs_link_entry_to_kobj(
@@ -536,7 +516,7 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
static int tpm_add_hwrng(struct tpm_chip *chip)
{
- if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+ if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM) || tpm_is_firmware_upgrade(chip))
return 0;
snprintf(chip->hwrng_name, sizeof(chip->hwrng_name),
@@ -550,6 +530,9 @@ static int tpm_get_pcr_allocation(struct tpm_chip *chip)
{
int rc;
+ if (tpm_is_firmware_upgrade(chip))
+ return 0;
+
rc = (chip->flags & TPM_CHIP_FLAG_TPM2) ?
tpm2_get_pcr_allocation(chip) :
tpm1_get_pcr_allocation(chip);
@@ -612,7 +595,7 @@ int tpm_chip_register(struct tpm_chip *chip)
return 0;
out_hwrng:
- if (IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+ if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip))
hwrng_unregister(&chip->hwrng);
out_ppi:
tpm_bios_log_teardown(chip);
@@ -637,11 +620,11 @@ EXPORT_SYMBOL_GPL(tpm_chip_register);
void tpm_chip_unregister(struct tpm_chip *chip)
{
tpm_del_legacy_sysfs(chip);
- if (IS_ENABLED(CONFIG_HW_RANDOM_TPM))
+ if (IS_ENABLED(CONFIG_HW_RANDOM_TPM) && !tpm_is_firmware_upgrade(chip))
hwrng_unregister(&chip->hwrng);
tpm_bios_log_teardown(chip);
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- cdev_device_del(&chip->cdevs, &chip->devs);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2 && !tpm_is_firmware_upgrade(chip))
+ tpm_devs_remove(chip);
tpm_del_char_device(chip);
}
EXPORT_SYMBOL_GPL(tpm_chip_unregister);
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index c08cbb306636..dc4c0a0a5129 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -69,7 +69,13 @@ static void tpm_dev_async_work(struct work_struct *work)
ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
sizeof(priv->data_buffer));
tpm_put_ops(priv->chip);
- if (ret > 0) {
+
+ /*
+ * If ret is > 0 then tpm_dev_transmit returned the size of the
+ * response. If ret is < 0 then tpm_dev_transmit failed and
+ * returned an error code.
+ */
+ if (ret != 0) {
priv->response_length = ret;
mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
}
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index 63f03cfb8e6a..54c71473aa29 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -480,6 +480,9 @@ void tpm_sysfs_add_device(struct tpm_chip *chip)
WARN_ON(chip->groups_cnt != 0);
+ if (tpm_is_firmware_upgrade(chip))
+ return;
+
if (chip->flags & TPM_CHIP_FLAG_TPM2)
chip->groups[chip->groups_cnt++] = &tpm2_dev_group;
else
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 283f78211c3a..24ee4e1cc452 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -55,6 +55,7 @@ enum tpm_addr {
#define TPM_WARN_DOING_SELFTEST 0x802
#define TPM_ERR_DEACTIVATED 0x6
#define TPM_ERR_DISABLED 0x7
+#define TPM_ERR_FAILEDSELFTEST 0x1C
#define TPM_ERR_INVALID_POSTINIT 38
#define TPM_TAG_RQU_COMMAND 193
@@ -234,6 +235,8 @@ int tpm2_prepare_space(struct tpm_chip *chip, struct tpm_space *space, u8 *cmd,
size_t cmdsiz);
int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space, void *buf,
size_t *bufsiz);
+int tpm_devs_add(struct tpm_chip *chip);
+void tpm_devs_remove(struct tpm_chip *chip);
void tpm_bios_log_setup(struct tpm_chip *chip);
void tpm_bios_log_teardown(struct tpm_chip *chip);
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
index f7dc986fa4a0..cf64c7385105 100644
--- a/drivers/char/tpm/tpm1-cmd.c
+++ b/drivers/char/tpm/tpm1-cmd.c
@@ -709,7 +709,12 @@ int tpm1_auto_startup(struct tpm_chip *chip)
if (rc)
goto out;
rc = tpm1_do_selftest(chip);
- if (rc) {
+ if (rc == TPM_ERR_FAILEDSELFTEST) {
+ dev_warn(&chip->dev, "TPM self test failed, switching to the firmware upgrade mode\n");
+ /* A TPM in this state possibly allows or needs a firmware upgrade */
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_UPGRADE;
+ return 0;
+ } else if (rc) {
dev_err(&chip->dev, "TPM self test failed\n");
goto out;
}
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index a25815a6f625..65d03867e114 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -400,7 +400,16 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id, u32 *value,
if (!rc) {
out = (struct tpm2_get_cap_out *)
&buf.data[TPM_HEADER_SIZE];
- *value = be32_to_cpu(out->value);
+ /*
+ * To prevent failing boot up of some systems, Infineon TPM2.0
+ * returns SUCCESS on TPM2_Startup in field upgrade mode. Also
+ * the TPM2_Getcapability command returns a zero length list
+ * in field upgrade mode.
+ */
+ if (be32_to_cpu(out->property_cnt) > 0)
+ *value = be32_to_cpu(out->value);
+ else
+ rc = -ENODATA;
}
tpm_buf_destroy(&buf);
return rc;
@@ -743,8 +752,24 @@ int tpm2_auto_startup(struct tpm_chip *chip)
}
rc = tpm2_get_cc_attrs_tbl(chip);
+ if (rc == TPM2_RC_FAILURE || (rc < 0 && rc != -ENOMEM)) {
+ dev_info(&chip->dev,
+ "TPM in field failure mode, requires firmware upgrade\n");
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_UPGRADE;
+ rc = 0;
+ }
out:
+ /*
+ * Infineon TPM in field upgrade mode will return no data for the number
+ * of supported commands.
+ */
+ if (rc == TPM2_RC_UPGRADE || rc == -ENODATA) {
+ dev_info(&chip->dev, "TPM in field upgrade mode, requires firmware upgrade\n");
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_UPGRADE;
+ rc = 0;
+ }
+
if (rc > 0)
rc = -ENODEV;
return rc;
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index 97e916856cf3..ffb35f0154c1 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -58,12 +58,12 @@ int tpm2_init_space(struct tpm_space *space, unsigned int buf_size)
void tpm2_del_space(struct tpm_chip *chip, struct tpm_space *space)
{
- mutex_lock(&chip->tpm_mutex);
- if (!tpm_chip_start(chip)) {
+
+ if (tpm_try_get_ops(chip) == 0) {
tpm2_flush_sessions(chip, space);
- tpm_chip_stop(chip);
+ tpm_put_ops(chip);
}
- mutex_unlock(&chip->tpm_mutex);
+
kfree(space->context_buf);
kfree(space->session_buf);
}
@@ -574,3 +574,68 @@ out:
dev_err(&chip->dev, "%s: error %d\n", __func__, rc);
return rc;
}
+
+/*
+ * Put the reference to the main device.
+ */
+static void tpm_devs_release(struct device *dev)
+{
+ struct tpm_chip *chip = container_of(dev, struct tpm_chip, devs);
+
+ /* release the master device reference */
+ put_device(&chip->dev);
+}
+
+/*
+ * Remove the device file for exposed TPM spaces and release the device
+ * reference. This may also release the reference to the master device.
+ */
+void tpm_devs_remove(struct tpm_chip *chip)
+{
+ cdev_device_del(&chip->cdevs, &chip->devs);
+ put_device(&chip->devs);
+}
+
+/*
+ * Add a device file to expose TPM spaces. Also take a reference to the
+ * main device.
+ */
+int tpm_devs_add(struct tpm_chip *chip)
+{
+ int rc;
+
+ device_initialize(&chip->devs);
+ chip->devs.parent = chip->dev.parent;
+ chip->devs.class = tpmrm_class;
+
+ /*
+ * Get extra reference on main device to hold on behalf of devs.
+ * This holds the chip structure while cdevs is in use. The
+ * corresponding put is in the tpm_devs_release.
+ */
+ get_device(&chip->dev);
+ chip->devs.release = tpm_devs_release;
+ chip->devs.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num + TPM_NUM_DEVICES);
+ cdev_init(&chip->cdevs, &tpmrm_fops);
+ chip->cdevs.owner = THIS_MODULE;
+
+ rc = dev_set_name(&chip->devs, "tpmrm%d", chip->dev_num);
+ if (rc)
+ goto err_put_devs;
+
+ rc = cdev_device_add(&chip->cdevs, &chip->devs);
+ if (rc) {
+ dev_err(&chip->devs,
+ "unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
+ dev_name(&chip->devs), MAJOR(chip->devs.devt),
+ MINOR(chip->devs.devt), rc);
+ goto err_put_devs;
+ }
+
+ return 0;
+
+err_put_devs:
+ put_device(&chip->devs);
+
+ return rc;
+}
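
The comments above describe the ownership rule: the devs/cdevs pair takes an extra reference on the main chip device so the chip structure (freed by the main device's release handler) stays valid while /dev/tpmrm%d is open, and tpm_devs_release() drops that reference again. The fragment below is only a generic sketch of that pattern with made-up names (parent_obj, aux_add, aux_release), not the TPM code itself; naming, class and devt setup are omitted for brevity.

#include <linux/device.h>

/* Hypothetical illustration of the reference pattern; not tpm code. */
struct parent_obj {
	struct device dev;	/* its release() frees the whole object */
	struct device aux;	/* secondary node, like chip->devs above */
};

static void aux_release(struct device *dev)
{
	struct parent_obj *p = container_of(dev, struct parent_obj, aux);

	put_device(&p->dev);	/* balances the get_device() in aux_add() */
}

static int aux_add(struct parent_obj *p)
{
	int rc;

	device_initialize(&p->aux);
	p->aux.release = aux_release;
	get_device(&p->dev);	/* keep *p alive while aux is registered */

	/* dev_set_name(), class/devt setup etc. omitted for brevity */
	rc = device_add(&p->aux);
	if (rc)
		put_device(&p->aux);	/* ends up in aux_release() */
	return rc;
}
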
diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
index 6e3235565a4d..5c233423c56f 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.c
+++ b/drivers/char/tpm/tpm_ftpm_tee.c
@@ -177,7 +177,7 @@ static u8 ftpm_tee_tpm_op_status(struct tpm_chip *chip)
static bool ftpm_tee_tpm_req_canceled(struct tpm_chip *chip, u8 status)
{
- return 0;
+ return false;
}
static const struct tpm_class_ops ftpm_tee_tpm_ops = {
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
index d5ac85558214..4be3677c1463 100644
--- a/drivers/char/tpm/tpm_i2c_atmel.c
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -179,12 +179,11 @@ static int i2c_atmel_probe(struct i2c_client *client,
return tpm_chip_register(chip);
}
-static int i2c_atmel_remove(struct i2c_client *client)
+static void i2c_atmel_remove(struct i2c_client *client)
{
struct device *dev = &(client->dev);
struct tpm_chip *chip = dev_get_drvdata(dev);
tpm_chip_unregister(chip);
- return 0;
}
static const struct i2c_device_id i2c_atmel_id[] = {
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index a19d32cb4e94..fd3c3661e646 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -706,15 +706,13 @@ static int tpm_tis_i2c_probe(struct i2c_client *client,
return rc;
}
-static int tpm_tis_i2c_remove(struct i2c_client *client)
+static void tpm_tis_i2c_remove(struct i2c_client *client)
{
struct tpm_chip *chip = tpm_dev.chip;
tpm_chip_unregister(chip);
release_locality(chip, tpm_dev.locality, 1);
tpm_dev.client = NULL;
-
- return 0;
}
static struct i2c_driver tpm_tis_i2c_driver = {
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index b77c18e38662..95c37350cc8e 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -622,12 +622,11 @@ static int i2c_nuvoton_probe(struct i2c_client *client,
return tpm_chip_register(chip);
}
-static int i2c_nuvoton_remove(struct i2c_client *client)
+static void i2c_nuvoton_remove(struct i2c_client *client)
{
struct tpm_chip *chip = i2c_get_clientdata(client);
tpm_chip_unregister(chip);
- return 0;
}
static const struct i2c_device_id i2c_nuvoton_id[] = {
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 3af4c07a9342..d3989b257f42 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -681,6 +681,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
ibmvtpm->rtce_buf != NULL,
HZ)) {
+ rc = -ENODEV;
dev_err(dev, "CRQ response timed out\n");
goto init_irq_cleanup;
}
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index 40018a73b3cb..bc7b1b4501b3 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -380,7 +380,7 @@ void tpm_add_ppi(struct tpm_chip *chip)
TPM_PPI_FN_VERSION,
NULL, ACPI_TYPE_STRING);
if (obj) {
- strlcpy(chip->ppi_version, obj->string.pointer,
+ strscpy(chip->ppi_version, obj->string.pointer,
sizeof(chip->ppi_version));
ACPI_FREE(obj);
}
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index d3f2e5364c27..bcff6429e0b4 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -153,50 +153,46 @@ static int check_acpi_tpm2(struct device *dev)
#endif
static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *result)
+ u8 *result, enum tpm_tis_io_mode io_mode)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
-
- while (len--)
- *result++ = ioread8(phy->iobase + addr);
+ __le16 result_le16;
+ __le32 result_le32;
+
+ switch (io_mode) {
+ case TPM_TIS_PHYS_8:
+ while (len--)
+ *result++ = ioread8(phy->iobase + addr);
+ break;
+ case TPM_TIS_PHYS_16:
+ result_le16 = cpu_to_le16(ioread16(phy->iobase + addr));
+ memcpy(result, &result_le16, sizeof(u16));
+ break;
+ case TPM_TIS_PHYS_32:
+ result_le32 = cpu_to_le32(ioread32(phy->iobase + addr));
+ memcpy(result, &result_le32, sizeof(u32));
+ break;
+ }
return 0;
}
static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
- const u8 *value)
+ const u8 *value, enum tpm_tis_io_mode io_mode)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
- while (len--)
- iowrite8(*value++, phy->iobase + addr);
-
- return 0;
-}
-
-static int tpm_tcg_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
-{
- struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
-
- *result = ioread16(phy->iobase + addr);
-
- return 0;
-}
-
-static int tpm_tcg_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
-{
- struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
-
- *result = ioread32(phy->iobase + addr);
-
- return 0;
-}
-
-static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value)
-{
- struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);
-
- iowrite32(value, phy->iobase + addr);
+ switch (io_mode) {
+ case TPM_TIS_PHYS_8:
+ while (len--)
+ iowrite8(*value++, phy->iobase + addr);
+ break;
+ case TPM_TIS_PHYS_16:
+ return -EINVAL;
+ case TPM_TIS_PHYS_32:
+ iowrite32(le32_to_cpu(*((__le32 *)value)), phy->iobase + addr);
+ break;
+ }
return 0;
}
@@ -204,9 +200,6 @@ static int tpm_tcg_write32(struct tpm_tis_data *data, u32 addr, u32 value)
static const struct tpm_tis_phy_ops tpm_tcg = {
.read_bytes = tpm_tcg_read_bytes,
.write_bytes = tpm_tcg_write_bytes,
- .read16 = tpm_tcg_read16,
- .read32 = tpm_tcg_read32,
- .write32 = tpm_tcg_write32,
};
static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info)
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index b2659a4c4016..757623bacfd5 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -289,6 +289,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
int size = 0;
int status;
u32 expected;
+ int rc;
if (count < TPM_HEADER_SIZE) {
size = -EIO;
@@ -328,6 +329,13 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
goto out;
}
+ rc = tpm_tis_verify_crc(priv, (size_t)size, buf);
+ if (rc < 0) {
+ dev_err(&chip->dev, "CRC mismatch for response.\n");
+ size = rc;
+ goto out;
+ }
+
out:
tpm_tis_ready(chip);
return size;
@@ -443,6 +451,12 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
if (rc < 0)
return rc;
+ rc = tpm_tis_verify_crc(priv, len, buf);
+ if (rc < 0) {
+ dev_err(&chip->dev, "CRC mismatch for command.\n");
+ return rc;
+ }
+
/* go and do it */
rc = tpm_tis_write8(priv, TPM_STS(priv->locality), TPM_STS_GO);
if (rc < 0)
@@ -950,9 +964,11 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
priv->timeout_max = TPM_TIMEOUT_USECS_MAX;
priv->phy_ops = phy_ops;
+ dev_set_drvdata(&chip->dev, priv);
+
rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor);
if (rc < 0)
- goto out_err;
+ return rc;
priv->manufacturer_id = vendor;
@@ -962,8 +978,6 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
priv->timeout_max = TIS_TIMEOUT_MAX_ATML;
}
- dev_set_drvdata(&chip->dev, priv);
-
if (is_bsw()) {
priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR,
ILB_REMAP_SIZE);
@@ -994,7 +1008,15 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT |
TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT;
intmask &= ~TPM_GLOBAL_INT_ENABLE;
+
+ rc = request_locality(chip, 0);
+ if (rc < 0) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+
tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+ release_locality(chip, 0);
rc = tpm_chip_start(chip);
if (rc)
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index 3be24f221e32..66a5a13cd1df 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -104,54 +104,98 @@ struct tpm_tis_data {
unsigned int timeout_max; /* usecs */
};
+/*
+ * IO modes to indicate how many bytes should be read/written at once in the
+ * tpm_tis_phy_ops read_bytes/write_bytes calls. Use TPM_TIS_PHYS_8 to
+ * receive/transmit byte-wise, TPM_TIS_PHYS_16 for two bytes etc.
+ */
+enum tpm_tis_io_mode {
+ TPM_TIS_PHYS_8,
+ TPM_TIS_PHYS_16,
+ TPM_TIS_PHYS_32,
+};
+
struct tpm_tis_phy_ops {
+ /* data is passed in little endian */
int (*read_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
- u8 *result);
+ u8 *result, enum tpm_tis_io_mode mode);
int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
- const u8 *value);
- int (*read16)(struct tpm_tis_data *data, u32 addr, u16 *result);
- int (*read32)(struct tpm_tis_data *data, u32 addr, u32 *result);
- int (*write32)(struct tpm_tis_data *data, u32 addr, u32 src);
+ const u8 *value, enum tpm_tis_io_mode mode);
+ int (*verify_crc)(struct tpm_tis_data *data, size_t len,
+ const u8 *value);
};
static inline int tpm_tis_read_bytes(struct tpm_tis_data *data, u32 addr,
u16 len, u8 *result)
{
- return data->phy_ops->read_bytes(data, addr, len, result);
+ return data->phy_ops->read_bytes(data, addr, len, result,
+ TPM_TIS_PHYS_8);
}
static inline int tpm_tis_read8(struct tpm_tis_data *data, u32 addr, u8 *result)
{
- return data->phy_ops->read_bytes(data, addr, 1, result);
+ return data->phy_ops->read_bytes(data, addr, 1, result, TPM_TIS_PHYS_8);
}
static inline int tpm_tis_read16(struct tpm_tis_data *data, u32 addr,
u16 *result)
{
- return data->phy_ops->read16(data, addr, result);
+ __le16 result_le;
+ int rc;
+
+ rc = data->phy_ops->read_bytes(data, addr, sizeof(u16),
+ (u8 *)&result_le, TPM_TIS_PHYS_16);
+ if (!rc)
+ *result = le16_to_cpu(result_le);
+
+ return rc;
}
static inline int tpm_tis_read32(struct tpm_tis_data *data, u32 addr,
u32 *result)
{
- return data->phy_ops->read32(data, addr, result);
+ __le32 result_le;
+ int rc;
+
+ rc = data->phy_ops->read_bytes(data, addr, sizeof(u32),
+ (u8 *)&result_le, TPM_TIS_PHYS_32);
+ if (!rc)
+ *result = le32_to_cpu(result_le);
+
+ return rc;
}
static inline int tpm_tis_write_bytes(struct tpm_tis_data *data, u32 addr,
u16 len, const u8 *value)
{
- return data->phy_ops->write_bytes(data, addr, len, value);
+ return data->phy_ops->write_bytes(data, addr, len, value,
+ TPM_TIS_PHYS_8);
}
static inline int tpm_tis_write8(struct tpm_tis_data *data, u32 addr, u8 value)
{
- return data->phy_ops->write_bytes(data, addr, 1, &value);
+ return data->phy_ops->write_bytes(data, addr, 1, &value,
+ TPM_TIS_PHYS_8);
}
static inline int tpm_tis_write32(struct tpm_tis_data *data, u32 addr,
u32 value)
{
- return data->phy_ops->write32(data, addr, value);
+ __le32 value_le;
+ int rc;
+
+ value_le = cpu_to_le32(value);
+ rc = data->phy_ops->write_bytes(data, addr, sizeof(u32),
+ (u8 *)&value_le, TPM_TIS_PHYS_32);
+ return rc;
+}
+
+static inline int tpm_tis_verify_crc(struct tpm_tis_data *data, size_t len,
+ const u8 *value)
+{
+ if (!data->phy_ops->verify_crc)
+ return 0;
+ return data->phy_ops->verify_crc(data, len, value);
}
static inline bool is_bsw(void)
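
After this change the per-bus drivers implement only read_bytes()/write_bytes(); the former read16/read32/write32 callbacks are gone, and the core synthesizes the word accessors from byte accesses, always passing data in little-endian order plus an io_mode hint for the preferred access width. A byte-stream bus can simply ignore the hint (the I2C and SPI drivers later in this diff do exactly that), while a register-oriented bus such as the MMIO phy in tpm_tis.c switches on it. The fragment below is a hypothetical phy written as a rough sketch, not an in-tree driver: example_bus_read(), example_bus_write() and to_example_phy() are made-up helpers.

#include "tpm_tis_core.h"

/* Hypothetical phy, for illustration only. */
static int example_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
			      u8 *result, enum tpm_tis_io_mode io_mode)
{
	/* Byte-oriented bus: io_mode is only a width hint, so one bulk
	 * transfer is fine; the core expects little-endian data. */
	return example_bus_read(to_example_phy(data), addr, result, len);
}

static int example_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
			       const u8 *value, enum tpm_tis_io_mode io_mode)
{
	return example_bus_write(to_example_phy(data), addr, value, len);
}

static const struct tpm_tis_phy_ops example_phy_ops = {
	.read_bytes  = example_read_bytes,
	.write_bytes = example_write_bytes,
	/* .verify_crc is optional; tpm_tis_verify_crc() returns 0 when absent. */
};
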
diff --git a/drivers/char/tpm/tpm_tis_i2c.c b/drivers/char/tpm/tpm_tis_i2c.c
new file mode 100644
index 000000000000..0692510dfcab
--- /dev/null
+++ b/drivers/char/tpm/tpm_tis_i2c.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2021 Nuvoton Technology corporation
+ * Copyright (C) 2019-2022 Infineon Technologies AG
+ *
+ * This device driver implements the TPM interface as defined in the TCG PC
+ * Client Platform TPM Profile (PTP) Specification for TPM 2.0 v1.04
+ * Revision 14.
+ *
+ * It is based on the tpm_tis_spi device driver.
+ */
+
+#include <linux/i2c.h>
+#include <linux/crc-ccitt.h>
+#include "tpm_tis_core.h"
+
+/* TPM registers */
+#define TPM_I2C_LOC_SEL 0x00
+#define TPM_I2C_ACCESS 0x04
+#define TPM_I2C_INTERFACE_CAPABILITY 0x30
+#define TPM_I2C_DEVICE_ADDRESS 0x38
+#define TPM_I2C_DATA_CSUM_ENABLE 0x40
+#define TPM_DATA_CSUM 0x44
+#define TPM_I2C_DID_VID 0x48
+#define TPM_I2C_RID 0x4C
+
+/* TIS-compatible register address to avoid clash with TPM_ACCESS (0x00) */
+#define TPM_LOC_SEL 0x0FFF
+
+/* Mask to extract the I2C register from TIS register addresses */
+#define TPM_TIS_REGISTER_MASK 0x0FFF
+
+/* Default Guard Time of 250µs until interface capability register is read */
+#define GUARD_TIME_DEFAULT_MIN 250
+#define GUARD_TIME_DEFAULT_MAX 300
+
+/* Guard Time of 250µs after I2C slave NACK */
+#define GUARD_TIME_ERR_MIN 250
+#define GUARD_TIME_ERR_MAX 300
+
+/* Guard Time bit masks; SR is repeated start, RW is read then write, etc. */
+#define TPM_GUARD_TIME_SR_MASK 0x40000000
+#define TPM_GUARD_TIME_RR_MASK 0x00100000
+#define TPM_GUARD_TIME_RW_MASK 0x00080000
+#define TPM_GUARD_TIME_WR_MASK 0x00040000
+#define TPM_GUARD_TIME_WW_MASK 0x00020000
+#define TPM_GUARD_TIME_MIN_MASK 0x0001FE00
+#define TPM_GUARD_TIME_MIN_SHIFT 9
+
+/* Masks with bits that must be read zero */
+#define TPM_ACCESS_READ_ZERO 0x48
+#define TPM_INT_ENABLE_ZERO 0x7FFFFF6
+#define TPM_STS_READ_ZERO 0x23
+#define TPM_INTF_CAPABILITY_ZERO 0x0FFFF000
+#define TPM_I2C_INTERFACE_CAPABILITY_ZERO 0x80000000
+
+struct tpm_tis_i2c_phy {
+ struct tpm_tis_data priv;
+ struct i2c_client *i2c_client;
+ bool guard_time_read;
+ bool guard_time_write;
+ u16 guard_time_min;
+ u16 guard_time_max;
+ u8 *io_buf;
+};
+
+static inline struct tpm_tis_i2c_phy *
+to_tpm_tis_i2c_phy(struct tpm_tis_data *data)
+{
+ return container_of(data, struct tpm_tis_i2c_phy, priv);
+}
+
+/*
+ * tpm_tis_core uses the register addresses as defined in Table 19 "Allocation
+ * of Register Space for FIFO TPM Access" of the TCG PC Client PTP
+ * Specification. In order for this code to work together with tpm_tis_core,
+ * those addresses need to mapped to the registers defined for I2C TPMs in
+ * Table 51 "I2C-TPM Register Overview".
+ *
+ * For most addresses this can be done by simply stripping off the locality
+ * information from the address. A few addresses need to be mapped explicitly,
+ * since the corresponding I2C registers have been moved around. TPM_LOC_SEL is
+ * only defined for I2C TPMs and is also mapped explicitly here to distinguish
+ * it from TPM_ACCESS(0).
+ *
+ * Locality information is ignored, since this driver assumes exclusive access
+ * to the TPM and always uses locality 0.
+ */
+static u8 tpm_tis_i2c_address_to_register(u32 addr)
+{
+ addr &= TPM_TIS_REGISTER_MASK;
+
+ switch (addr) {
+ case TPM_ACCESS(0):
+ return TPM_I2C_ACCESS;
+ case TPM_LOC_SEL:
+ return TPM_I2C_LOC_SEL;
+ case TPM_DID_VID(0):
+ return TPM_I2C_DID_VID;
+ case TPM_RID(0):
+ return TPM_I2C_RID;
+ default:
+ return addr;
+ }
+}
+
+static int tpm_tis_i2c_retry_transfer_until_ack(struct tpm_tis_data *data,
+ struct i2c_msg *msg)
+{
+ struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data);
+ bool guard_time;
+ int i = 0;
+ int ret;
+
+ if (msg->flags & I2C_M_RD)
+ guard_time = phy->guard_time_read;
+ else
+ guard_time = phy->guard_time_write;
+
+ do {
+ ret = i2c_transfer(phy->i2c_client->adapter, msg, 1);
+ if (ret < 0)
+ usleep_range(GUARD_TIME_ERR_MIN, GUARD_TIME_ERR_MAX);
+ else if (guard_time)
+ usleep_range(phy->guard_time_min, phy->guard_time_max);
+ /* retry on TPM NACK */
+ } while (ret < 0 && i++ < TPM_RETRY);
+
+ return ret;
+}
+
+/* Check that bits which must be read zero are not set */
+static int tpm_tis_i2c_sanity_check_read(u8 reg, u16 len, u8 *buf)
+{
+ u32 zero_mask;
+ u32 value;
+
+ switch (len) {
+ case sizeof(u8):
+ value = buf[0];
+ break;
+ case sizeof(u16):
+ value = le16_to_cpup((__le16 *)buf);
+ break;
+ case sizeof(u32):
+ value = le32_to_cpup((__le32 *)buf);
+ break;
+ default:
+ /* unknown length, skip check */
+ return 0;
+ }
+
+ switch (reg) {
+ case TPM_I2C_ACCESS:
+ zero_mask = TPM_ACCESS_READ_ZERO;
+ break;
+ case TPM_INT_ENABLE(0) & TPM_TIS_REGISTER_MASK:
+ zero_mask = TPM_INT_ENABLE_ZERO;
+ break;
+ case TPM_STS(0) & TPM_TIS_REGISTER_MASK:
+ zero_mask = TPM_STS_READ_ZERO;
+ break;
+ case TPM_INTF_CAPS(0) & TPM_TIS_REGISTER_MASK:
+ zero_mask = TPM_INTF_CAPABILITY_ZERO;
+ break;
+ case TPM_I2C_INTERFACE_CAPABILITY:
+ zero_mask = TPM_I2C_INTERFACE_CAPABILITY_ZERO;
+ break;
+ default:
+ /* unknown register, skip check */
+ return 0;
+ }
+
+ if (unlikely((value & zero_mask) != 0x00)) {
+ pr_debug("TPM I2C read of register 0x%02x failed sanity check: 0x%x\n", reg, value);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int tpm_tis_i2c_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ u8 *result, enum tpm_tis_io_mode io_mode)
+{
+ struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data);
+ struct i2c_msg msg = { .addr = phy->i2c_client->addr };
+ u8 reg = tpm_tis_i2c_address_to_register(addr);
+ int i;
+ int ret;
+
+ for (i = 0; i < TPM_RETRY; i++) {
+ /* write register */
+ msg.len = sizeof(reg);
+ msg.buf = &reg;
+ msg.flags = 0;
+ ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+ if (ret < 0)
+ return ret;
+
+ /* read data */
+ msg.buf = result;
+ msg.len = len;
+ msg.flags = I2C_M_RD;
+ ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+ if (ret < 0)
+ return ret;
+
+ ret = tpm_tis_i2c_sanity_check_read(reg, len, result);
+ if (ret == 0)
+ return 0;
+
+ usleep_range(GUARD_TIME_ERR_MIN, GUARD_TIME_ERR_MAX);
+ }
+
+ return ret;
+}
+
+static int tpm_tis_i2c_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
+ const u8 *value,
+ enum tpm_tis_io_mode io_mode)
+{
+ struct tpm_tis_i2c_phy *phy = to_tpm_tis_i2c_phy(data);
+ struct i2c_msg msg = { .addr = phy->i2c_client->addr };
+ u8 reg = tpm_tis_i2c_address_to_register(addr);
+ int ret;
+
+ if (len > TPM_BUFSIZE - 1)
+ return -EIO;
+
+ /* write register and data in one go */
+ phy->io_buf[0] = reg;
+ memcpy(phy->io_buf + sizeof(reg), value, len);
+
+ msg.len = sizeof(reg) + len;
+ msg.buf = phy->io_buf;
+ ret = tpm_tis_i2c_retry_transfer_until_ack(data, &msg);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int tpm_tis_i2c_verify_crc(struct tpm_tis_data *data, size_t len,
+ const u8 *value)
+{
+ u16 crc_tpm, crc_host;
+ int rc;
+
+ rc = tpm_tis_read16(data, TPM_DATA_CSUM, &crc_tpm);
+ if (rc < 0)
+ return rc;
+
+ /* reflect crc result, regardless of host endianness */
+ crc_host = swab16(crc_ccitt(0, value, len));
+ if (crc_tpm != crc_host)
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * Guard Time:
+ * After each I2C operation, the TPM might require the master to wait.
+ * The time period is vendor-specific and must be read from the
+ * TPM_I2C_INTERFACE_CAPABILITY register.
+ *
+ * Before the Guard Time is read (or after the TPM failed to send an I2C NACK),
+ * a Guard Time of 250µs applies.
+ *
+ * Various flags in the same register indicate if a guard time is needed:
+ * - SR: <I2C read with repeated start> <guard time> <I2C read>
+ * - RR: <I2C read> <guard time> <I2C read>
+ * - RW: <I2C read> <guard time> <I2C write>
+ * - WR: <I2C write> <guard time> <I2C read>
+ * - WW: <I2C write> <guard time> <I2C write>
+ *
+ * See TCG PC Client PTP Specification v1.04, 8.1.10 GUARD_TIME
+ */
+static int tpm_tis_i2c_init_guard_time(struct tpm_tis_i2c_phy *phy)
+{
+ u32 i2c_caps;
+ int ret;
+
+ phy->guard_time_read = true;
+ phy->guard_time_write = true;
+ phy->guard_time_min = GUARD_TIME_DEFAULT_MIN;
+ phy->guard_time_max = GUARD_TIME_DEFAULT_MAX;
+
+ ret = tpm_tis_i2c_read_bytes(&phy->priv, TPM_I2C_INTERFACE_CAPABILITY,
+ sizeof(i2c_caps), (u8 *)&i2c_caps,
+ TPM_TIS_PHYS_32);
+ if (ret)
+ return ret;
+
+ phy->guard_time_read = (i2c_caps & TPM_GUARD_TIME_RR_MASK) ||
+ (i2c_caps & TPM_GUARD_TIME_RW_MASK);
+ phy->guard_time_write = (i2c_caps & TPM_GUARD_TIME_WR_MASK) ||
+ (i2c_caps & TPM_GUARD_TIME_WW_MASK);
+ phy->guard_time_min = (i2c_caps & TPM_GUARD_TIME_MIN_MASK) >>
+ TPM_GUARD_TIME_MIN_SHIFT;
+ /* guard_time_max = guard_time_min * 1.2 */
+ phy->guard_time_max = phy->guard_time_min + phy->guard_time_min / 5;
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
+
+static const struct tpm_tis_phy_ops tpm_i2c_phy_ops = {
+ .read_bytes = tpm_tis_i2c_read_bytes,
+ .write_bytes = tpm_tis_i2c_write_bytes,
+ .verify_crc = tpm_tis_i2c_verify_crc,
+};
+
+static int tpm_tis_i2c_probe(struct i2c_client *dev,
+ const struct i2c_device_id *id)
+{
+ struct tpm_tis_i2c_phy *phy;
+ const u8 crc_enable = 1;
+ const u8 locality = 0;
+ int ret;
+
+ phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_i2c_phy),
+ GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->io_buf = devm_kzalloc(&dev->dev, TPM_BUFSIZE, GFP_KERNEL);
+ if (!phy->io_buf)
+ return -ENOMEM;
+
+ phy->i2c_client = dev;
+
+ /* must precede all communication with the tpm */
+ ret = tpm_tis_i2c_init_guard_time(phy);
+ if (ret)
+ return ret;
+
+ ret = tpm_tis_i2c_write_bytes(&phy->priv, TPM_LOC_SEL, sizeof(locality),
+ &locality, TPM_TIS_PHYS_8);
+ if (ret)
+ return ret;
+
+ ret = tpm_tis_i2c_write_bytes(&phy->priv, TPM_I2C_DATA_CSUM_ENABLE,
+ sizeof(crc_enable), &crc_enable,
+ TPM_TIS_PHYS_8);
+ if (ret)
+ return ret;
+
+ return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_i2c_phy_ops,
+ NULL);
+}
+
+static void tpm_tis_i2c_remove(struct i2c_client *client)
+{
+ struct tpm_chip *chip = i2c_get_clientdata(client);
+
+ tpm_chip_unregister(chip);
+ tpm_tis_remove(chip);
+}
+
+static const struct i2c_device_id tpm_tis_i2c_id[] = {
+ { "tpm_tis_i2c", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tpm_tis_i2c_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_tis_i2c_match[] = {
+ { .compatible = "infineon,slb9673", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_tis_i2c_match);
+#endif
+
+static struct i2c_driver tpm_tis_i2c_driver = {
+ .driver = {
+ .name = "tpm_tis_i2c",
+ .pm = &tpm_tis_pm,
+ .of_match_table = of_match_ptr(of_tis_i2c_match),
+ },
+ .probe = tpm_tis_i2c_probe,
+ .remove = tpm_tis_i2c_remove,
+ .id_table = tpm_tis_i2c_id,
+};
+module_i2c_driver(tpm_tis_i2c_driver);
+
+MODULE_DESCRIPTION("TPM Driver for native I2C access");
+MODULE_LICENSE("GPL");
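
Two details of the new driver above are easy to check by hand. The guard time maximum is derived as min + min/5, so a minimum of 40µs read from the capability register gives a 40–48µs usleep_range() window. The data checksum path recomputes crc_ccitt() over the command or response bytes just transferred and byte-swaps the result before comparing it against the TPM_DATA_CSUM register. The stand-alone program below mimics that host-side computation, assuming the kernel's crc_ccitt() is the LSB-first CRC-16 with reversed polynomial 0x8408 (as in lib/crc-ccitt.c); it is illustrative only, not driver code.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Bit-at-a-time equivalent of crc_ccitt(): LSB-first, polynomial 0x8408. */
static uint16_t crc_ccitt_sketch(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
	}
	return crc;
}

int main(void)
{
	/* Example payload: a TPM2_Startup(TPM_SU_CLEAR) command frame. */
	const uint8_t cmd[] = { 0x80, 0x01, 0x00, 0x00, 0x00, 0x0c,
				0x00, 0x00, 0x01, 0x44, 0x00, 0x00 };
	uint16_t crc = crc_ccitt_sketch(0, cmd, sizeof(cmd));
	/* swab16(), as done before comparing against TPM_DATA_CSUM. */
	uint16_t crc_host = (uint16_t)((crc << 8) | (crc >> 8));

	printf("crc=0x%04x swapped=0x%04x\n", crc, crc_host);
	return 0;
}
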
diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
index c89278103703..77cea5b31c6e 100644
--- a/drivers/char/tpm/tpm_tis_i2c_cr50.c
+++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
@@ -31,6 +31,7 @@
#define TPM_CR50_TIMEOUT_SHORT_MS 2 /* Short timeout during transactions */
#define TPM_CR50_TIMEOUT_NOIRQ_MS 20 /* Timeout for TPM ready without IRQ */
#define TPM_CR50_I2C_DID_VID 0x00281ae0L /* Device and vendor ID reg value */
+#define TPM_TI50_I2C_DID_VID 0x504a6666L /* Device and vendor ID reg value */
#define TPM_CR50_I2C_MAX_RETRIES 3 /* Max retries due to I2C errors */
#define TPM_CR50_I2C_RETRY_DELAY_LO 55 /* Min usecs between retries on I2C */
#define TPM_CR50_I2C_RETRY_DELAY_HI 65 /* Max usecs between retries on I2C */
@@ -628,6 +629,19 @@ static bool tpm_cr50_i2c_req_canceled(struct tpm_chip *chip, u8 status)
return status == TPM_STS_COMMAND_READY;
}
+static bool tpm_cr50_i2c_is_firmware_power_managed(struct device *dev)
+{
+ u8 val;
+ int ret;
+
+ /* This flag should default to true when the device property is not present */
+ ret = device_property_read_u8(dev, "firmware-power-managed", &val);
+ if (ret)
+ return true;
+
+ return val;
+}
+
static const struct tpm_class_ops cr50_i2c = {
.flags = TPM_OPS_AUTO_STARTUP,
.status = &tpm_cr50_i2c_tis_status,
@@ -686,7 +700,8 @@ static int tpm_cr50_i2c_probe(struct i2c_client *client)
/* cr50 is a TPM 2.0 chip */
chip->flags |= TPM_CHIP_FLAG_TPM2;
- chip->flags |= TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED;
+ if (tpm_cr50_i2c_is_firmware_power_managed(dev))
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED;
/* Default timeouts */
chip->timeout_a = msecs_to_jiffies(TIS_SHORT_TIMEOUT);
@@ -728,15 +743,15 @@ static int tpm_cr50_i2c_probe(struct i2c_client *client)
}
vendor = le32_to_cpup((__le32 *)buf);
- if (vendor != TPM_CR50_I2C_DID_VID) {
+ if (vendor != TPM_CR50_I2C_DID_VID && vendor != TPM_TI50_I2C_DID_VID) {
dev_err(dev, "Vendor ID did not match! ID was %08x\n", vendor);
tpm_cr50_release_locality(chip, true);
return -ENODEV;
}
- dev_info(dev, "cr50 TPM 2.0 (i2c 0x%02x irq %d id 0x%x)\n",
+ dev_info(dev, "%s TPM 2.0 (i2c 0x%02x irq %d id 0x%x)\n",
+ vendor == TPM_TI50_I2C_DID_VID ? "ti50" : "cr50",
client->addr, client->irq, vendor >> 16);
-
return tpm_chip_register(chip);
}
@@ -748,20 +763,18 @@ static int tpm_cr50_i2c_probe(struct i2c_client *client)
* - 0: Success.
* - -errno: A POSIX error code.
*/
-static int tpm_cr50_i2c_remove(struct i2c_client *client)
+static void tpm_cr50_i2c_remove(struct i2c_client *client)
{
struct tpm_chip *chip = i2c_get_clientdata(client);
struct device *dev = &client->dev;
if (!chip) {
- dev_err(dev, "Could not get client data at remove\n");
- return -ENODEV;
+ dev_crit(dev, "Could not get client data at remove, memory corruption ahead\n");
+ return;
}
tpm_chip_unregister(chip);
tpm_cr50_release_locality(chip, true);
-
- return 0;
}
static SIMPLE_DEV_PM_OPS(cr50_i2c_pm, tpm_pm_suspend, tpm_pm_resume);
diff --git a/drivers/char/tpm/tpm_tis_spi.h b/drivers/char/tpm/tpm_tis_spi.h
index bba73979c368..d0f66f6f1931 100644
--- a/drivers/char/tpm/tpm_tis_spi.h
+++ b/drivers/char/tpm/tpm_tis_spi.h
@@ -31,10 +31,6 @@ extern int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
extern int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
u8 *in, const u8 *out);
-extern int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result);
-extern int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result);
-extern int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value);
-
#ifdef CONFIG_TCG_TIS_SPI_CR50
extern int cr50_spi_probe(struct spi_device *spi);
#else
diff --git a/drivers/char/tpm/tpm_tis_spi_cr50.c b/drivers/char/tpm/tpm_tis_spi_cr50.c
index ea759af25634..f4937280e940 100644
--- a/drivers/char/tpm/tpm_tis_spi_cr50.c
+++ b/drivers/char/tpm/tpm_tis_spi_cr50.c
@@ -36,6 +36,9 @@
#define TPM_CR50_FW_VER(l) (0x0f90 | ((l) << 12))
#define TPM_CR50_MAX_FW_VER_LEN 64
+/* Default quality for hwrng. */
+#define TPM_CR50_DEFAULT_RNG_QUALITY 700
+
struct cr50_spi_phy {
struct tpm_tis_spi_phy spi_phy;
@@ -182,6 +185,19 @@ static int cr50_spi_flow_control(struct tpm_tis_spi_phy *phy,
return 0;
}
+static bool tpm_cr50_spi_is_firmware_power_managed(struct device *dev)
+{
+ u8 val;
+ int ret;
+
+ /* This flag should default to true when the device property is not present */
+ ret = device_property_read_u8(dev, "firmware-power-managed", &val);
+ if (ret)
+ return true;
+
+ return val;
+}
+
static int tpm_tis_spi_cr50_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
u8 *in, const u8 *out)
{
@@ -206,13 +222,13 @@ static int tpm_tis_spi_cr50_transfer(struct tpm_tis_data *data, u32 addr, u16 le
}
static int tpm_tis_spi_cr50_read_bytes(struct tpm_tis_data *data, u32 addr,
- u16 len, u8 *result)
+ u16 len, u8 *result, enum tpm_tis_io_mode io_mode)
{
return tpm_tis_spi_cr50_transfer(data, addr, len, result, NULL);
}
static int tpm_tis_spi_cr50_write_bytes(struct tpm_tis_data *data, u32 addr,
- u16 len, const u8 *value)
+ u16 len, const u8 *value, enum tpm_tis_io_mode io_mode)
{
return tpm_tis_spi_cr50_transfer(data, addr, len, NULL, value);
}
@@ -220,9 +236,6 @@ static int tpm_tis_spi_cr50_write_bytes(struct tpm_tis_data *data, u32 addr,
static const struct tpm_tis_phy_ops tpm_spi_cr50_phy_ops = {
.read_bytes = tpm_tis_spi_cr50_read_bytes,
.write_bytes = tpm_tis_spi_cr50_write_bytes,
- .read16 = tpm_tis_spi_read16,
- .read32 = tpm_tis_spi_read32,
- .write32 = tpm_tis_spi_write32,
};
static void cr50_print_fw_version(struct tpm_tis_data *data)
@@ -264,6 +277,7 @@ int cr50_spi_probe(struct spi_device *spi)
phy = &cr50_phy->spi_phy;
phy->flow_control = cr50_spi_flow_control;
phy->wake_after = jiffies;
+ phy->priv.rng_quality = TPM_CR50_DEFAULT_RNG_QUALITY;
init_completion(&phy->ready);
cr50_phy->access_delay = CR50_NOIRQ_ACCESS_DELAY;
@@ -305,7 +319,8 @@ int cr50_spi_probe(struct spi_device *spi)
cr50_print_fw_version(&phy->priv);
chip = dev_get_drvdata(&spi->dev);
- chip->flags |= TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED;
+ if (tpm_cr50_spi_is_firmware_power_managed(&spi->dev))
+ chip->flags |= TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED;
return 0;
}
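The new TPM_CR50_DEFAULT_RNG_QUALITY is stored in the TIS data's rng_quality and ends up as the hwrng quality advertised for the chip's RNG. Assuming the usual hwrng crediting of (bytes * 8 * quality) >> 10 bits of entropy, a quality of 700 works out to, for example:

	32-byte read  ->  32 * 8 * 700 / 1024 = 175 bits of entropy credited

(The 32-byte figure is only an illustration; the crediting formula is the hwrng core's, not part of this patch.)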
diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
index aaa59a00eeae..a0963a3e92bd 100644
--- a/drivers/char/tpm/tpm_tis_spi_main.c
+++ b/drivers/char/tpm/tpm_tis_spi_main.c
@@ -141,55 +141,17 @@ exit:
}
static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
- u16 len, u8 *result)
+ u16 len, u8 *result, enum tpm_tis_io_mode io_mode)
{
return tpm_tis_spi_transfer(data, addr, len, result, NULL);
}
static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
- u16 len, const u8 *value)
+ u16 len, const u8 *value, enum tpm_tis_io_mode io_mode)
{
return tpm_tis_spi_transfer(data, addr, len, NULL, value);
}
-int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
-{
- __le16 result_le;
- int rc;
-
- rc = data->phy_ops->read_bytes(data, addr, sizeof(u16),
- (u8 *)&result_le);
- if (!rc)
- *result = le16_to_cpu(result_le);
-
- return rc;
-}
-
-int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result)
-{
- __le32 result_le;
- int rc;
-
- rc = data->phy_ops->read_bytes(data, addr, sizeof(u32),
- (u8 *)&result_le);
- if (!rc)
- *result = le32_to_cpu(result_le);
-
- return rc;
-}
-
-int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value)
-{
- __le32 value_le;
- int rc;
-
- value_le = cpu_to_le32(value);
- rc = data->phy_ops->write_bytes(data, addr, sizeof(u32),
- (u8 *)&value_le);
-
- return rc;
-}
-
int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
int irq, const struct tpm_tis_phy_ops *phy_ops)
{
@@ -205,9 +167,6 @@ int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
.read_bytes = tpm_tis_spi_read_bytes,
.write_bytes = tpm_tis_spi_write_bytes,
- .read16 = tpm_tis_spi_read16,
- .read32 = tpm_tis_spi_read32,
- .write32 = tpm_tis_spi_write32,
};
static int tpm_tis_spi_probe(struct spi_device *dev)
@@ -254,13 +213,12 @@ static int tpm_tis_spi_driver_probe(struct spi_device *spi)
static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_spi_resume);
-static int tpm_tis_spi_remove(struct spi_device *dev)
+static void tpm_tis_spi_remove(struct spi_device *dev)
{
struct tpm_chip *chip = spi_get_drvdata(dev);
tpm_chip_unregister(chip);
tpm_tis_remove(chip);
- return 0;
}
static const struct spi_device_id tpm_tis_spi_id[] = {
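With the io_mode argument added to read_bytes()/write_bytes(), the per-driver 16/32-bit helpers deleted above become redundant: the TIS layer can build the wider accesses on top of the byte callbacks itself. A sketch of what such a generic 16-bit read amounts to, mirroring the removed tpm_tis_spi_read16() (illustrative, not copied from the core):

	static int example_tis_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
	{
		__le16 result_le;
		int rc;

		rc = data->phy_ops->read_bytes(data, addr, sizeof(u16),
					       (u8 *)&result_le, TPM_TIS_PHYS_16);
		if (!rc)
			*result = le16_to_cpu(result_le);

		return rc;
	}

SPI transfers are plain byte streams, so this driver can ignore io_mode entirely, which is why tpm_tis_spi_transfer() itself is unchanged.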
diff --git a/drivers/char/tpm/tpm_tis_synquacer.c b/drivers/char/tpm/tpm_tis_synquacer.c
index e47bdd272704..679196c61401 100644
--- a/drivers/char/tpm/tpm_tis_synquacer.c
+++ b/drivers/char/tpm/tpm_tis_synquacer.c
@@ -35,72 +35,53 @@ static inline struct tpm_tis_synquacer_phy *to_tpm_tis_tcg_phy(struct tpm_tis_da
}
static int tpm_tis_synquacer_read_bytes(struct tpm_tis_data *data, u32 addr,
- u16 len, u8 *result)
+ u16 len, u8 *result,
+ enum tpm_tis_io_mode io_mode)
{
struct tpm_tis_synquacer_phy *phy = to_tpm_tis_tcg_phy(data);
-
- while (len--)
- *result++ = ioread8(phy->iobase + addr);
+ switch (io_mode) {
+ case TPM_TIS_PHYS_8:
+ while (len--)
+ *result++ = ioread8(phy->iobase + addr);
+ break;
+ case TPM_TIS_PHYS_16:
+ result[1] = ioread8(phy->iobase + addr + 1);
+ result[0] = ioread8(phy->iobase + addr);
+ break;
+ case TPM_TIS_PHYS_32:
+ result[3] = ioread8(phy->iobase + addr + 3);
+ result[2] = ioread8(phy->iobase + addr + 2);
+ result[1] = ioread8(phy->iobase + addr + 1);
+ result[0] = ioread8(phy->iobase + addr);
+ break;
+ }
return 0;
}
static int tpm_tis_synquacer_write_bytes(struct tpm_tis_data *data, u32 addr,
- u16 len, const u8 *value)
+ u16 len, const u8 *value,
+ enum tpm_tis_io_mode io_mode)
{
struct tpm_tis_synquacer_phy *phy = to_tpm_tis_tcg_phy(data);
-
- while (len--)
- iowrite8(*value++, phy->iobase + addr);
-
- return 0;
-}
-
-static int tpm_tis_synquacer_read16_bw(struct tpm_tis_data *data,
- u32 addr, u16 *result)
-{
- struct tpm_tis_synquacer_phy *phy = to_tpm_tis_tcg_phy(data);
-
- /*
- * Due to the limitation of SPI controller on SynQuacer,
- * 16/32 bits access must be done in byte-wise and descending order.
- */
- *result = (ioread8(phy->iobase + addr + 1) << 8) |
- (ioread8(phy->iobase + addr));
-
- return 0;
-}
-
-static int tpm_tis_synquacer_read32_bw(struct tpm_tis_data *data,
- u32 addr, u32 *result)
-{
- struct tpm_tis_synquacer_phy *phy = to_tpm_tis_tcg_phy(data);
-
- /*
- * Due to the limitation of SPI controller on SynQuacer,
- * 16/32 bits access must be done in byte-wise and descending order.
- */
- *result = (ioread8(phy->iobase + addr + 3) << 24) |
- (ioread8(phy->iobase + addr + 2) << 16) |
- (ioread8(phy->iobase + addr + 1) << 8) |
- (ioread8(phy->iobase + addr));
-
- return 0;
-}
-
-static int tpm_tis_synquacer_write32_bw(struct tpm_tis_data *data,
- u32 addr, u32 value)
-{
- struct tpm_tis_synquacer_phy *phy = to_tpm_tis_tcg_phy(data);
-
- /*
- * Due to the limitation of SPI controller on SynQuacer,
- * 16/32 bits access must be done in byte-wise and descending order.
- */
- iowrite8(value >> 24, phy->iobase + addr + 3);
- iowrite8(value >> 16, phy->iobase + addr + 2);
- iowrite8(value >> 8, phy->iobase + addr + 1);
- iowrite8(value, phy->iobase + addr);
+ switch (io_mode) {
+ case TPM_TIS_PHYS_8:
+ while (len--)
+ iowrite8(*value++, phy->iobase + addr);
+ break;
+ case TPM_TIS_PHYS_16:
+ return -EINVAL;
+ case TPM_TIS_PHYS_32:
+ /*
+ * Due to the limitation of SPI controller on SynQuacer,
+ * 16/32 bits access must be done in byte-wise and descending order.
+ */
+ iowrite8(value[3], phy->iobase + addr + 3);
+ iowrite8(value[2], phy->iobase + addr + 2);
+ iowrite8(value[1], phy->iobase + addr + 1);
+ iowrite8(value[0], phy->iobase + addr);
+ break;
+ }
return 0;
}
@@ -108,9 +89,6 @@ static int tpm_tis_synquacer_write32_bw(struct tpm_tis_data *data,
static const struct tpm_tis_phy_ops tpm_tcg_bw = {
.read_bytes = tpm_tis_synquacer_read_bytes,
.write_bytes = tpm_tis_synquacer_write_bytes,
- .read16 = tpm_tis_synquacer_read16_bw,
- .read32 = tpm_tis_synquacer_read32_bw,
- .write32 = tpm_tis_synquacer_write32_bw,
};
static int tpm_tis_synquacer_init(struct device *dev,
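SynQuacer is the phy that actually needs io_mode: its SPI bridge requires 16/32-bit registers to be accessed byte-wise in descending order, which the switch statements above carry over from the removed read16/read32/write32 helpers. The 16-bit write case returns -EINVAL, presumably because the TIS paths never issue 16-bit writes (the old phy ops had no write16 either). For reference, a caller-side sketch of how a 32-bit write now reaches this path, mirroring the deleted byte-wise helper (illustrative, not the in-tree core code):

	static int example_tis_write32(struct tpm_tis_data *data, u32 addr, u32 value)
	{
		__le32 value_le = cpu_to_le32(value);

		return data->phy_ops->write_bytes(data, addr, sizeof(u32),
						  (u8 *)&value_le, TPM_TIS_PHYS_32);
	}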
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 91c772e38bb5..5c865987ba5c 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -91,7 +91,7 @@ static ssize_t vtpm_proxy_fops_read(struct file *filp, char __user *buf,
len = proxy_dev->req_len;
- if (count < len) {
+ if (count < len || len > sizeof(proxy_dev->buffer)) {
mutex_unlock(&proxy_dev->buf_lock);
pr_debug("Invalid size in recv: count=%zd, req_len=%zd\n",
count, len);
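The added bound makes the read path robust even if req_len were ever larger than the driver's fixed-size buffer: checking count alone only protects the user buffer, not the kernel-side copy. Simplified sketch of the pattern being guarded (the error value is illustrative):

	len = proxy_dev->req_len;
	if (count < len || len > sizeof(proxy_dev->buffer))
		return -EIO;		/* refuse rather than read past the buffer */
	if (copy_to_user(buf, proxy_dev->buffer, len))
		return -EFAULT;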
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index da5b30771418..379291826261 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -126,16 +126,16 @@ static void vtpm_cancel(struct tpm_chip *chip)
notify_remote_via_evtchn(priv->evtchn);
}
-static unsigned int shr_data_offset(struct vtpm_shared_page *shr)
+static size_t shr_data_offset(struct vtpm_shared_page *shr)
{
- return sizeof(*shr) + sizeof(u32) * shr->nr_extra_pages;
+ return struct_size(shr, extra_pages, shr->nr_extra_pages);
}
static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct tpm_private *priv = dev_get_drvdata(&chip->dev);
struct vtpm_shared_page *shr = priv->shr;
- unsigned int offset = shr_data_offset(shr);
+ size_t offset = shr_data_offset(shr);
u32 ordinal;
unsigned long duration;
@@ -177,7 +177,7 @@ static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct tpm_private *priv = dev_get_drvdata(&chip->dev);
struct vtpm_shared_page *shr = priv->shr;
- unsigned int offset = shr_data_offset(shr);
+ size_t offset = shr_data_offset(shr);
size_t length = shr->length;
if (shr->state == VTPM_STATE_IDLE)
@@ -253,20 +253,12 @@ static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
struct xenbus_transaction xbt;
const char *message = NULL;
int rv;
- grant_ref_t gref;
- priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
- if (!priv->shr) {
- xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
- return -ENOMEM;
- }
-
- rv = xenbus_grant_ring(dev, priv->shr, 1, &gref);
+ rv = xenbus_setup_ring(dev, GFP_KERNEL, (void **)&priv->shr, 1,
+ &priv->ring_ref);
if (rv < 0)
return rv;
- priv->ring_ref = gref;
-
rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
if (rv)
return rv;
@@ -331,11 +323,7 @@ static void ring_free(struct tpm_private *priv)
if (!priv)
return;
- if (priv->ring_ref)
- gnttab_end_foreign_access(priv->ring_ref, 0,
- (unsigned long)priv->shr);
- else
- free_page((unsigned long)priv->shr);
+ xenbus_teardown_ring((void **)&priv->shr, 1, &priv->ring_ref);
if (priv->irq)
unbind_from_irqhandler(priv->irq, priv);
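Two independent cleanups in this file: shr_data_offset() now uses struct_size(), and the shared-ring setup/teardown moves to xenbus_setup_ring()/xenbus_teardown_ring(), which take over the page allocation, grant, and end-of-access handling that was previously open-coded here. struct_size() is equivalent to the old expression, with overflow protection on top; roughly:

	/*
	 * Open-coded equivalent of struct_size(shr, extra_pages, shr->nr_extra_pages);
	 * the macro additionally saturates to SIZE_MAX instead of wrapping on overflow.
	 */
	size_t offset = sizeof(*shr) +
			(size_t)shr->nr_extra_pages * sizeof(shr->extra_pages[0]);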
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index adf941c47506..ed45d04905c2 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -11,6 +11,7 @@
* of the boot process, for example.
*/
+#include <linux/console.h>
#include <linux/device.h>
#include <linux/serial.h>
#include <linux/tty.h>
@@ -163,6 +164,18 @@ static const struct tty_port_operations tpk_port_ops = {
static struct tty_driver *ttyprintk_driver;
+static struct tty_driver *ttyprintk_console_device(struct console *c,
+ int *index)
+{
+ *index = 0;
+ return ttyprintk_driver;
+}
+
+static struct console ttyprintk_console = {
+ .name = "ttyprintk",
+ .device = ttyprintk_console_device,
+};
+
static int __init ttyprintk_init(void)
{
int ret;
@@ -195,6 +208,8 @@ static int __init ttyprintk_init(void)
goto error;
}
+ register_console(&ttyprintk_console);
+
return 0;
error:
@@ -205,6 +220,7 @@ error:
static void __exit ttyprintk_exit(void)
{
+ unregister_console(&ttyprintk_console);
tty_unregister_driver(ttyprintk_driver);
tty_driver_kref_put(ttyprintk_driver);
tty_port_destroy(&tpk_port.port);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 660c5c388c29..9fa3c76a267f 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1957,8 +1957,15 @@ static void virtcons_remove(struct virtio_device *vdev)
list_del(&portdev->list);
spin_unlock_irq(&pdrvdata_lock);
+ /* Device is going away, exit any polling for buffers */
+ virtio_break_device(vdev);
+ if (use_multiport(portdev))
+ flush_work(&portdev->control_work);
+ else
+ flush_work(&portdev->config_work);
+
/* Disable interrupts for vqs */
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
/* Finish up work that's lined up */
if (use_multiport(portdev))
cancel_work_sync(&portdev->control_work);
@@ -2148,7 +2155,7 @@ static int virtcons_freeze(struct virtio_device *vdev)
portdev = vdev->priv;
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
if (use_multiport(portdev))
virtqueue_disable_cb(portdev->c_ivq);
@@ -2238,7 +2245,7 @@ static struct virtio_driver virtio_rproc_serial = {
.remove = virtcons_remove,
};
-static int __init init(void)
+static int __init virtio_console_init(void)
{
int err;
@@ -2273,7 +2280,7 @@ free:
return err;
}
-static void __exit fini(void)
+static void __exit virtio_console_fini(void)
{
reclaim_dma_bufs();
@@ -2283,8 +2290,8 @@ static void __exit fini(void)
class_destroy(pdrvdata.class);
debugfs_remove_recursive(pdrvdata.debugfs_dir);
}
-module_init(init);
-module_exit(fini);
+module_init(virtio_console_init);
+module_exit(virtio_console_fini);
MODULE_DESCRIPTION("Virtio console driver");
MODULE_LICENSE("GPL");
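Two things change in virtcons_remove(): the device is marked broken and the config/control work is flushed before reset, so no worker is still polling the virtqueues while they are torn down, and the open-coded vdev->config->reset(vdev) calls become virtio_reset_device(). At the time of this series the latter is just the tree-wide wrapper for the same transport callback, roughly:

	/* Sketch of the wrapper, not copied from the virtio core. */
	static inline void example_virtio_reset_device(struct virtio_device *dev)
	{
		dev->config->reset(dev);
	}

The init/exit renames at the bottom only give the module entry points less generic names than init()/fini().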
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.c b/drivers/char/xilinx_hwicap/fifo_icap.c
index 02225eb19cf6..619f3a30ec55 100644
--- a/drivers/char/xilinx_hwicap/fifo_icap.c
+++ b/drivers/char/xilinx_hwicap/fifo_icap.c
@@ -111,7 +111,7 @@ static inline u32 fifo_icap_fifo_read(struct hwicap_drvdata *drvdata)
}
/**
- * fifo_icap_set_read_size - Set the the size register.
+ * fifo_icap_set_read_size - Set the size register.
* @drvdata: a pointer to the drvdata.
* @data: the size of the following read transaction, in words.
**/
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 067396bedf22..74a4928aea1d 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -241,7 +241,7 @@ static int hwicap_command_desync(struct hwicap_drvdata *drvdata)
buffer[index++] = XHI_NOOP_PACKET;
/*
- * Write the data to the FIFO and intiate the transfer of data present
+ * Write the data to the FIFO and initiate the transfer of data present
* in the FIFO to the ICAP device.
*/
return drvdata->config->set_configuration(drvdata,
@@ -297,7 +297,7 @@ static int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata,
buffer[index++] = XHI_NOOP_PACKET;
/*
- * Write the data to the FIFO and intiate the transfer of data present
+ * Write the data to the FIFO and initiate the transfer of data present
* in the FIFO to the ICAP device.
*/
status = drvdata->config->set_configuration(drvdata,
@@ -384,7 +384,7 @@ hwicap_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
drvdata->read_buffer + bytes_to_read,
4 - bytes_to_read);
} else {
- /* Get new data from the ICAP, and return was was requested. */
+ /* Get new data from the ICAP, and return what was requested. */
kbuf = (u32 *) get_zeroed_page(GFP_KERNEL);
if (!kbuf) {
status = -ENOMEM;
diff --git a/drivers/char/xillybus/xillybus_class.c b/drivers/char/xillybus/xillybus_class.c
index 5046486011c8..0f238648dcfe 100644
--- a/drivers/char/xillybus/xillybus_class.c
+++ b/drivers/char/xillybus/xillybus_class.c
@@ -174,18 +174,17 @@ void xillybus_cleanup_chrdev(void *private_data,
struct device *dev)
{
int minor;
- struct xilly_unit *unit;
- bool found = false;
+ struct xilly_unit *unit = NULL, *iter;
mutex_lock(&unit_mutex);
- list_for_each_entry(unit, &unit_list, list_entry)
- if (unit->private_data == private_data) {
- found = true;
+ list_for_each_entry(iter, &unit_list, list_entry)
+ if (iter->private_data == private_data) {
+ unit = iter;
break;
}
- if (!found) {
+ if (!unit) {
dev_err(dev, "Weird bug: Failed to find unit\n");
mutex_unlock(&unit_mutex);
return;
@@ -216,22 +215,21 @@ int xillybus_find_inode(struct inode *inode,
{
int minor = iminor(inode);
int major = imajor(inode);
- struct xilly_unit *unit;
- bool found = false;
+ struct xilly_unit *unit = NULL, *iter;
mutex_lock(&unit_mutex);
- list_for_each_entry(unit, &unit_list, list_entry)
- if (unit->major == major &&
- minor >= unit->lowest_minor &&
- minor < (unit->lowest_minor + unit->num_nodes)) {
- found = true;
+ list_for_each_entry(iter, &unit_list, list_entry)
+ if (iter->major == major &&
+ minor >= iter->lowest_minor &&
+ minor < (iter->lowest_minor + iter->num_nodes)) {
+ unit = iter;
break;
}
mutex_unlock(&unit_mutex);
- if (!found)
+ if (!unit)
return -ENODEV;
*private_data = unit->private_data;
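Both hunks in this file follow the same pattern: the loop cursor becomes a separate iter variable and the match is kept in a NULL-initialized result pointer, so nothing uses the cursor after the loop. This is part of the treewide effort to stop relying on the list iterator outside the loop body; the hazard the old style risks is sketched below (illustrative):

	/* Old-style search: */
	list_for_each_entry(unit, &unit_list, list_entry)
		if (unit->private_data == private_data)
			break;
	/*
	 * If the loop completed without a break, "unit" is now
	 * container_of(&unit_list, struct xilly_unit, list_entry), a pointer
	 * into the list head rather than a real entry, so any post-loop use
	 * needs a separate flag or, as done above, a NULL-initialized result.
	 */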
diff --git a/drivers/char/xillybus/xillyusb.c b/drivers/char/xillybus/xillyusb.c
index dc3551796e5e..39bcbfd908b4 100644
--- a/drivers/char/xillybus/xillyusb.c
+++ b/drivers/char/xillybus/xillyusb.c
@@ -549,6 +549,7 @@ static void cleanup_dev(struct kref *kref)
if (xdev->workq)
destroy_workqueue(xdev->workq);
+ usb_put_dev(xdev->udev);
kfree(xdev->channels); /* Argument may be NULL, and that's fine */
kfree(xdev);
}